diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile index 11fe0104482576769d830bfc5455dbac21f40b1e..21d809367bf97e4d272a8ef8bde34c1153bc22ae 100644 --- a/drivers/net/ethernet/huawei/hinic3/Makefile +++ b/drivers/net/ethernet/huawei/hinic3/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/ ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/hw/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/bond/ ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/cqm/ ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/ ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/cqm/ diff --git a/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c index ba3076c9589241d5e360ce211aa50b304fa8af7c..a252e099ad0211ded0035e17b184a308dacc5fbb 100644 --- a/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c +++ b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c @@ -9,14 +9,18 @@ #include #include #include +#include +#include "ossl_knl.h" #include "hinic3_lld.h" #include "hinic3_srv_nic.h" #include "hinic3_nic_dev.h" #include "hinic3_hw.h" -#include "mpu_inband_cmd.h" -#include "hinic3_hwdev.h" #include "hinic3_bond.h" +#include "hinic3_hwdev.h" + +#include "bond_common_defs.h" +#include "vram_common.h" #define PORT_INVALID_ID 0xFF @@ -214,10 +218,10 @@ static void bond_dev_untrack_port(struct hinic3_bond_dev *bdev, u8 idx) spin_lock(&bdev->lock); if (bdev->tracker.ndev[idx]) { - pr_info("hinic3_bond: untrack port:%u ndev:%s cnt:%d\n", idx, - bdev->tracker.ndev[idx]->name, bdev->tracker.cnt); bdev->tracker.ndev[idx] = NULL; bdev->tracker.cnt--; + pr_info("hinic3_bond: untrack port:%u ndev:%s cnt:%d\n", idx, + bdev->tracker.ndev[idx]->name, bdev->tracker.cnt); } spin_unlock(&bdev->lock); @@ -268,11 +272,16 @@ static void bond_master_event(struct hinic3_bond_dev *bdev, queue_delayed_work(bdev->wq, &bdev->bond_work, 0); } -static struct hinic3_bond_dev *bond_get_bdev(const struct bonding *bond) +static struct hinic3_bond_dev *bond_get_bdev(struct bonding *bond) { struct hinic3_bond_dev *bdev = NULL; int bid; + if (bond == NULL) { + pr_err("hinic3_bond: bond is NULL\n"); + return NULL; + } + mutex_lock(&g_bond_mutex); for (bid = BOND_FIRST_ID; bid <= BOND_MAX_ID; bid++) { bdev = bond_mngr.bond_dev[bid]; @@ -283,38 +292,72 @@ static struct hinic3_bond_dev *bond_get_bdev(const struct bonding *bond) mutex_unlock(&g_bond_mutex); return bdev; } + + if (strncmp(bond->dev->name, bdev->name, BOND_NAME_MAX_LEN) == 0) { + bdev->bond = bond; + return bdev; + } } mutex_unlock(&g_bond_mutex); return NULL; } -static void bond_handle_rtnl_event(struct net_device *ndev) +static struct bonding *get_bonding_by_netdev(struct net_device *ndev) { - struct hinic3_bond_dev *bdev = NULL; struct bonding *bond = NULL; struct slave *slave = NULL; if (netif_is_bond_master(ndev)) { bond = netdev_priv(ndev); - bdev = bond_get_bdev(bond); } else if (netif_is_bond_slave(ndev)) { - /*lint -e(160) */ slave = bond_slave_get_rtnl(ndev); if (slave) { bond = bond_get_bond_by_slave(slave); - bdev = bond_get_bdev(bond); } } - if (!bond || !bdev) + return bond; +} +/*lint -e580 -e546*/ +bool hinic3_is_bond_dev_status_actived(struct net_device *ndev) +{ + struct hinic3_bond_dev *bdev = NULL; + struct bonding *bond = NULL; + + if (!ndev) { + pr_err("hinic3_bond: netdev is NULL\n"); + return false; + } + + bond = 
get_bonding_by_netdev(ndev); + bdev = bond_get_bdev(bond); + if (!bdev) + return false; + + return bdev->status == BOND_DEV_STATUS_ACTIVATED; +} +EXPORT_SYMBOL(hinic3_is_bond_dev_status_actived); +/*lint +e580 +e546*/ + +static void bond_handle_rtnl_event(struct net_device *ndev) +{ + struct hinic3_bond_dev *bdev = NULL; + struct bonding *bond = NULL; + struct slave *slave = NULL; + + bond = get_bonding_by_netdev(ndev); + bdev = bond_get_bdev(bond); + if (!bdev) return; bond_update_attr(bdev, bond); - if (slave) + if (netif_is_bond_slave(ndev)) { + slave = bond_slave_get_rtnl(ndev); bond_slave_event(bdev, slave); - else + } else { bond_master_event(bdev, bond); + } } static void bond_rtnl_data_ready(struct sock *sk) @@ -478,7 +521,11 @@ static void bond_update_slave_info(struct hinic3_bond_dev *bdev, continue; } + if (!bdev->tracker.ndev[i]) + continue; + bond_pf_bitmap_set(bdev, i); + if (!bdev->tracker.netdev_state[i].tx_enabled) continue; @@ -516,6 +563,7 @@ static int bond_upcmd_config(struct hinic3_bond_dev *bdev, attr->active_slaves, attr->lacp_collect_slaves); pr_info("bond_pf_bitmap: 0x%x\n", attr->bond_pf_bitmap); + pr_info("bond user_bitmap 0x%x\n", attr->user_bitmap); err = bond_send_upcmd(bdev, attr, MPU_CMD_BOND_SET_ATTR); if (!err) @@ -560,17 +608,36 @@ static void bond_call_service_func(struct hinic3_bond_dev *bdev, struct bond_att mutex_unlock(&g_bond_service_func_mutex); } +static u32 bond_get_user_bitmap(struct hinic3_bond_dev *bdev) +{ + u32 user_bitmap = 0; + u8 user; + + for (user = HINIC3_BOND_USER_OVS; user < HINIC3_BOND_USER_NUM; user++) { + if (bdev->slot_used[user] == 1) + BITMAP_SET(user_bitmap, user); + } + return user_bitmap; +} + static void bond_do_work(struct hinic3_bond_dev *bdev) { bool is_bonded = 0; struct bond_attr attr; + int is_in_kexec; int err = 0; + is_in_kexec = vram_get_kexec_flag(); + if (is_in_kexec != 0) { + pr_info("Skip changing bond status during os replace\n"); + return; + } + spin_lock(&bdev->lock); is_bonded = bdev->tracker.is_bonded; attr = bdev->new_attr; spin_unlock(&bdev->lock); - attr.user_bitmap = 0; + attr.user_bitmap = bond_get_user_bitmap(bdev); /* is_bonded indicates whether bond should be activated. 
*/ if (is_bonded && !bond_dev_is_activated(bdev)) { @@ -591,17 +658,21 @@ static void bond_do_work(struct hinic3_bond_dev *bdev) pr_err("hinic3_bond: Do bond failed\n"); } -#define MIN_BOND_SLAVE_CNT 2 static void bond_try_do_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct hinic3_bond_dev *bdev = container_of(delayed_work, struct hinic3_bond_dev, bond_work); + int status; - if (g_bond_service_func[HINIC3_BOND_USER_ROCE] && bdev->tracker.cnt < MIN_BOND_SLAVE_CNT) + status = mutex_trylock(&g_bond_mutex); + if (status == 0) { + /* Delay 1 sec and retry */ queue_delayed_work(bdev->wq, &bdev->bond_work, HZ); - else + } else { bond_do_work(bdev); + mutex_unlock(&g_bond_mutex); + } } static int bond_dev_init(struct hinic3_bond_dev *bdev, const char *name) @@ -630,6 +701,7 @@ static int bond_dev_release(struct hinic3_bond_dev *bdev) err = bond_upcmd_deactivate(bdev); if (err) { pr_err("hinic3_bond: Failed to deactivate dev\n"); + mutex_unlock(&g_bond_mutex); return err; } @@ -764,20 +836,28 @@ static struct hinic3_bond_dev *bond_dev_by_name(const char *name) static void bond_dev_user_attach(struct hinic3_bond_dev *bdev, enum hinic3_bond_user user) { + u32 user_bitmap; + + if (user < 0 || user >= HINIC3_BOND_USER_NUM) + return; + if (bdev->slot_used[user]) return; bdev->slot_used[user] = 1; if (!kref_get_unless_zero(&bdev->ref)) kref_init(&bdev->ref); + else { + user_bitmap = bond_get_user_bitmap(bdev); + pr_info("hinic3_bond: user %u attach bond %s, user_bitmap %#x\n", + user, bdev->name, user_bitmap); + queue_delayed_work(bdev->wq, &bdev->bond_work, 0); + } } static void bond_dev_user_detach(struct hinic3_bond_dev *bdev, enum hinic3_bond_user user, bool *freed) { - if (user < 0 || user >= HINIC3_BOND_USER_NUM) - return; - if (bdev->slot_used[user]) { bdev->slot_used[user] = 0; if (kref_read(&bdev->ref) == 1) @@ -790,12 +870,15 @@ static struct bonding *bond_get_knl_bonding(const char *name) { struct net_device *ndev_tmp = NULL; + rcu_read_lock(); for_each_netdev(&init_net, ndev_tmp) { if (netif_is_bond_master(ndev_tmp) && - !strcmp(ndev_tmp->name, name)) + !strcmp(ndev_tmp->name, name)) { + rcu_read_unlock(); return netdev_priv(ndev_tmp); + } } - + rcu_read_unlock(); return NULL; } @@ -852,8 +935,9 @@ int hinic3_bond_detach(u16 bond_id, enum hinic3_bond_user user) int err = 0; bool lock_freed = false; - if (bond_id < BOND_FIRST_ID || bond_id > BOND_MAX_ID) { - pr_warn("hinic3_bond: Invalid bond id:%u to delete\n", bond_id); + if (!BOND_ID_IS_VALID(bond_id) || user >= HINIC3_BOND_USER_NUM) { + pr_warn("hinic3_bond: Invalid bond id or user, bond_id: %u, user: %d\n", + bond_id, user); return -EINVAL; } @@ -891,7 +975,7 @@ EXPORT_SYMBOL(hinic3_bond_clean_user); int hinic3_bond_get_uplink_id(u16 bond_id, u32 *uplink_id) { - if (bond_id < BOND_FIRST_ID || bond_id > BOND_MAX_ID || !uplink_id) { + if (!BOND_ID_IS_VALID(bond_id) || !uplink_id) { pr_warn("hinic3_bond: Invalid args, id: %u, uplink: %d\n", bond_id, !!uplink_id); return -EINVAL; @@ -941,7 +1025,7 @@ int hinic3_bond_get_slaves(u16 bond_id, struct hinic3_bond_info_s *info) int i; int len; - if (!info || bond_id < BOND_FIRST_ID || bond_id > BOND_MAX_ID) { + if (!info || !BOND_ID_IS_VALID(bond_id)) { pr_warn("hinic3_bond: Invalid args, info: %d,id: %u\n", !!info, bond_id); return -EINVAL; diff --git a/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h index 024de95aec9d4bd55b320901b638526f18631579..5ab36f727b0c2203521fb2015775bd512a19d7cb 
100644 --- a/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h +++ b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h @@ -82,6 +82,7 @@ struct hinic3_bond_cmd { char bond_name[16]; }; +bool hinic3_is_bond_dev_status_actived(struct net_device *ndev); void hinic3_bond_set_user_bitmap(struct bond_attr *attr, enum hinic3_bond_user user); int hinic3_bond_attach(const char *name, enum hinic3_bond_user user, u16 *bond_id); int hinic3_bond_detach(u16 bond_id, enum hinic3_bond_user user); diff --git a/drivers/net/ethernet/huawei/hinic3/cfg_mgt_comm_pub.h b/drivers/net/ethernet/huawei/hinic3/cfg_mgt_comm_pub.h deleted file mode 100644 index 557f326bdc0529c47ec2cfc97a5b1741c8d85102..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic3/cfg_mgt_comm_pub.h +++ /dev/null @@ -1,212 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2016-2022. All rights reserved. - * File name: Cfg_mgt_comm_pub.h - * Version No.: Draft - * Generation date: 2016 year 05 month 07 day - * Latest modification: - * Function description: Header file for communication between the: Host and FW - * Function list: - * Modification history: - * 1. Date: 2016 May 07 - * Modify content: Create a file. - */ -#ifndef CFG_MGT_COMM_PUB_H -#define CFG_MGT_COMM_PUB_H - -#include "mgmt_msg_base.h" - -enum servic_bit_define { - SERVICE_BIT_NIC = 0, - SERVICE_BIT_ROCE = 1, - SERVICE_BIT_VBS = 2, - SERVICE_BIT_TOE = 3, - SERVICE_BIT_IPSEC = 4, - SERVICE_BIT_FC = 5, - SERVICE_BIT_VIRTIO = 6, - SERVICE_BIT_OVS = 7, - SERVICE_BIT_NVME = 8, - SERVICE_BIT_ROCEAA = 9, - SERVICE_BIT_CURRENET = 10, - SERVICE_BIT_PPA = 11, - SERVICE_BIT_MIGRATE = 12, - SERVICE_BIT_MAX -}; - -#define CFG_SERVICE_MASK_NIC (0x1 << SERVICE_BIT_NIC) -#define CFG_SERVICE_MASK_ROCE (0x1 << SERVICE_BIT_ROCE) -#define CFG_SERVICE_MASK_VBS (0x1 << SERVICE_BIT_VBS) -#define CFG_SERVICE_MASK_TOE (0x1 << SERVICE_BIT_TOE) -#define CFG_SERVICE_MASK_IPSEC (0x1 << SERVICE_BIT_IPSEC) -#define CFG_SERVICE_MASK_FC (0x1 << SERVICE_BIT_FC) -#define CFG_SERVICE_MASK_VIRTIO (0x1 << SERVICE_BIT_VIRTIO) -#define CFG_SERVICE_MASK_OVS (0x1 << SERVICE_BIT_OVS) -#define CFG_SERVICE_MASK_NVME (0x1 << SERVICE_BIT_NVME) -#define CFG_SERVICE_MASK_ROCEAA (0x1 << SERVICE_BIT_ROCEAA) -#define CFG_SERVICE_MASK_CURRENET (0x1 << SERVICE_BIT_CURRENET) -#define CFG_SERVICE_MASK_PPA (0x1 << SERVICE_BIT_PPA) -#define CFG_SERVICE_MASK_MIGRATE (0x1 << SERVICE_BIT_MIGRATE) - -/* Definition of the scenario ID in the cfg_data, which is used for SML memory allocation. 
*/ -enum scenes_id_define { - SCENES_ID_FPGA_ETH = 0, - SCENES_ID_FPGA_TIOE = 1, /* Discarded */ - SCENES_ID_STORAGE_ROCEAA_2x100 = 2, - SCENES_ID_STORAGE_ROCEAA_4x25 = 3, - SCENES_ID_CLOUD = 4, - SCENES_ID_FC = 5, - SCENES_ID_STORAGE_ROCE = 6, - SCENES_ID_COMPUTE_ROCE = 7, - SCENES_ID_STORAGE_TOE = 8, - SCENES_ID_MAX -}; - -/* struct cfg_cmd_dev_cap.sf_svc_attr */ -enum { - SF_SVC_FT_BIT = (1 << 0), - SF_SVC_RDMA_BIT = (1 << 1), -}; - -enum cfg_cmd { - CFG_CMD_GET_DEV_CAP = 0, - CFG_CMD_GET_HOST_TIMER = 1, -}; - -struct cfg_cmd_host_timer { - struct mgmt_msg_head head; - - u8 host_id; - u8 rsvd1; - - u8 timer_pf_num; - u8 timer_pf_id_start; - u16 timer_vf_num; - u16 timer_vf_id_start; - u32 rsvd2[8]; -}; - -struct cfg_cmd_dev_cap { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1; - - /* Public resources */ - u8 host_id; - u8 ep_id; - u8 er_id; - u8 port_id; - - u16 host_total_func; - u8 host_pf_num; - u8 pf_id_start; - u16 host_vf_num; - u16 vf_id_start; - u8 host_oq_id_mask_val; - u8 timer_en; - u8 host_valid_bitmap; - u8 rsvd_host; - - u16 svc_cap_en; - u16 max_vf; - u8 flexq_en; - u8 valid_cos_bitmap; - /* Reserved for func_valid_cos_bitmap */ - u8 port_cos_valid_bitmap; - u8 rsvd_func1; - u32 rsvd_func2; - - u8 sf_svc_attr; - u8 func_sf_en; - u8 lb_mode; - u8 smf_pg; - - u32 max_conn_num; - u16 max_stick2cache_num; - u16 max_bfilter_start_addr; - u16 bfilter_len; - u16 hash_bucket_num; - - /* shared resource */ - u8 host_sf_en; - u8 master_host_id; - u8 srv_multi_host_mode; - u8 virtio_vq_size; - - u32 rsvd_func3[5]; - - /* l2nic */ - u16 nic_max_sq_id; - u16 nic_max_rq_id; - u16 nic_default_num_queues; - u16 rsvd1_nic; - u32 rsvd2_nic[2]; - - /* RoCE */ - u32 roce_max_qp; - u32 roce_max_cq; - u32 roce_max_srq; - u32 roce_max_mpt; - u32 roce_max_drc_qp; - - u32 roce_cmtt_cl_start; - u32 roce_cmtt_cl_end; - u32 roce_cmtt_cl_size; - - u32 roce_dmtt_cl_start; - u32 roce_dmtt_cl_end; - u32 roce_dmtt_cl_size; - - u32 roce_wqe_cl_start; - u32 roce_wqe_cl_end; - u32 roce_wqe_cl_size; - u8 roce_srq_container_mode; - u8 rsvd_roce1[3]; - u32 rsvd_roce2[5]; - - /* IPsec */ - u32 ipsec_max_sactx; - u16 ipsec_max_cq; - u16 rsvd_ipsec1; - u32 rsvd_ipsec[2]; - - /* OVS */ - u32 ovs_max_qpc; - u32 rsvd_ovs1[3]; - - /* ToE */ - u32 toe_max_pctx; - u32 toe_max_cq; - u16 toe_max_srq; - u16 toe_srq_id_start; - u16 toe_max_mpt; - u16 toe_max_cctxt; - u32 rsvd_toe[2]; - - /* FC */ - u32 fc_max_pctx; - u32 fc_max_scq; - u32 fc_max_srq; - - u32 fc_max_cctx; - u32 fc_cctx_id_start; - - u8 fc_vp_id_start; - u8 fc_vp_id_end; - u8 rsvd_fc1[2]; - u32 rsvd_fc2[5]; - - /* VBS */ - u16 vbs_max_volq; - u16 rsvd0_vbs; - u32 rsvd1_vbs[3]; - - u16 fake_vf_start_id; - u16 fake_vf_num; - u32 fake_vf_max_pctx; - u16 fake_vf_bfilter_start_addr; - u16 fake_vf_bfilter_len; - u32 rsvd_glb[8]; -}; - -#endif diff --git a/drivers/net/ethernet/huawei/hinic3/comm_cmdq_intf.h b/drivers/net/ethernet/huawei/hinic3/comm_cmdq_intf.h deleted file mode 100644 index 6f5f87bc19b78e1ab5ca753b68beb079a71c9bf2..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic3/comm_cmdq_intf.h +++ /dev/null @@ -1,239 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/****************************************************************************** - * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. 
- ****************************************************************************** - File Name : comm_cmdq_intf.h - Version : Initial Draft - Description : common command queue interface - Function List : - History : - Modification: Created file - -******************************************************************************/ - -#ifndef COMM_CMDQ_INTF_H -#define COMM_CMDQ_INTF_H - -/* Cmdq ack type */ -enum hinic3_ack_type { - HINIC3_ACK_TYPE_CMDQ, - HINIC3_ACK_TYPE_SHARE_CQN, - HINIC3_ACK_TYPE_APP_CQN, - - HINIC3_MOD_ACK_MAX = 15, -}; - -/* Defines the queue type of the set arm bit. */ -enum { - SET_ARM_BIT_FOR_CMDQ = 0, - SET_ARM_BIT_FOR_L2NIC_SQ, - SET_ARM_BIT_FOR_L2NIC_RQ, - SET_ARM_BIT_TYPE_NUM -}; - -/* Defines the type. Each function supports a maximum of eight CMDQ types. */ -enum { - CMDQ_0 = 0, - CMDQ_1 = 1, /* dedicated and non-blocking queues */ - CMDQ_NUM -}; - -/* *******************cmd common command data structure ************************ */ -// Func->ucode, which is used to set arm bit data, -// The microcode needs to perform big-endian conversion. -struct comm_info_ucode_set_arm_bit { - u32 q_type; - u32 q_id; -}; - -/* *******************WQE data structure ************************ */ -union cmdq_wqe_cs_dw0 { - struct { - u32 err_status : 29; - u32 error_code : 2; - u32 rsvd : 1; - } bs; - u32 val; -}; - -union cmdq_wqe_cs_dw1 { - // This structure is used when the driver writes the wqe. - struct { - u32 token : 16; // [15:0] - u32 cmd : 8; // [23:16] - u32 mod : 5; // [28:24] - u32 ack_type : 2; // [30:29] - u32 obit : 1; // [31] - } drv_wr; - - /* The uCode writes back the structure of the CS_DW1. - * The driver reads and uses the structure. */ - struct { - u32 mod : 5; // [4:0] - u32 ack_type : 3; // [7:5] - u32 cmd : 8; // [15:8] - u32 arm : 1; // [16] - u32 rsvd : 14; // [30:17] - u32 obit : 1; // [31] - } wb; - u32 val; -}; - -/* CmdQ BD information or write back buffer information */ -struct cmdq_sge { - u32 pa_h; // Upper 32 bits of the physical address - u32 pa_l; // Upper 32 bits of the physical address - u32 len; // Invalid bit[31]. - u32 resv; -}; - -/* Ctrls section definition of WQE */ -struct cmdq_wqe_ctrls { - union { - struct { - u32 bdsl : 8; // [7:0] - u32 drvsl : 2; // [9:8] - u32 rsv : 4; // [13:10] - u32 wf : 1; // [14] - u32 cf : 1; // [15] - u32 tsl : 5; // [20:16] - u32 va : 1; // [21] - u32 df : 1; // [22] - u32 cr : 1; // [23] - u32 difsl : 3; // [26:24] - u32 csl : 2; // [28:27] - u32 ctrlsl : 2; // [30:29] - u32 obit : 1; // [31] - } bs; - u32 val; - } header; - u32 qsf; -}; - -/* Complete section definition of WQE */ -struct cmdq_wqe_cs { - union cmdq_wqe_cs_dw0 dw0; - union cmdq_wqe_cs_dw1 dw1; - union { - struct cmdq_sge sge; - u32 dw2_5[4]; - } ack; -}; - -/* Inline header in WQE inline, describing the length of inline data */ -union cmdq_wqe_inline_header { - struct { - u32 buf_len : 11; // [10:0] inline data len - u32 rsv : 21; // [31:11] - } bs; - u32 val; -}; - -/* Definition of buffer descriptor section in WQE */ -union cmdq_wqe_bds { - struct { - struct cmdq_sge bds_sge; - u32 rsvd[4]; /* Zwy is used to transfer the virtual address of the buffer. */ - } lcmd; /* Long command, non-inline, and SGE describe the buffer information. 
*/ -}; - -/* Definition of CMDQ WQE */ -/* (long cmd, 64B) - * +----------------------------------------+ - * | ctrl section(8B) | - * +----------------------------------------+ - * | | - * | complete section(24B) | - * | | - * +----------------------------------------+ - * | | - * | buffer descriptor section(16B) | - * | | - * +----------------------------------------+ - * | driver section(16B) | - * +----------------------------------------+ - * - * - * (middle cmd, 128B) - * +----------------------------------------+ - * | ctrl section(8B) | - * +----------------------------------------+ - * | | - * | complete section(24B) | - * | | - * +----------------------------------------+ - * | | - * | buffer descriptor section(88B) | - * | | - * +----------------------------------------+ - * | driver section(8B) | - * +----------------------------------------+ - * - * - * (short cmd, 64B) - * +----------------------------------------+ - * | ctrl section(8B) | - * +----------------------------------------+ - * | | - * | complete section(24B) | - * | | - * +----------------------------------------+ - * | | - * | buffer descriptor section(24B) | - * | | - * +----------------------------------------+ - * | driver section(8B) | - * +----------------------------------------+ - */ -struct cmdq_wqe { - struct cmdq_wqe_ctrls ctrls; - struct cmdq_wqe_cs cs; - union cmdq_wqe_bds bds; -}; - -/* Definition of ctrls section in inline WQE */ -struct cmdq_wqe_ctrls_inline { - union { - struct { - u32 bdsl : 8; // [7:0] - u32 drvsl : 2; // [9:8] - u32 rsv : 4; // [13:10] - u32 wf : 1; // [14] - u32 cf : 1; // [15] - u32 tsl : 5; // [20:16] - u32 va : 1; // [21] - u32 df : 1; // [22] - u32 cr : 1; // [23] - u32 difsl : 3; // [26:24] - u32 csl : 2; // [28:27] - u32 ctrlsl : 2; // [30:29] - u32 obit : 1; // [31] - } bs; - u32 val; - } header; - u32 qsf; - u64 db; -}; - -/* Buffer descriptor section definition of WQE */ -union cmdq_wqe_bds_inline { - struct { - union cmdq_wqe_inline_header header; - u32 rsvd; - u8 data_inline[80]; - } mcmd; /* Middle command, inline mode */ - - struct { - union cmdq_wqe_inline_header header; - u32 rsvd; - u8 data_inline[16]; - } scmd; /* Short command, inline mode */ -}; - -struct cmdq_wqe_inline { - struct cmdq_wqe_ctrls_inline ctrls; - struct cmdq_wqe_cs cs; - union cmdq_wqe_bds_inline bds; -}; - -#endif diff --git a/drivers/net/ethernet/huawei/hinic3/comm_defs.h b/drivers/net/ethernet/huawei/hinic3/comm_defs.h deleted file mode 100644 index 91d298d93fb1b1828e9ebe0991e255ea82b2e3cd..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic3/comm_defs.h +++ /dev/null @@ -1,105 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved. 
- * File Name : comm_defs.h - * Version : Initial Draft - * Description : common definitions - * Function List : - * History : - * Modification: Created file - */ - -#ifndef COMM_DEFS_H -#define COMM_DEFS_H - -/* CMDQ MODULE_TYPE */ -enum hinic3_mod_type { - HINIC3_MOD_COMM = 0, /* HW communication module */ - HINIC3_MOD_L2NIC = 1, /* L2NIC module */ - HINIC3_MOD_ROCE = 2, - HINIC3_MOD_PLOG = 3, - HINIC3_MOD_TOE = 4, - HINIC3_MOD_FLR = 5, - HINIC3_MOD_RSVD1 = 6, - HINIC3_MOD_CFGM = 7, /* Configuration module */ - HINIC3_MOD_CQM = 8, - HINIC3_MOD_RSVD2 = 9, - COMM_MOD_FC = 10, - HINIC3_MOD_OVS = 11, - HINIC3_MOD_DSW = 12, - HINIC3_MOD_MIGRATE = 13, - HINIC3_MOD_HILINK = 14, - HINIC3_MOD_CRYPT = 15, /* secure crypto module */ - HINIC3_MOD_VIO = 16, - HINIC3_MOD_IMU = 17, - HINIC3_MOD_DFT = 18, /* DFT */ - HINIC3_MOD_HW_MAX = 19, /* hardware max module id */ - /* Software module id, for PF/VF and multi-host */ - HINIC3_MOD_SW_FUNC = 20, - HINIC3_MOD_MAX, -}; - -/* func reset的flag ,用于指示清理哪种资源 */ -enum func_reset_flag { - RES_TYPE_FLUSH_BIT = 0, - RES_TYPE_MQM, - RES_TYPE_SMF, - RES_TYPE_PF_BW_CFG, - - RES_TYPE_COMM = 10, - RES_TYPE_COMM_MGMT_CH, /* clear mbox and aeq, The RES_TYPE_COMM bit must be set */ - RES_TYPE_COMM_CMD_CH, /* clear cmdq and ceq, The RES_TYPE_COMM bit must be set */ - RES_TYPE_NIC, - RES_TYPE_OVS, - RES_TYPE_VBS, - RES_TYPE_ROCE, - RES_TYPE_FC, - RES_TYPE_TOE, - RES_TYPE_IPSEC, - RES_TYPE_MAX, -}; - -#define HINIC3_COMM_RES \ - ((1 << RES_TYPE_COMM) | (1 << RES_TYPE_COMM_CMD_CH) | \ - (1 << RES_TYPE_FLUSH_BIT) | (1 << RES_TYPE_MQM) | \ - (1 << RES_TYPE_SMF) | (1 << RES_TYPE_PF_BW_CFG)) - -#define HINIC3_NIC_RES (1 << RES_TYPE_NIC) -#define HINIC3_OVS_RES (1 << RES_TYPE_OVS) -#define HINIC3_VBS_RES (1 << RES_TYPE_VBS) -#define HINIC3_ROCE_RES (1 << RES_TYPE_ROCE) -#define HINIC3_FC_RES (1 << RES_TYPE_FC) -#define HINIC3_TOE_RES (1 << RES_TYPE_TOE) -#define HINIC3_IPSEC_RES (1 << RES_TYPE_IPSEC) - -/* MODE OVS、NIC、UNKNOWN */ -#define HINIC3_WORK_MODE_OVS 0 -#define HINIC3_WORK_MODE_UNKNOWN 1 -#define HINIC3_WORK_MODE_NIC 2 - -#define DEVICE_TYPE_L2NIC 0 -#define DEVICE_TYPE_NVME 1 -#define DEVICE_TYPE_VIRTIO_NET 2 -#define DEVICE_TYPE_VIRTIO_BLK 3 -#define DEVICE_TYPE_VIRTIO_VSOCK 4 -#define DEVICE_TYPE_VIRTIO_NET_TRANSITION 5 -#define DEVICE_TYPE_VIRTIO_BLK_TRANSITION 6 -#define DEVICE_TYPE_VIRTIO_SCSI_TRANSITION 7 -#define DEVICE_TYPE_VIRTIO_HPC 8 - -#define IS_STORAGE_DEVICE_TYPE(dev_type) \ - ((dev_type) == DEVICE_TYPE_VIRTIO_BLK || \ - (dev_type) == DEVICE_TYPE_VIRTIO_BLK_TRANSITION || \ - (dev_type) == DEVICE_TYPE_VIRTIO_SCSI_TRANSITION) - -/* Common header control information of the COMM message - * interaction command word between the driver and PF - */ -struct comm_info_head { - u8 status; - u8 version; - u8 rep_aeq_num; - u8 rsvd[5]; -}; - -#endif diff --git a/drivers/net/ethernet/huawei/hinic3/comm_msg_intf.h b/drivers/net/ethernet/huawei/hinic3/comm_msg_intf.h deleted file mode 100644 index ae998cfcd1891a73a0a81b078b2bad8c77a08a88..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic3/comm_msg_intf.h +++ /dev/null @@ -1,103 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved. 
- * File Name : comm_msg_intf.h - * Version : Initial Draft - * Created : 2021/6/28 - * Last Modified : - * Description : COMM Command interfaces between Driver and MPU - * Function List : - */ - -#ifndef COMM_MSG_INTF_H -#define COMM_MSG_INTF_H - -#include "comm_defs.h" -#include "mgmt_msg_base.h" -#include "mpu_cmd_base_defs.h" -#include "mpu_inband_cmd_defs.h" - -struct spu_cmd_freq_operation { - struct comm_info_head head; - - u8 op_code; /* 0: get 1: set 2: check */ - u8 rsvd[3]; - u32 freq; -}; - -struct spu_cmd_power_operation { - struct comm_info_head head; - - u8 op_code; /* 0: get 1: set 2: init */ - u8 slave_addr; - u8 cmd_id; - u8 size; - u32 value; -}; - -struct spu_cmd_tsensor_operation { - struct comm_info_head head; - - u8 op_code; - u8 rsvd[3]; - s16 fabric_tsensor_temp_avg; - s16 fabric_tsensor_temp; - s16 sys_tsensor_temp_avg; - s16 sys_tsensor_temp; -}; - -enum cfg_msix_operation { - CFG_MSIX_OPERATION_FREE = 0, - CFG_MSIX_OPERATION_ALLOC = 1, -}; - -struct comm_cmd_ffm_info { - struct mgmt_msg_head head; - - u8 node_id; - /* error level of the interrupt source */ - u8 err_level; - /* Classification by interrupt source properties */ - u16 err_type; - u32 err_csr_addr; - u32 err_csr_value; - u32 rsvd1; -}; - -struct hinic3_cmd_update_firmware { - struct mgmt_msg_head msg_head; - - struct { - u32 sl : 1; - u32 sf : 1; - u32 flag : 1; - u32 bit_signed : 1; - u32 reserved : 12; - u32 fragment_len : 16; - } ctl_info; - - struct { - u32 section_crc; - u32 section_type; - } section_info; - - u32 total_len; - u32 section_len; - u32 section_version; - u32 section_offset; - u32 data[384]; -}; - -struct hinic3_cmd_activate_firmware { - struct mgmt_msg_head msg_head; - u8 index; /* 0 ~ 7 */ - u8 data[7]; -}; - -struct hinic3_cmd_switch_config { - struct mgmt_msg_head msg_head; - u8 index; /* 0 ~ 7 */ - u8 data[7]; -}; - -#endif diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c index 76e128e9d3ad1fd4032113ca03ccbb2d3cbf11eb..1562c59387c716006dea32db51b23173816a69e4 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c @@ -28,6 +28,8 @@ #include "cqm_npu_cmd.h" #include "cqm_npu_cmd_defs.h" +#include "vram_common.h" + static void cqm_bat_fill_cla_common_gpa(struct tag_cqm_handle *cqm_handle, struct tag_cqm_cla_table *cla_table, struct tag_cqm_bat_entry_standerd *bat_entry_standerd) @@ -367,6 +369,14 @@ static s32 cqm_bat_update_cmd(struct tag_cqm_handle *cqm_handle, struct tag_cqm_ struct tag_cqm_cmdq_bat_update *bat_update_cmd = NULL; s32 ret = CQM_FAIL; + int is_in_kexec; + + is_in_kexec = vram_get_kexec_flag(); + if (is_in_kexec != 0) { + cqm_info(handle->dev_hdl, "Skip updating the cqm_bat to chip during kexec!\n"); + return CQM_SUCCESS; + } + bat_update_cmd = (struct tag_cqm_cmdq_bat_update *)(buf_in->buf); bat_update_cmd->offset = 0; @@ -644,6 +654,7 @@ void cqm_bat_uninit(struct tag_cqm_handle *cqm_handle) static s32 cqm_cla_fill_buf(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *cla_base_buf, struct tag_cqm_buf *cla_sub_buf, u8 gpa_check_enable) { + struct hinic3_hwdev *handle = cqm_handle->ex_handle; struct hinic3_func_attr *func_attr = NULL; dma_addr_t *base = NULL; u64 fake_en = 0; @@ -655,13 +666,17 @@ static s32 cqm_cla_fill_buf(struct tag_cqm_handle *cqm_handle, struct tag_cqm_bu /* Apply for space for base_buf */ if (!cla_base_buf->buf_list) { - if (cqm_buf_alloc(cqm_handle, cla_base_buf, false) == CQM_FAIL) + if 
(cqm_buf_alloc(cqm_handle, cla_base_buf, false) == + CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(cla_base_buf)); return CQM_FAIL; + } } /* Apply for space for sub_buf */ if (!cla_sub_buf->buf_list) { if (cqm_buf_alloc(cqm_handle, cla_sub_buf, false) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(cla_sub_buf)); cqm_buf_free(cla_base_buf, cqm_handle); return CQM_FAIL; } @@ -768,8 +783,10 @@ static s32 cqm_cla_xyz_lvl1(struct tag_cqm_handle *cqm_handle, /* Applying for CLA_Z_BUF Space */ cla_z_buf = &cla_table->cla_z_buf; cla_z_buf->buf_size = trunk_size; - cla_z_buf->buf_number = (ALIGN(cla_table->max_buffer_size, trunk_size)) / trunk_size; - cla_z_buf->page_number = cla_z_buf->buf_number << cla_table->trunk_order; + cla_z_buf->buf_number = + (ALIGN(cla_table->max_buffer_size, trunk_size)) / trunk_size; + cla_z_buf->page_number = cla_z_buf->buf_number << + cla_table->trunk_order; /* All buffer space must be statically allocated. */ if (cla_table->alloc_static) { @@ -785,6 +802,7 @@ static s32 cqm_cla_xyz_lvl1(struct tag_cqm_handle *cqm_handle, cla_z_buf->buf_list = vmalloc(cla_z_buf->buf_number * sizeof(struct tag_cqm_buf_list)); if (!cla_z_buf->buf_list) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_1_z_buf)); cqm_buf_free(cla_y_buf, cqm_handle); return CQM_FAIL; } @@ -840,6 +858,7 @@ static s32 cqm_cla_xyz_lvl2_xyz_apply(struct tag_cqm_handle *cqm_handle, cla_x_buf->buf_size = trunk_size; cla_x_buf->buf_number = 1; cla_x_buf->page_number = cla_x_buf->buf_number << cla_table->trunk_order; + cla_x_buf->buf_info.use_vram = get_use_vram_flag(); ret = cqm_buf_alloc(cqm_handle, cla_x_buf, false); if (ret != CQM_SUCCESS) return CQM_FAIL; @@ -869,14 +888,20 @@ static s32 cqm_cla_xyz_vram_name_init(struct tag_cqm_cla_table *cla_table, cla_x_buf = &cla_table->cla_x_buf; cla_z_buf = &cla_table->cla_z_buf; cla_y_buf = &cla_table->cla_y_buf; + cla_x_buf->buf_info.use_vram = get_use_vram_flag(); snprintf(cla_x_buf->buf_info.buf_vram_name, - VRAM_NAME_MAX_LEN - 1, "%s%s", cla_table->name, VRAM_CQM_CLA_COORD_X); + VRAM_NAME_APPLY_LEN, "%s%s", cla_table->name, + VRAM_CQM_CLA_COORD_X); + cla_y_buf->buf_info.use_vram = get_use_vram_flag(); snprintf(cla_y_buf->buf_info.buf_vram_name, - VRAM_NAME_MAX_LEN - 1, "%s%s", cla_table->name, VRAM_CQM_CLA_COORD_Y); + VRAM_NAME_APPLY_LEN, "%s%s", cla_table->name, + VRAM_CQM_CLA_COORD_Y); + cla_z_buf->buf_info.use_vram = get_use_vram_flag(); snprintf(cla_z_buf->buf_info.buf_vram_name, - VRAM_NAME_MAX_LEN - 1, "%s%s", cla_table->name, VRAM_CQM_CLA_COORD_Z); + VRAM_NAME_APPLY_LEN, "%s%s", + cla_table->name, VRAM_CQM_CLA_COORD_Z); return CQM_SUCCESS; } @@ -925,6 +950,7 @@ static s32 cqm_cla_xyz_lvl2(struct tag_cqm_handle *cqm_handle, cla_z_buf->buf_list = vmalloc(cla_z_buf->buf_number * sizeof(struct tag_cqm_buf_list)); if (!cla_z_buf->buf_list) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_z_buf)); cqm_buf_free(cla_x_buf, cqm_handle); return CQM_FAIL; } @@ -934,6 +960,7 @@ static s32 cqm_cla_xyz_lvl2(struct tag_cqm_handle *cqm_handle, cla_y_buf->buf_list = vmalloc(cla_y_buf->buf_number * sizeof(struct tag_cqm_buf_list)); if (!cla_y_buf->buf_list) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_y_buf)); cqm_buf_free(cla_z_buf, cqm_handle); cqm_buf_free(cla_x_buf, cqm_handle); return CQM_FAIL; @@ -1188,7 +1215,7 @@ static void cqm_cla_init_entry_extern(struct tag_cqm_handle *cqm_handle, * exceed 128 x 4 KB. Otherwise, clearing the timer buffer of * the function is complex. 
*/ - cla_table->trunk_order = CQM_4K_PAGE_ORDER; + cla_table->trunk_order = CQM_8K_PAGE_ORDER; cla_table->max_buffer_size = capability->timer_number * capability->timer_basic_size; cla_table->obj_size = capability->timer_basic_size; @@ -1237,7 +1264,7 @@ static s32 cqm_cla_init_entry_condition(struct tag_cqm_handle *cqm_handle, u32 e memcpy(cla_table_timer, cla_table, sizeof(struct tag_cqm_cla_table)); snprintf(cla_table_timer->name, - VRAM_NAME_MAX_LEN - 1, "%s%s%01u", cla_table->name, + VRAM_NAME_APPLY_LEN, "%s%s%01u", cla_table->name, VRAM_CQM_CLA_SMF_BASE, i); if (cqm_cla_xyz(cqm_handle, cla_table_timer) == @@ -1268,7 +1295,7 @@ static s32 cqm_cla_init_entry(struct tag_cqm_handle *cqm_handle, for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { cla_table = &bat_table->entry[i]; cla_table->type = bat_table->bat_entry_type[i]; - snprintf(cla_table->name, VRAM_NAME_MAX_LEN - 1, + snprintf(cla_table->name, VRAM_NAME_APPLY_LEN, "%s%s%s%02u", cqm_handle->name, VRAM_CQM_CLA_BASE, VRAM_CQM_CLA_TYPE_BASE, cla_table->type); @@ -1485,11 +1512,11 @@ static s32 cqm_cla_update(struct tag_cqm_handle *cqm_handle, spu_en = 0; pa = ((buf_node_parent->pa + (child_index * sizeof(dma_addr_t))) | - (u32)spu_en); + spu_en); cmd.gpa_h = CQM_ADDR_HI(pa); cmd.gpa_l = CQM_ADDR_LW(pa); - pa = (buf_node_child->pa | (u32)spu_en); + pa = (buf_node_child->pa | spu_en); cmd.value_h = CQM_ADDR_HI(pa); cmd.value_l = CQM_ADDR_LW(pa); @@ -1980,7 +2007,7 @@ struct tag_cqm_cla_table *cqm_cla_table_get(struct tag_cqm_bat_table *bat_table, for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { cla_table = &bat_table->entry[i]; - if (cla_table && entry_type == cla_table->type) + if ((cla_table != NULL) && (entry_type == cla_table->type)) return cla_table; } diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.h index dd0766e6d75579a3e525219c939bf205951c8af3..a51c1dc602b221a4c6bd3bc6ed867a5f12b45082 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.h +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.h @@ -9,6 +9,7 @@ #include "cqm_bitmap_table.h" #include "cqm_object.h" +#include "vram_common.h" /* When the connection check is enabled, the maximum number of connections * supported by the chip is 1M - 63, which cannot reach 1M @@ -19,6 +20,7 @@ #define CLA_TABLE_PAGE_ORDER 0 #define CQM_4K_PAGE_ORDER 0 #define CQM_4K_PAGE_SIZE 4096 +#define CQM_8K_PAGE_ORDER 1 #define CQM_BAT_ENTRY_MAX 16 #define CQM_BAT_ENTRY_SIZE 16 diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c index f4844d56724243e1afc88888c561223bca09bc0e..86b268cb55c7cf9d63b9dd8ad92f41868bb697d1 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c @@ -254,8 +254,10 @@ s32 cqm_buf_alloc_direct(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf * } pages = vmalloc(sizeof(struct page *) * buf->page_number); - if (!pages) + if (!pages) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(pages)); return CQM_FAIL; + } for (i = 0; i < buf->buf_number; i++) { for (j = 0; j < ((u32)1 << order); j++) @@ -273,6 +275,11 @@ s32 cqm_buf_alloc_direct(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf * return CQM_SUCCESS; } +static bool check_use_vram(struct hinic3_hwdev *handle, struct tag_cqm_buf *buf) +{ + return buf->buf_info.use_vram ? 
true : false; +} + static bool check_use_non_vram(struct hinic3_hwdev *handle, struct tag_cqm_buf *buf) { return buf->buf_info.use_vram ? false : true; @@ -294,11 +301,48 @@ static bool check_for_nouse_node_alloc(struct hinic3_hwdev *handle, struct tag_c return false; } +static s32 cqm_buf_vram_kalloc(struct hinic3_hwdev *handle, struct tag_cqm_buf *buf) +{ + void *vaddr = NULL; + int i; + + vaddr = hi_vram_kalloc(buf->buf_info.buf_vram_name, (u64)buf->buf_size * buf->buf_number); + if (!vaddr) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(buf_page)); + return CQM_FAIL; + } + + for (i = 0; i < (s32)buf->buf_number; i++) + buf->buf_list[i].va = (void *)((char *)vaddr + i * (u64)buf->buf_size); + + return CQM_SUCCESS; +} + +static void cqm_buf_vram_free(struct tag_cqm_buf *buf) +{ + s32 i; + + if (buf->buf_list == NULL) { + return; + } + + if (buf->buf_list[0].va) + hi_vram_kfree(buf->buf_list[0].va, buf->buf_info.buf_vram_name, + (u64)buf->buf_size * buf->buf_number); + + for (i = 0; i < (s32)buf->buf_number; i++) + buf->buf_list[i].va = NULL; +} + static void cqm_buf_free_page_common(struct tag_cqm_buf *buf) { u32 order; s32 i; + if (buf->buf_list == NULL) { + return; + } + order = (u32)get_order(buf->buf_size); for (i = 0; i < (s32)buf->buf_number; i++) { @@ -320,8 +364,10 @@ static s32 cqm_buf_use_node_alloc_page(struct hinic3_hwdev *handle, struct tag_c node = dev_to_node(handle->dev_hdl); for (i = 0; i < (s32)buf->buf_number; i++) { newpage = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, order); - if (!newpage) + if (!newpage) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(buf_page)); break; + } va = (void *)page_address(newpage); /* Initialize the page after the page is applied for. * If hash entries are involved, the initialization @@ -349,8 +395,10 @@ static s32 cqm_buf_unused_node_alloc_page(struct hinic3_hwdev *handle, struct ta for (i = 0; i < (s32)buf->buf_number; i++) { va = (void *)ossl_get_free_pages(GFP_KERNEL | __GFP_ZERO, order); - if (!va) + if (!va) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(buf_page)); break; + } /* Initialize the page after the page is applied for. * If hash entries are involved, the initialization * value must be 0. 
@@ -367,21 +415,21 @@ static s32 cqm_buf_unused_node_alloc_page(struct hinic3_hwdev *handle, struct ta return CQM_SUCCESS; } -#define MALLOC_FUNCS_COUNT 2 -#define FREE_FUNCS_COUNT 1 -static const struct malloc_memory g_malloc_funcs[MALLOC_FUNCS_COUNT] = { +static const struct malloc_memory g_malloc_funcs[] = { + {check_use_vram, cqm_buf_vram_kalloc}, {check_for_use_node_alloc, cqm_buf_use_node_alloc_page}, {check_for_nouse_node_alloc, cqm_buf_unused_node_alloc_page} }; -static const struct free_memory g_free_funcs[FREE_FUNCS_COUNT] = { +static const struct free_memory g_free_funcs[] = { + {check_use_vram, cqm_buf_vram_free}, {check_use_non_vram, cqm_buf_free_page_common} }; static s32 cqm_buf_alloc_page(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *buf) { struct hinic3_hwdev *handle = cqm_handle->ex_handle; - u32 malloc_funcs_num = MALLOC_FUNCS_COUNT; + u32 malloc_funcs_num = ARRAY_SIZE(g_malloc_funcs); u32 i; for (i = 0; i < malloc_funcs_num; i++) { @@ -398,7 +446,7 @@ static s32 cqm_buf_alloc_page(struct tag_cqm_handle *cqm_handle, struct tag_cqm_ static void cqm_buf_free_page(struct tag_cqm_buf *buf) { - u32 free_funcs_num = FREE_FUNCS_COUNT; + u32 free_funcs_num = ARRAY_SIZE(g_free_funcs); u32 i; for (i = 0; i < free_funcs_num; i++) { @@ -447,8 +495,11 @@ static s32 cqm_buf_get_secure_mem_pages(struct tag_cqm_handle *cqm_handle, struc cqm_get_secure_mem_pages(handle, (u32)get_order(buf->buf_size), &buf->buf_list[i].pa); - if (!buf->buf_list[i].va) + if (!buf->buf_list[i].va) { + cqm_err(handle->dev_hdl, + CQM_ALLOC_FAIL(cqm_get_secure_mem_pages)); break; + } } if (i != buf->buf_number) { @@ -780,7 +831,10 @@ static s32 cqm_single_bitmap_init(struct tag_cqm_bitmap *bitmap) */ bit_number = (ALIGN(bitmap->max_num, CQM_NUM_BIT_BYTE) >> CQM_BYTE_BIT_SHIFT); - bitmap->table = vmalloc(bit_number); + if (bitmap->bitmap_info.use_vram != 0) + bitmap->table = hi_vram_kalloc(bitmap->bitmap_info.buf_vram_name, bit_number); + else + bitmap->table = vmalloc(bit_number); if (!bitmap->table) return CQM_FAIL; memset(bitmap->table, 0, bit_number); @@ -859,7 +913,7 @@ s32 cqm_bitmap_init(struct tag_cqm_handle *cqm_handle) } bitmap = &cla_table->bitmap; - snprintf(bitmap->bitmap_info.buf_vram_name, VRAM_NAME_MAX_LEN - 1, + snprintf(bitmap->bitmap_info.buf_vram_name, VRAM_NAME_APPLY_LEN, "%s%s%02d", cla_table->name, VRAM_CQM_BITMAP_BASE, cla_table->type); @@ -869,6 +923,7 @@ s32 cqm_bitmap_init(struct tag_cqm_handle *cqm_handle) bitmap->reserved_top = capability->qpc_reserved; bitmap->reserved_back = capability->qpc_reserved_back; bitmap->last = capability->qpc_reserved; + bitmap->bitmap_info.use_vram = get_use_vram_flag(); cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n", cla_table->type, bitmap->max_num); @@ -948,7 +1003,12 @@ void cqm_bitmap_uninit(struct tag_cqm_handle *cqm_handle) if (cla_table->type != CQM_BAT_ENTRY_T_INVALID && bitmap->table) { spin_lock_deinit(&bitmap->lock); - vfree(bitmap->table); + if (bitmap->bitmap_info.use_vram != 0) + hi_vram_kfree(bitmap->table, bitmap->bitmap_info.buf_vram_name, + ALIGN(bitmap->max_num, CQM_NUM_BIT_BYTE) >> + CQM_BYTE_BIT_SHIFT); + else + vfree(bitmap->table); bitmap->table = NULL; } } diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.h index 7febf767fc67e6844a7bcca64b7bc1cd930155ef..06b8661d7603453a6e1105f1f36f9815d4e66b48 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.h +++ 
b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.h @@ -9,6 +9,7 @@ #include #include "cqm_object.h" +#include "vram_common.h" struct tag_cqm_bitmap { ulong *table; diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.c index cf7df37f28afa06d677071e4877e753ec793fea4..3d38edc3b0995cc2a4c7ee6610af008c2bc05fff 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.c +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.c @@ -15,7 +15,6 @@ #include "cqm_bitmap_table.h" #include "cqm_bat_cla.h" #include "cqm_main.h" -#include "cqm_cmd.h" /** * cqm_cmd_alloc - Apply for a cmd buffer. The buffer size is fixed to 2 KB, diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.c index 9a13ae072970dcf865699debba95f723b1cc0308..db65c8b577f76c90262c58b153afd514a692e751 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.c +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.c @@ -196,6 +196,12 @@ void *cqm_get_db_addr(void *ex_handle, u32 service_type) pr_err("[CQM]%s: ex_handle is null\n", __func__); return NULL; } + + if (service_type >= CQM_SERVICE_T_MAX) { + pr_err("%s service_type = %d state is error\n", __func__, + service_type); + return NULL; + } handle = (struct hinic3_hwdev *)ex_handle; cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); service = &cqm_handle->service[service_type]; @@ -218,6 +224,17 @@ s32 cqm_ring_hardware_db(void *ex_handle, u32 service_type, u8 db_count, u64 db) struct tag_cqm_service *service = NULL; struct hinic3_hwdev *handle = NULL; + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return CQM_FAIL; + } + + if (service_type >= CQM_SERVICE_T_MAX) { + pr_err("%s service_type = %d state is error\n", __func__, + service_type); + return CQM_FAIL; + } + handle = (struct hinic3_hwdev *)ex_handle; cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); service = &cqm_handle->service[service_type]; @@ -278,6 +295,17 @@ s32 cqm_ring_direct_wqe_db(void *ex_handle, u32 service_type, u8 db_count, u64 *tmp = (u64 *)direct_wqe; int i; + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return CQM_FAIL; + } + + if (service_type >= CQM_SERVICE_T_MAX) { + pr_err("%s service_type = %d state is error\n", __func__, + service_type); + return CQM_FAIL; + } + handle = (struct hinic3_hwdev *)ex_handle; cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); service = &cqm_handle->service[service_type]; diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c index 19a8b56e0fd5df9fd11074ceae7ab9eefffb6a25..0e8a57988b68e8fd518615f56feae7aace75f03a 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c @@ -23,6 +23,8 @@ #include "cqm_memsec.h" #include "cqm_main.h" +#include "vram_common.h" + static unsigned char roce_qpc_rsv_mode = CQM_QPC_ROCE_NORMAL; module_param(roce_qpc_rsv_mode, byte, 0644); MODULE_PARM_DESC(roce_qpc_rsv_mode, @@ -98,10 +100,17 @@ static s32 cqm_set_timer_enable(void *ex_handle) { struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; struct tag_cqm_handle *cqm_handle = NULL; + int is_in_kexec; if (!ex_handle) return CQM_FAIL; + is_in_kexec = vram_get_kexec_flag(); + if (is_in_kexec != 0) { + cqm_info(handle->dev_hdl, "Skip starting cqm timer during kexec\n"); + return CQM_SUCCESS; + } + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); if 
(cqm_handle->func_capability.fake_func_type == CQM_FAKE_FUNC_PARENT && cqm_set_fake_vf_timer(cqm_handle) != CQM_SUCCESS) @@ -456,7 +465,7 @@ static void cqm_service_capability_init_roce(struct tag_cqm_handle *cqm_handle, cqm_info(handle->dev_hdl, "Cap init: roce qpc 0x%x, scqc 0x%x, srqc 0x%x, drc_qp 0x%x\n", roce_own_cap->max_qps, roce_own_cap->max_cqs, roce_own_cap->max_srqs, roce_own_cap->max_drc_qps); - cqm_info(handle->dev_hdl, "Cap init: type 0x%x, scenes:0x%x, qpc_rsv:0x%x, srv_bmp:0x%x\n", + cqm_info(handle->dev_hdl, "Cap init: board_type 0x%x, scenes_id:0x%x, qpc_rsv_mode:0x%x, srv_bmp:0x%x\n", board_info->board_type, board_info->scenes_id, roce_qpc_rsv_mode, board_info->service_en_bitmap); @@ -610,7 +619,7 @@ static void cqm_service_capability_init_ipsec(struct tag_cqm_handle *cqm_handle, func_cap->scqc_basic_size); func_cap->scqc_alloc_static = true; cqm_info(handle->dev_hdl, "Cap init: ipsec is valid\n"); - cqm_info(handle->dev_hdl, "Cap init: ipsec 0x%x, childc %d, scqc 0x%x, scqc_bsize %d\n", + cqm_info(handle->dev_hdl, "Cap init: ipsec childc_num 0x%x, childc_bsize %d, scqc_num 0x%x, scqc_bsize %d\n", ipsec_srvcap->max_sactxs, func_cap->childc_basic_size, ipsec_srvcap->max_cqs, func_cap->scqc_basic_size); } @@ -812,7 +821,7 @@ static int cqm_capability_init_bloomfilter(struct hinic3_hwdev *handle) func_cap->bloomfilter_addr = service_capability->bfilter_start_addr; if (func_cap->bloomfilter_length != 0 && !cqm_check_align(func_cap->bloomfilter_length)) { - cqm_err(handle->dev_hdl, "Cap bloomfilter len %u is not the power of 2\n", + cqm_err(handle->dev_hdl, "Cap init: bloomfilter_length %u is not the power of 2\n", func_cap->bloomfilter_length); return CQM_FAIL; @@ -883,7 +892,7 @@ static int cqm_capability_init_timer(struct hinic3_hwdev *handle) func_cap->timer_vf_num = service_capability->timer_vf_num; func_cap->timer_vf_id_start = service_capability->timer_vf_id_start; cqm_info(handle->dev_hdl, - "timer init: pf_num 0x%x, pf_start 0x%x, vf_num 0x%x, vf_start 0x%x\n", + "host timer init: timer_pf_num 0x%x, timer_pf_id_start 0x%x, timer_vf_num 0x%x, timer_vf_id_start 0x%x\n", func_cap->timer_pf_num, func_cap->timer_pf_id_start, func_cap->timer_vf_num, func_cap->timer_vf_id_start); @@ -891,7 +900,7 @@ static int cqm_capability_init_timer(struct hinic3_hwdev *handle) if (IS_SLAVE_HOST(handle)) { total_timer_num *= CQM_TIMER_NUM_MULTI; cqm_info(handle->dev_hdl, - "timer init: need double tw resources, total_timer_num=0x%x\n", + "host timer init: need double tw resources, total_timer_num=0x%x\n", total_timer_num); } } @@ -1059,8 +1068,10 @@ static s32 cqm_fake_init(struct tag_cqm_handle *cqm_handle) for (i = 0; i < (u32)child_func_number; i++) { fake_cqm_handle = kmalloc(sizeof(*fake_cqm_handle), GFP_KERNEL | __GFP_ZERO); - if (!fake_cqm_handle) + if (!fake_cqm_handle) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(fake_cqm_handle)); goto err; + } /* Copy the attributes of the parent CQM handle to the child CQM * handle and modify the values of function. 
@@ -1129,7 +1140,7 @@ static s32 cqm_fake_mem_init(struct tag_cqm_handle *cqm_handle) for (i = 0; i < (u32)child_func_number; i++) { fake_cqm_handle = cqm_handle->fake_cqm_handle[i]; - snprintf(fake_cqm_handle->name, VRAM_NAME_MAX_LEN - 1, + snprintf(fake_cqm_handle->name, VRAM_NAME_APPLY_LEN, "%s%s%02u", cqm_handle->name, VRAM_CQM_FAKE_MEM_BASE, i); if (cqm_bat_init(fake_cqm_handle) != CQM_SUCCESS) { @@ -1174,7 +1185,7 @@ s32 cqm_mem_init(void *ex_handle) struct tag_cqm_handle *cqm_handle = NULL; cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); - snprintf(cqm_handle->name, VRAM_NAME_MAX_LEN - 1, + snprintf(cqm_handle->name, VRAM_NAME_APPLY_LEN, "%s%02u", VRAM_CQM_GLB_FUNC_BASE, hinic3_global_func_id(handle)); if (cqm_fake_init(cqm_handle) != CQM_SUCCESS) { diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h index 4f87bd1d7164b16eda8c7a2a8a7de10386def278..8d1e4816965d9601c0ca7b4655c81ac98f6f6ba4 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h @@ -362,6 +362,7 @@ s32 cqm_fake_vf_num_set(void *ex_handle, u16 fake_vf_num_cfg); #define CQM_LOG_ID 0 #define CQM_PTR_NULL(x) "%s: " #x " is null\n", __func__ +#define CQM_ALLOC_FAIL(x) "%s: " #x " alloc fail\n", __func__ #define CQM_MAP_FAIL(x) "%s: " #x " map fail\n", __func__ #define CQM_FUNCTION_FAIL(x) "%s: " #x " return failure\n", __func__ #define CQM_WRONG_VALUE(x) "%s: " #x " %u is wrong\n", __func__, (u32)(x) diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.c index 4888b0a14a2d74c4397b199cd4bb633197ffe6da..d3def8191b92d2f420e8e278b90246bb9bd13d2e 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.c +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.c @@ -21,6 +21,7 @@ #include "cqm_bloomfilter.h" #include "cqm_db.h" #include "cqm_main.h" +#include "vram_common.h" #include "vmsec_mpu_common.h" #include "cqm_memsec.h" @@ -34,6 +35,8 @@ #define STD_INPUT_ONE_PARA 1 #define STD_INPUT_TWO_PARA 2 #define MR_KEY_2_INDEX_SHIFT 8 +#define IS_ADDR_IN_MEMSEC(va, len, start, end) \ + ((va) >= (start) && (va) + (len) < (end)) static int memsec_proc_show(struct seq_file *seq, void *offset); static int memsec_proc_open(struct inode *inode, struct file *file); @@ -46,15 +49,24 @@ static int hinic3_secure_mem_proc_node_add(void *hwdev); static ssize_t memsec_proc_write(struct file *file, const char __user *data, size_t len, loff_t *pff); -static struct proc_dir_entry *g_hinic3_memsec_proc_ent; /* proc dir */ +static struct proc_dir_entry *g_hinic3_memsec_proc_ent = NULL; /* proc dir */ static atomic_t g_memsec_proc_refcnt = ATOMIC_INIT(0); +#if KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE +static const struct file_operations memsec_proc_fops = { + .open = memsec_proc_open, + .read = seq_read, + .write = memsec_proc_write, + .release = memsec_proc_release, +}; +#else static const struct proc_ops memsec_proc_fops = { .proc_open = memsec_proc_open, .proc_read = seq_read, .proc_write = memsec_proc_write, .proc_release = memsec_proc_release, }; +#endif bool cqm_need_secure_mem(void *hwdev) { @@ -63,6 +75,9 @@ bool cqm_need_secure_mem(void *hwdev) struct hinic3_hwdev *handle = (struct hinic3_hwdev *)hwdev; cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (cqm_handle == NULL) { + return false; + } info = &cqm_handle->secure_mem; return ((info->need_secure_mem) && hinic3_is_guest_vmsec_enable(hwdev)); } @@ -275,9 +290,10 @@ 
static int test_query_context(struct hinic3_hwdev *handle, char *data, size_t le case CQM_OBJECT_SERVICE_CTX: case CQM_OBJECT_MPT: qpc_mpt = (struct tag_cqm_qpc_mpt *)cqm_obj; - if (qpc_mpt->vaddr >= (u8 *)info->va_base && - (qpc_mpt->vaddr + cqm_obj->object_size) < (u8 *)info->va_end) - in_secmem = true; + in_secmem = IS_ADDR_IN_MEMSEC(qpc_mpt->vaddr, + cqm_obj->object_size, + (u8 *)info->va_base, + (u8 *)info->va_end); cqm_info(handle->dev_hdl, "[memsec_dfx]Query %s:0x%x, va=%p %sin secure mem\n", query_type == CQM_OBJECT_MPT ? "MPT, mpt_index" : "QPC, qpn", @@ -286,9 +302,10 @@ static int test_query_context(struct hinic3_hwdev *handle, char *data, size_t le case CQM_OBJECT_RDMA_SRQ: case CQM_OBJECT_RDMA_SCQ: cqm_queue = (struct tag_cqm_queue *)cqm_obj; - if (cqm_queue->q_ctx_vaddr >= (u8 *)info->va_base && - (cqm_queue->q_ctx_vaddr + cqm_obj->object_size) < (u8 *)info->va_end) - in_secmem = true; + in_secmem = IS_ADDR_IN_MEMSEC(cqm_queue->q_ctx_vaddr, + cqm_obj->object_size, + (u8 *)info->va_base, + (u8 *)info->va_end); cqm_info(handle->dev_hdl, "[memsec_dfx]Query %s:0x%x, va=%p %sin secure mem\n", query_type == CQM_OBJECT_RDMA_SRQ ? "SRQC, srqn " : "SCQC, scqn", @@ -378,7 +395,7 @@ static int hinic3_secure_mem_proc_node_remove(void *hwdev) atomic_dec(&g_memsec_proc_refcnt); - snprintf(pci_name, PCI_PROC_NAME_LEN - 1, + snprintf(pci_name, PCI_PROC_NAME_LEN, "%02x:%02x:%x", pdev->bus->number, pdev->slot->number, PCI_FUNC(pdev->devfn)); @@ -401,7 +418,7 @@ static int hinic3_secure_mem_proc_node_add(void *hwdev) atomic_inc(&g_memsec_proc_refcnt); - snprintf(pci_name, PCI_PROC_NAME_LEN - 1, + snprintf(pci_name, PCI_PROC_NAME_LEN, "%02x:%02x:%x", pdev->bus->number, pdev->slot->number, PCI_FUNC(pdev->devfn)); /* 0400 Read by owner */ @@ -653,7 +670,7 @@ void cqm_free_secure_mem_pages(struct hinic3_hwdev *handle, void *va, u32 order) return; } - if (va < info->va_base || va > (info->va_end - PAGE_SIZE) || + if (va < info->va_base || (va > (info->va_end - PAGE_SIZE)) || !PAGE_ALIGNED((va - info->va_base))) cqm_err(handle->dev_hdl, "%s va wrong value\n", __func__); diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c index 7d1bd357a5c2c84af1fa9730014b793034602054..86359c0ffda34890f21e738aeb1bf511cd9f6d12 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c @@ -180,8 +180,10 @@ struct tag_cqm_queue *cqm_object_recv_queue_create(void *ex_handle, u32 service_ rq_qinfo->common.q_header_vaddr = cqm_kmalloc_align(sizeof(struct tag_cqm_queue_header), GFP_KERNEL | __GFP_ZERO, CQM_QHEAD_ALIGN_ORDER); - if (!rq_qinfo->common.q_header_vaddr) + if (!rq_qinfo->common.q_header_vaddr) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_header_vaddr)); goto err1; + } rq_qinfo->common.q_header_paddr = pci_map_single(cqm_handle->dev, rq_qinfo->common.q_header_vaddr, @@ -484,7 +486,8 @@ static bool cqm_object_nonrdma_queue_param_check(struct hinic3_hwdev *handle, u3 struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); /* exception of service registrion check */ - if (!cqm_handle->service[service_type].has_register) { + if (service_type >= CQM_SERVICE_T_MAX || + !cqm_handle->service[service_type].has_register) { cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); return false; } @@ -1487,4 +1490,4 @@ void cqm_srq_used_rq_container_delete(struct tag_cqm_object *object, u8 *contain } kfree((void *)addr); } -EXPORT_SYMBOL(cqm_srq_used_rq_container_delete); 
+EXPORT_SYMBOL(cqm_srq_used_rq_container_delete); \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c index 92c19c42ac6a471ab9555a35ebb044c6648fad92..1007b446d11d91b3f22b26e36d39a0682c66bb8f 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c @@ -127,8 +127,10 @@ s32 cqm_container_create(struct tag_cqm_object *object, u8 **container_addr, boo * of the Container. */ new_container = kmalloc(qinfo->container_size, GFP_ATOMIC | __GFP_ZERO); - if (!new_container) + if (!new_container) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(new_container)); return CQM_FAIL; + } /* Container PCI mapping */ new_container_pa = pci_map_single(cqm_handle->dev, new_container, @@ -299,8 +301,10 @@ s32 cqm_share_recv_queue_create(struct tag_cqm_object *object) /* apply for buffer for SRQC */ common->q_ctx_vaddr = kmalloc(qinfo->q_ctx_size, GFP_KERNEL | __GFP_ZERO); - if (!common->q_ctx_vaddr) + if (!common->q_ctx_vaddr) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_ctx_vaddr)); goto err2; + } return CQM_SUCCESS; err2: @@ -808,8 +812,10 @@ static s32 cqm_nonrdma_queue_ctx_create(struct tag_cqm_object *object) common->q_ctx_vaddr = cqm_kmalloc_align(qinfo->q_ctx_size, GFP_KERNEL | __GFP_ZERO, (u16)shift); - if (!common->q_ctx_vaddr) + if (!common->q_ctx_vaddr) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_ctx_vaddr)); return CQM_FAIL; + } common->q_ctx_paddr = pci_map_single(cqm_handle->dev, common->q_ctx_vaddr, qinfo->q_ctx_size, PCI_DMA_BIDIRECTIONAL); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_comm_cmd.h b/drivers/net/ethernet/huawei/hinic3/hinic3_comm_cmd.h deleted file mode 100644 index ad732c337520172c92e2e65be623320b2938dbda..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_comm_cmd.h +++ /dev/null @@ -1,185 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved. 
- * File Name : hinic3_comm_cmd.h - * Version : Initial Draft - * Created : 2019/4/25 - * Last Modified : - * Description : COMM Commands between Driver and MPU - * Function List : - */ - -#ifndef HINIC3_COMMON_CMD_H -#define HINIC3_COMMON_CMD_H - -/* COMM Commands between Driver to MPU */ -enum hinic3_mgmt_cmd { - /* flr及资源清理相关命令 */ - COMM_MGMT_CMD_FUNC_RESET = 0, - COMM_MGMT_CMD_FEATURE_NEGO, - COMM_MGMT_CMD_FLUSH_DOORBELL, - COMM_MGMT_CMD_START_FLUSH, - COMM_MGMT_CMD_SET_FUNC_FLR, - COMM_MGMT_CMD_GET_GLOBAL_ATTR, - COMM_MGMT_CMD_SET_PPF_FLR_TYPE, - COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE, - - /* 分配msi-x中断资源 */ - COMM_MGMT_CMD_CFG_MSIX_NUM = 10, - - /* 驱动相关配置命令 */ - COMM_MGMT_CMD_SET_CMDQ_CTXT = 20, - COMM_MGMT_CMD_SET_VAT, - COMM_MGMT_CMD_CFG_PAGESIZE, - COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, - COMM_MGMT_CMD_SET_CEQ_CTRL_REG, - COMM_MGMT_CMD_SET_DMA_ATTR, - - /* INFRA配置相关命令字 */ - COMM_MGMT_CMD_GET_MQM_FIX_INFO = 40, - COMM_MGMT_CMD_SET_MQM_CFG_INFO, - COMM_MGMT_CMD_SET_MQM_SRCH_GPA, - COMM_MGMT_CMD_SET_PPF_TMR, - COMM_MGMT_CMD_SET_PPF_HT_GPA, - COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, - COMM_MGMT_CMD_SET_MBX_CRDT, - COMM_MGMT_CMD_CFG_TEMPLATE, - COMM_MGMT_CMD_SET_MQM_LIMIT, - - /* 信息获取相关命令字 */ - COMM_MGMT_CMD_GET_FW_VERSION = 60, - COMM_MGMT_CMD_GET_BOARD_INFO, - COMM_MGMT_CMD_SYNC_TIME, - COMM_MGMT_CMD_GET_HW_PF_INFOS, - COMM_MGMT_CMD_SEND_BDF_INFO, - COMM_MGMT_CMD_GET_VIRTIO_BDF_INFO, - COMM_MGMT_CMD_GET_SML_TABLE_INFO, - COMM_MGMT_CMD_GET_SDI_INFO, - - /* 升级相关命令字 */ - COMM_MGMT_CMD_UPDATE_FW = 80, - COMM_MGMT_CMD_ACTIVE_FW, - COMM_MGMT_CMD_HOT_ACTIVE_FW, - COMM_MGMT_CMD_HOT_ACTIVE_DONE_NOTICE, - COMM_MGMT_CMD_SWITCH_CFG, - COMM_MGMT_CMD_CHECK_FLASH, - COMM_MGMT_CMD_CHECK_FLASH_RW, - COMM_MGMT_CMD_RESOURCE_CFG, - COMM_MGMT_CMD_UPDATE_BIOS, /* TODO: merge to COMM_MGMT_CMD_UPDATE_FW */ - COMM_MGMT_CMD_MPU_GIT_CODE, - - /* chip reset相关 */ - COMM_MGMT_CMD_FAULT_REPORT = 100, - COMM_MGMT_CMD_WATCHDOG_INFO, - COMM_MGMT_CMD_MGMT_RESET, - COMM_MGMT_CMD_FFM_SET, /* TODO: check if needed */ - - /* chip info/log 相关 */ - COMM_MGMT_CMD_GET_LOG = 120, - COMM_MGMT_CMD_TEMP_OP, - COMM_MGMT_CMD_EN_AUTO_RST_CHIP, - COMM_MGMT_CMD_CFG_REG, - COMM_MGMT_CMD_GET_CHIP_ID, - COMM_MGMT_CMD_SYSINFO_DFX, - COMM_MGMT_CMD_PCIE_DFX_NTC, - COMM_MGMT_CMD_DICT_LOG_STATUS, /* LOG STATUS 127 */ - COMM_MGMT_CMD_MSIX_INFO, - COMM_MGMT_CMD_CHANNEL_DETECT, - COMM_MGMT_CMD_DICT_COUNTER_STATUS, - - /* switch workmode 相关 */ - COMM_MGMT_CMD_CHECK_IF_SWITCH_WORKMODE = 140, - COMM_MGMT_CMD_SWITCH_WORKMODE, - - /* mpu 相关 */ - COMM_MGMT_CMD_MIGRATE_DFX_HPA = 150, - COMM_MGMT_CMD_BDF_INFO, - COMM_MGMT_CMD_NCSI_CFG_INFO_GET_PROC, - - /* rsvd0 section */ - COMM_MGMT_CMD_SECTION_RSVD_0 = 160, - - /* rsvd1 section */ - COMM_MGMT_CMD_SECTION_RSVD_1 = 170, - - /* rsvd2 section */ - COMM_MGMT_CMD_SECTION_RSVD_2 = 180, - - /* rsvd3 section */ - COMM_MGMT_CMD_SECTION_RSVD_3 = 190, - - /* TODO: move to DFT mode */ - COMM_MGMT_CMD_GET_DIE_ID = 200, - COMM_MGMT_CMD_GET_EFUSE_TEST, - COMM_MGMT_CMD_EFUSE_INFO_CFG, - COMM_MGMT_CMD_GPIO_CTL, - COMM_MGMT_CMD_HI30_SERLOOP_START, /* TODO: DFT or hilink */ - COMM_MGMT_CMD_HI30_SERLOOP_STOP, /* TODO: DFT or hilink */ - COMM_MGMT_CMD_HI30_MBIST_SET_FLAG, /* TODO: DFT or hilink */ - COMM_MGMT_CMD_HI30_MBIST_GET_RESULT, /* TODO: DFT or hilink */ - COMM_MGMT_CMD_ECC_TEST, - COMM_MGMT_CMD_FUNC_BIST_TEST, /* 209 */ - - COMM_MGMT_CMD_VPD_SET = 210, - COMM_MGMT_CMD_VPD_GET, - - COMM_MGMT_CMD_ERASE_FLASH, - COMM_MGMT_CMD_QUERY_FW_INFO, - COMM_MGMT_CMD_GET_CFG_INFO, - COMM_MGMT_CMD_GET_UART_LOG, - COMM_MGMT_CMD_SET_UART_CMD, - 
COMM_MGMT_CMD_SPI_TEST, - - /* TODO: ALL reg read/write merge to COMM_MGMT_CMD_CFG_REG */ - COMM_MGMT_CMD_UP_REG_GET, - COMM_MGMT_CMD_UP_REG_SET, /* 219 */ - - COMM_MGMT_CMD_REG_READ = 220, - COMM_MGMT_CMD_REG_WRITE, - COMM_MGMT_CMD_MAG_REG_WRITE, - COMM_MGMT_CMD_ANLT_REG_WRITE, - - COMM_MGMT_CMD_HEART_EVENT, /* TODO: delete */ - COMM_MGMT_CMD_NCSI_OEM_GET_DRV_INFO, /* TODO: delete */ - COMM_MGMT_CMD_LASTWORD_GET, - COMM_MGMT_CMD_READ_BIN_DATA, /* TODO: delete */ - /* COMM_MGMT_CMD_WWPN_GET, TODO: move to FC? */ - /* COMM_MGMT_CMD_WWPN_SET, TODO: move to FC? */ /* 229 */ - - /* TODO: check if needed */ - COMM_MGMT_CMD_SET_VIRTIO_DEV = 230, - COMM_MGMT_CMD_SET_MAC, - /* MPU patch cmd */ - COMM_MGMT_CMD_LOAD_PATCH, - COMM_MGMT_CMD_REMOVE_PATCH, - COMM_MGMT_CMD_PATCH_ACTIVE, - COMM_MGMT_CMD_PATCH_DEACTIVE, - COMM_MGMT_CMD_PATCH_SRAM_OPTIMIZE, - /* container host process */ - COMM_MGMT_CMD_CONTAINER_HOST_PROC, - /* nsci counter */ - COMM_MGMT_CMD_NCSI_COUNTER_PROC, - COMM_MGMT_CMD_CHANNEL_STATUS_CHECK, /* 239 */ - - /* hot patch rsvd cmd */ - COMM_MGMT_CMD_RSVD_0 = 240, - COMM_MGMT_CMD_RSVD_1, - COMM_MGMT_CMD_RSVD_2, - COMM_MGMT_CMD_RSVD_3, - COMM_MGMT_CMD_RSVD_4, - /* 无效字段,版本收编删除,编译使用 */ - COMM_MGMT_CMD_SEND_API_ACK_BY_UP, - - /* 注:添加cmd,不能修改已有命令字的值,请在前方rsvd - * section中添加;原则上所有分支cmd表完全一致 - */ - COMM_MGMT_CMD_MAX = 255, -}; - -/* CmdQ Common subtype */ -enum comm_cmdq_cmd { - COMM_CMD_UCODE_ARM_BIT_SET = 2, - COMM_CMD_SEND_NPU_DFT_CMD, -}; - -#endif /* HINIC3_COMMON_CMD_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h deleted file mode 100644 index 9ce908eb7200a1ae8466a01a0aebbae00464fec2..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h +++ /dev/null @@ -1,118 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ - -#ifndef HINIC3_COMMON_H -#define HINIC3_COMMON_H - -#include - -struct hinic3_dma_addr_align { - u32 real_size; - - void *ori_vaddr; - dma_addr_t ori_paddr; - - void *align_vaddr; - dma_addr_t align_paddr; -}; - -enum hinic3_wait_return { - WAIT_PROCESS_CPL = 0, - WAIT_PROCESS_WAITING = 1, - WAIT_PROCESS_ERR = 2, -}; - -struct hinic3_sge { - u32 hi_addr; - u32 lo_addr; - u32 len; -}; - -#ifdef static -#undef static -#define LLT_STATIC_DEF_SAVED -#endif - -/* * - * hinic_cpu_to_be32 - convert data to big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert, must be Multiple of 4B - */ -static inline void hinic3_cpu_to_be32(void *data, int len) -{ - int i, chunk_sz = sizeof(u32); - int data_len = len; - u32 *mem = data; - - if (!data) - return; - - data_len = data_len / chunk_sz; - - for (i = 0; i < data_len; i++) { - *mem = cpu_to_be32(*mem); - mem++; - } -} - -/* * - * hinic3_cpu_to_be32 - convert data from big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert - */ -static inline void hinic3_be32_to_cpu(void *data, int len) -{ - int i, chunk_sz = sizeof(u32); - int data_len = len; - u32 *mem = data; - - if (!data) - return; - - data_len = data_len / chunk_sz; - - for (i = 0; i < data_len; i++) { - *mem = be32_to_cpu(*mem); - mem++; - } -} - -/* * - * hinic3_set_sge - set dma area in scatter gather entry - * @sge: scatter gather entry - * @addr: dma address - * @len: length of relevant data in the dma address - */ -static inline void hinic3_set_sge(struct hinic3_sge *sge, dma_addr_t addr, - int len) -{ - sge->hi_addr = upper_32_bits(addr); - 
sge->lo_addr = lower_32_bits(addr); - sge->len = len; -} - -#define hinic3_hw_be32(val) (val) -#define hinic3_hw_cpu32(val) (val) -#define hinic3_hw_cpu16(val) (val) - -static inline void hinic3_hw_be32_len(void *data, int len) -{ -} - -static inline void hinic3_hw_cpu32_len(void *data, int len) -{ -} - -int hinic3_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, - unsigned int flag, - struct hinic3_dma_addr_align *mem_align); - -void hinic3_dma_free_coherent_align(void *dev_hdl, - struct hinic3_dma_addr_align *mem_align); - -typedef enum hinic3_wait_return (*wait_cpl_handler)(void *priv_data); - -int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler, - u32 wait_total_ms, u32 wait_once_us); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h index f269691d757bcf08540181b75c713743500298a8..797924408a9bfb9a6d214d643f05dd2cfdeb97ae 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h @@ -8,7 +8,7 @@ #include "mpu_cmd_base_defs.h" -#define HINIC3_DRV_VERSION "15.17.1.1" +#define HINIC3_DRV_VERSION "17.7.8.1" #define HINIC3_DRV_DESC "Intelligent Network Interface Card Driver" #define HIUDK_DRV_DESC "Intelligent Network Unified Driver" @@ -45,6 +45,8 @@ enum hinic3_service_type { SERVICE_T_CUSTOM, SERVICE_T_VROCE, SERVICE_T_CRYPT, + SERVICE_T_VSOCK, + SERVICE_T_BIFUR, SERVICE_T_MAX, /* Only used for interruption resource management, @@ -63,6 +65,9 @@ struct nic_service_cap { u16 max_sqs; u16 max_rqs; u16 default_num_queues; + u16 outband_vlan_cfg_en; + u8 lro_enable; + u8 rsvd1[3]; }; struct ppa_service_cap { @@ -76,6 +81,10 @@ struct ppa_service_cap { u16 rsvd1; }; +struct bifur_service_cap { + u8 rsvd; +}; + struct vbs_service_cap { u16 vbs_max_volq; u8 vbs_main_pf_enable; @@ -785,6 +794,15 @@ bool hinic3_support_toe(void *hwdev, struct toe_service_cap *cap); */ bool hinic3_support_ppa(void *hwdev, struct ppa_service_cap *cap); +/* * + * @brief hinic3_support_bifur - function support bifur + * @param hwdev: device pointer to hwdev + * @param cap: bifur service capbility + * @retval zero: success + * @retval non-zero: failure + */ +bool hinic3_support_bifur(void *hwdev, struct bifur_service_cap *cap); + /* * * @brief hinic3_support_migr - function support migrate * @param hwdev: device pointer to hwdev @@ -895,6 +913,13 @@ int hinic3_get_mgmt_version(void *hwdev, u8 *mgmt_ver, u8 version_size, int hinic3_get_fw_version(void *hwdev, struct hinic3_fw_version *fw_ver, u16 channel); +/* * + * @brief hinic3_get_bond_create_mode - get bond create mode + * @param hwdev: device pointer to hwdev + * @retval global function id + */ +u8 hinic3_get_bond_create_mode(void *udkdev); + /* * * @brief hinic3_global_func_id - get global function id * @param hwdev: device pointer to hwdev @@ -1249,4 +1274,7 @@ int hinic3_mbox_to_host_sync(void *hwdev, enum hinic3_mod_type mod, int hinic3_get_func_vroce_enable(void *hwdev, u16 glb_func_idx, u8 *en); +void hinic3_module_get(void *hwdev, enum hinic3_service_type type); +void hinic3_module_put(void *hwdev, enum hinic3_service_type type); + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c index 5863beebde3903ac1e7c475405bf410261431110..9b5f0178f5e72eb389d5d2f6628d19691f779198 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c @@ -18,9 +18,8 @@ #include "hinic3_tx.h" #include 
"hinic3_dcb.h" #include "hinic3_nic.h" -#include "hinic3_mgmt_interface.h" -#include "mag_mpu_cmd.h" -#include "mag_cmd.h" +#include "hinic3_bond.h" +#include "nic_mpu_cmd_defs.h" typedef int (*nic_driv_module)(struct hinic3_nic_dev *nic_dev, const void *buf_in, u32 in_size, @@ -48,7 +47,7 @@ static int get_nic_drv_version(void *buf_out, const u32 *out_size) } err = snprintf(ver_info->ver, sizeof(ver_info->ver), "%s %s", - HINIC3_NIC_DRV_VERSION, "2024-07-03_09:33:00"); + HINIC3_NIC_DRV_VERSION, "2025-05-01_00:00:03"); if (err < 0) return -EINVAL; @@ -94,13 +93,13 @@ static int get_q_num(struct hinic3_nic_dev *nic_dev, return -EFAULT; } - if (!buf_out) { + if (!buf_out || !out_size) { nicif_err(nic_dev, drv, nic_dev->netdev, "Get queue number para buf_out is NULL.\n"); return -EINVAL; } - if (!out_size || *out_size != sizeof(u16)) { + if (*out_size != sizeof(u16)) { nicif_err(nic_dev, drv, nic_dev->netdev, "Unexpect out buf size from user: %u, expect: %lu\n", *out_size, sizeof(u16)); @@ -126,7 +125,8 @@ static int get_tx_wqe_info(struct hinic3_nic_dev *nic_dev, } if (!buf_in || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Buf_in or buf_out is NULL.\n"); + nicif_err(nic_dev, drv, nic_dev->netdev, + "Buf_in or buf_out is NULL.\n"); return -EINVAL; } @@ -178,7 +178,8 @@ static int get_rx_info(struct hinic3_nic_dev *nic_dev, const void *buf_in, } rq_info->delta = (u16)nic_dev->rxqs[q_id].delta; - rq_info->ci = (u16)(nic_dev->rxqs[q_id].cons_idx & nic_dev->rxqs[q_id].q_mask); + rq_info->ci = (u16)(nic_dev->rxqs[q_id].cons_idx & + nic_dev->rxqs[q_id].q_mask); rq_info->sw_pi = nic_dev->rxqs[q_id].next_to_update; rq_info->msix_vector = nic_dev->rxqs[q_id].irq_id; @@ -201,7 +202,8 @@ static int get_rx_wqe_info(struct hinic3_nic_dev *nic_dev, const void *buf_in, } if (!buf_in || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Buf_in or buf_out is NULL.\n"); + nicif_err(nic_dev, drv, nic_dev->netdev, + "Buf_in or buf_out is NULL.\n"); return -EINVAL; } @@ -230,7 +232,7 @@ static int get_rx_cqe_info(struct hinic3_nic_dev *nic_dev, const void *buf_in, return -EFAULT; } - if (!buf_in || !buf_out) { + if (!buf_in || !buf_out || !out_size) { nicif_err(nic_dev, drv, nic_dev->netdev, "Buf_in or buf_out is NULL.\n"); return -EINVAL; @@ -243,7 +245,7 @@ static int get_rx_cqe_info(struct hinic3_nic_dev *nic_dev, const void *buf_in, return -EINVAL; } - if (!out_size || *out_size != sizeof(struct hinic3_rq_cqe)) { + if (*out_size != sizeof(struct hinic3_rq_cqe)) { nicif_err(nic_dev, drv, nic_dev->netdev, "Unexpect out buf size from user :%u, expect: %lu\n", *out_size, sizeof(struct hinic3_rq_cqe)); @@ -252,10 +254,16 @@ static int get_rx_cqe_info(struct hinic3_nic_dev *nic_dev, const void *buf_in, q_id = (u16)info->q_id; idx = (u16)info->wqe_id; - if (q_id >= nic_dev->q_params.num_qps || idx >= nic_dev->rxqs[q_id].q_depth) { + if (q_id >= nic_dev->q_params.num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid q_id[%u] >= %u.\n", q_id, + nic_dev->q_params.num_qps); + return -EFAULT; + } + if (idx >= nic_dev->rxqs[q_id].q_depth) { nicif_err(nic_dev, drv, nic_dev->netdev, - "Invalid q_id[%u] >= %u, or wqe idx[%u] >= %u.\n", - q_id, nic_dev->q_params.num_qps, idx, nic_dev->rxqs[q_id].q_depth); + "Invalid wqe idx[%u] >= %u.\n", idx, + nic_dev->rxqs[q_id].q_depth); return -EFAULT; } @@ -338,7 +346,8 @@ static int set_loopback_mode(struct hinic3_nic_dev *nic_dev, const void *buf_in, err = hinic3_set_loopback_mode(nic_dev->hwdev, (u8)mode->loop_mode, (u8)mode->loop_ctrl); if (err == 0) - 
nicif_info(nic_dev, drv, nic_dev->netdev, "Set loopback mode %u en %u succeed\n", + nicif_info(nic_dev, drv, nic_dev->netdev, + "Set loopback mode %u en %u succeed\n", mode->loop_mode, mode->loop_ctrl); return err; @@ -417,6 +426,59 @@ static int set_pf_bw_limit(struct hinic3_nic_dev *nic_dev, const void *buf_in, { u32 pf_bw_limit; int err; + struct hinic3_nic_io *nic_io = NULL; + struct net_device *net_dev = nic_dev->netdev; + + if (hinic3_support_roce(nic_dev->hwdev, NULL) && + hinic3_is_bond_dev_status_actived(net_dev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "The rate limit func is not supported when RoCE bonding is enabled\n"); + return -EINVAL; + } + + if (HINIC3_FUNC_IS_VF(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "To set VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!buf_in || !buf_out || in_size != sizeof(u32) || + !out_size || *out_size != sizeof(u8)) + return -EINVAL; + + nic_io = hinic3_get_service_adapter(nic_dev->hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + nic_io->direct = HINIC3_NIC_TX; + pf_bw_limit = *((u32 *)buf_in); + + err = hinic3_set_pf_bw_limit(nic_dev->hwdev, pf_bw_limit); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to set pf bandwidth limit to %u%%\n", + pf_bw_limit); + if (err < 0) + return err; + } + + *((u8 *)buf_out) = (u8)err; + + return 0; +} + +static int set_rx_pf_bw_limit(struct hinic3_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u32 pf_bw_limit; + int err; + struct hinic3_nic_io *nic_io = NULL; + struct net_device *net_dev = nic_dev->netdev; + + if (hinic3_support_roce(nic_dev->hwdev, NULL) && + hinic3_is_bond_dev_status_actived(net_dev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "The rate limit func is not supported when RoCE bonding is enabled\n"); + return -EINVAL; + } if (HINIC3_FUNC_IS_VF(nic_dev->hwdev)) { nicif_err(nic_dev, drv, nic_dev->netdev, "To set VF bandwidth rate, please use ip link cmd\n"); @@ -426,11 +488,16 @@ static int set_pf_bw_limit(struct hinic3_nic_dev *nic_dev, const void *buf_in, if (!buf_in || !buf_out || in_size != sizeof(u32) || !out_size || *out_size != sizeof(u8)) return -EINVAL; + nic_io = hinic3_get_service_adapter(nic_dev->hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + nic_io->direct = HINIC3_NIC_RX; pf_bw_limit = *((u32 *)buf_in); err = hinic3_set_pf_bw_limit(nic_dev->hwdev, pf_bw_limit); if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to set pf bandwidth limit to %d%%\n", + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set pf bandwidth limit to %d%%\n", pf_bw_limit); if (err < 0) return err; @@ -445,19 +512,21 @@ static int get_pf_bw_limit(struct hinic3_nic_dev *nic_dev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size) { struct hinic3_nic_io *nic_io = NULL; + u32 *rate_limit = (u32 *)buf_out; if (HINIC3_FUNC_IS_VF(nic_dev->hwdev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, "To get VF bandwidth rate, please use ip link cmd\n"); + nicif_err(nic_dev, drv, nic_dev->netdev, + "To get VF bandwidth rate, please use ip link cmd\n"); return -EINVAL; } if (!buf_out || !out_size) return -EINVAL; - if (*out_size != sizeof(u32)) { + if (*out_size != sizeof(u32) * 2) { // 2:Stored in an array, TX and RX, both length are u32 nicif_err(nic_dev, drv, nic_dev->netdev, "Unexpect out buf size from user: %d, expect: %lu\n", - *out_size, sizeof(u32)); + *out_size, sizeof(u32) * 2); return -EFAULT; } @@ -465,8 +534,12 @@ static int get_pf_bw_limit(struct 
hinic3_nic_dev *nic_dev, const void *buf_in, if (!nic_io) return -EINVAL; - *((u32 *)buf_out) = nic_io->nic_cfg.pf_bw_limit; + rate_limit[HINIC3_NIC_RX] = nic_io->nic_cfg.pf_bw_rx_limit; + rate_limit[HINIC3_NIC_TX] = nic_io->nic_cfg.pf_bw_tx_limit; + nicif_info(nic_dev, drv, nic_dev->netdev, + "read rate cfg success rx rate is: %u, tx rate is : %u\n", + rate_limit[HINIC3_NIC_RX], rate_limit[HINIC3_NIC_TX]); return 0; } @@ -477,8 +550,8 @@ static int get_sset_count(struct hinic3_nic_dev *nic_dev, const void *buf_in, if (!buf_in || in_size != sizeof(u32) || !out_size || *out_size != sizeof(u32) || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid parameters, in_size: %u\n", - in_size); + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid parameters, in_size: %u\n", in_size); return -EINVAL; } @@ -504,16 +577,16 @@ static int get_sset_stats(struct hinic3_nic_dev *nic_dev, const void *buf_in, int err; if (!buf_in || in_size != sizeof(u32) || !out_size || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid parameters, in_size: %u\n", - in_size); + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid parameters, in_size: %u\n", in_size); return -EINVAL; } size = sizeof(u32); err = get_sset_count(nic_dev, buf_in, in_size, &count, &size); if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Get sset count failed, ret=%d\n", - err); + nicif_err(nic_dev, drv, nic_dev->netdev, + "Get sset count failed, ret=%d\n", err); return -EINVAL; } if (count * sizeof(*items) != *out_size) { @@ -527,12 +600,14 @@ static int get_sset_stats(struct hinic3_nic_dev *nic_dev, const void *buf_in, switch (sset) { case HINIC3_SHOW_SSET_IO_STATS: - hinic3_get_io_stats(nic_dev, items); + err = hinic3_get_io_stats(nic_dev, items); + if (err < 0) + return -EINVAL; break; default: - nicif_err(nic_dev, drv, nic_dev->netdev, "Unknown %u to get stats\n", - sset); + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unknown %u to get stats\n", sset); err = -EINVAL; break; } @@ -544,15 +619,18 @@ static int update_pcp_dscp_cfg(struct hinic3_nic_dev *nic_dev, struct hinic3_dcb_config *wanted_dcb_cfg, const struct hinic3_mt_qos_dev_cfg *qos_in) { + struct hinic3_dcb *dcb = nic_dev->dcb; int i; u8 cos_num = 0, valid_cos_bitmap = 0; if (qos_in->cfg_bitmap & CMD_QOS_DEV_PCP2COS) { for (i = 0; i < NIC_DCB_UP_MAX; i++) { - if (!(nic_dev->func_dft_cos_bitmap & BIT(qos_in->pcp2cos[i]))) { + if (!(dcb->func_dft_cos_bitmap & + BIT(qos_in->pcp2cos[i]))) { nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid cos=%u, func cos valid map is %u", - qos_in->pcp2cos[i], nic_dev->func_dft_cos_bitmap); + qos_in->pcp2cos[i], + dcb->func_dft_cos_bitmap); return -EINVAL; } @@ -562,7 +640,8 @@ static int update_pcp_dscp_cfg(struct hinic3_nic_dev *nic_dev, } } - memcpy(wanted_dcb_cfg->pcp2cos, qos_in->pcp2cos, sizeof(qos_in->pcp2cos)); + memcpy(wanted_dcb_cfg->pcp2cos, qos_in->pcp2cos, + sizeof(qos_in->pcp2cos)); wanted_dcb_cfg->pcp_user_cos_num = cos_num; wanted_dcb_cfg->pcp_valid_cos_map = valid_cos_bitmap; } @@ -572,12 +651,14 @@ static int update_pcp_dscp_cfg(struct hinic3_nic_dev *nic_dev, valid_cos_bitmap = 0; for (i = 0; i < NIC_DCB_IP_PRI_MAX; i++) { u8 cos = qos_in->dscp2cos[i] == DBG_DFLT_DSCP_VAL ? 
- nic_dev->wanted_dcb_cfg.dscp2cos[i] : qos_in->dscp2cos[i]; + dcb->wanted_dcb_cfg.dscp2cos[i] : + qos_in->dscp2cos[i]; - if (cos >= NIC_DCB_UP_MAX || !(nic_dev->func_dft_cos_bitmap & BIT(cos))) { + if (cos >= NIC_DCB_UP_MAX || + !(dcb->func_dft_cos_bitmap & BIT(cos))) { nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid cos=%u, func cos valid map is %u", - cos, nic_dev->func_dft_cos_bitmap); + cos, dcb->func_dft_cos_bitmap); return -EINVAL; } @@ -588,8 +669,10 @@ static int update_pcp_dscp_cfg(struct hinic3_nic_dev *nic_dev, } for (i = 0; i < NIC_DCB_IP_PRI_MAX; i++) - wanted_dcb_cfg->dscp2cos[i] = qos_in->dscp2cos[i] == DBG_DFLT_DSCP_VAL ? - nic_dev->hw_dcb_cfg.dscp2cos[i] : qos_in->dscp2cos[i]; + wanted_dcb_cfg->dscp2cos[i] = + qos_in->dscp2cos[i] == DBG_DFLT_DSCP_VAL ? + dcb->hw_dcb_cfg.dscp2cos[i] : + qos_in->dscp2cos[i]; wanted_dcb_cfg->dscp_user_cos_num = cos_num; wanted_dcb_cfg->dscp_valid_cos_map = valid_cos_bitmap; } @@ -601,11 +684,12 @@ static int update_wanted_qos_cfg(struct hinic3_nic_dev *nic_dev, struct hinic3_dcb_config *wanted_dcb_cfg, const struct hinic3_mt_qos_dev_cfg *qos_in) { + struct hinic3_dcb *dcb = nic_dev->dcb; int ret; u8 cos_num, valid_cos_bitmap; if (qos_in->cfg_bitmap & CMD_QOS_DEV_TRUST) { - if (qos_in->trust > DCB_DSCP) { + if (qos_in->trust > HINIC3_DCB_DSCP) { nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid trust=%u\n", qos_in->trust); return -EINVAL; @@ -615,7 +699,7 @@ static int update_wanted_qos_cfg(struct hinic3_nic_dev *nic_dev, } if (qos_in->cfg_bitmap & CMD_QOS_DEV_DFT_COS) { - if (!(BIT(qos_in->dft_cos) & nic_dev->func_dft_cos_bitmap)) { + if (!(BIT(qos_in->dft_cos) & dcb->func_dft_cos_bitmap)) { nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid dft_cos=%u\n", qos_in->dft_cos); return -EINVAL; @@ -628,7 +712,7 @@ static int update_wanted_qos_cfg(struct hinic3_nic_dev *nic_dev, if (ret) return ret; - if (wanted_dcb_cfg->trust == DCB_PCP) { + if (wanted_dcb_cfg->trust == HINIC3_DCB_PCP) { cos_num = wanted_dcb_cfg->pcp_user_cos_num; valid_cos_bitmap = wanted_dcb_cfg->pcp_valid_cos_map; } else { @@ -636,18 +720,11 @@ static int update_wanted_qos_cfg(struct hinic3_nic_dev *nic_dev, valid_cos_bitmap = wanted_dcb_cfg->dscp_valid_cos_map; } - if (test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags)) { - if (cos_num > nic_dev->q_params.num_qps) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "DCB is on, cos num should not more than channel num:%u\n", - nic_dev->q_params.num_qps); - return -EOPNOTSUPP; - } - } - if (!(BIT(wanted_dcb_cfg->default_cos) & valid_cos_bitmap)) { - nicif_info(nic_dev, drv, nic_dev->netdev, "Current default_cos=%u, change to %u\n", - wanted_dcb_cfg->default_cos, (u8)fls(valid_cos_bitmap) - 1); + nicif_info(nic_dev, drv, nic_dev->netdev, + "Current default_cos=%u, change to %u\n", + wanted_dcb_cfg->default_cos, + (u8)fls(valid_cos_bitmap) - 1); wanted_dcb_cfg->default_cos = (u8)fls(valid_cos_bitmap) - 1; } @@ -657,6 +734,7 @@ static int update_wanted_qos_cfg(struct hinic3_nic_dev *nic_dev, static int dcb_mt_qos_map(struct hinic3_nic_dev *nic_dev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size) { + struct hinic3_dcb *dcb = nic_dev->dcb; const struct hinic3_mt_qos_dev_cfg *qos_in = buf_in; struct hinic3_mt_qos_dev_cfg *qos_out = buf_out; u8 i; @@ -675,9 +753,10 @@ static int dcb_mt_qos_map(struct hinic3_nic_dev *nic_dev, const void *buf_in, memcpy(qos_out, qos_in, sizeof(*qos_in)); qos_out->head.status = 0; if (qos_in->op_code & MT_DCB_OPCODE_WR) { - memcpy(&nic_dev->wanted_dcb_cfg, &nic_dev->hw_dcb_cfg, + 
memcpy(&dcb->wanted_dcb_cfg, &dcb->hw_dcb_cfg, sizeof(struct hinic3_dcb_config)); - err = update_wanted_qos_cfg(nic_dev, &nic_dev->wanted_dcb_cfg, qos_in); + err = update_wanted_qos_cfg(nic_dev, &dcb->wanted_dcb_cfg, + qos_in); if (err) { qos_out->head.status = MT_EINVAL; return 0; @@ -687,12 +766,12 @@ static int dcb_mt_qos_map(struct hinic3_nic_dev *nic_dev, const void *buf_in, if (err) qos_out->head.status = MT_EIO; } else { - qos_out->dft_cos = nic_dev->hw_dcb_cfg.default_cos; - qos_out->trust = nic_dev->hw_dcb_cfg.trust; + qos_out->dft_cos = dcb->hw_dcb_cfg.default_cos; + qos_out->trust = dcb->hw_dcb_cfg.trust; for (i = 0; i < NIC_DCB_UP_MAX; i++) - qos_out->pcp2cos[i] = nic_dev->hw_dcb_cfg.pcp2cos[i]; + qos_out->pcp2cos[i] = dcb->hw_dcb_cfg.pcp2cos[i]; for (i = 0; i < NIC_DCB_IP_PRI_MAX; i++) - qos_out->dscp2cos[i] = nic_dev->hw_dcb_cfg.dscp2cos[i]; + qos_out->dscp2cos[i] = dcb->hw_dcb_cfg.dscp2cos[i]; } return 0; @@ -721,27 +800,17 @@ static int dcb_mt_dcb_state(struct hinic3_nic_dev *nic_dev, const void *buf_in, memcpy(dcb_out, dcb_in, sizeof(*dcb_in)); dcb_out->head.status = 0; if (dcb_in->op_code & MT_DCB_OPCODE_WR) { - if (test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags) == dcb_in->state) + if (test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags) == + dcb_in->state) return 0; - if (dcb_in->state) { - if (user_cos_num > nic_dev->q_params.num_qps) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "cos num %u should not more than channel num %u\n", - user_cos_num, - nic_dev->q_params.num_qps); - - return -EOPNOTSUPP; - } - } - - rtnl_lock(); if (netif_running(nic_dev->netdev)) { netif_run = 1; hinic3_vport_down(nic_dev); } - err = hinic3_setup_cos(nic_dev->netdev, dcb_in->state ? user_cos_num : 0, + err = hinic3_setup_cos(nic_dev->netdev, + dcb_in->state ? user_cos_num : 0, netif_run); if (err) goto setup_cos_fail; @@ -751,7 +820,6 @@ static int dcb_mt_dcb_state(struct hinic3_nic_dev *nic_dev, const void *buf_in, if (err) goto vport_up_fail; } - rtnl_unlock(); } else { dcb_out->state = !!test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags); } @@ -759,12 +827,12 @@ static int dcb_mt_dcb_state(struct hinic3_nic_dev *nic_dev, const void *buf_in, return 0; vport_up_fail: - hinic3_setup_cos(nic_dev->netdev, dcb_in->state ? 0 : user_cos_num, netif_run); + hinic3_setup_cos(nic_dev->netdev, dcb_in->state ? 
0 : user_cos_num, + netif_run); setup_cos_fail: if (netif_run) hinic3_vport_up(nic_dev); - rtnl_unlock(); return err; } @@ -772,13 +840,15 @@ static int dcb_mt_dcb_state(struct hinic3_nic_dev *nic_dev, const void *buf_in, static int dcb_mt_hw_qos_get(struct hinic3_nic_dev *nic_dev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size) { + struct hinic3_dcb *dcb = nic_dev->dcb; const struct hinic3_mt_qos_cos_cfg *cos_cfg_in = buf_in; struct hinic3_mt_qos_cos_cfg *cos_cfg_out = buf_out; if (!buf_in || !buf_out || !out_size) return -EINVAL; - if (*out_size != sizeof(*cos_cfg_out) || in_size != sizeof(*cos_cfg_in)) { + if (*out_size != sizeof(*cos_cfg_out) || + in_size != sizeof(*cos_cfg_in)) { nicif_err(nic_dev, drv, nic_dev->netdev, "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %lu\n", in_size, *out_size, sizeof(*cos_cfg_in)); @@ -789,9 +859,9 @@ static int dcb_mt_hw_qos_get(struct hinic3_nic_dev *nic_dev, const void *buf_in, cos_cfg_out->head.status = 0; cos_cfg_out->port_id = hinic3_physical_port_id(nic_dev->hwdev); - cos_cfg_out->func_cos_bitmap = (u8)nic_dev->func_dft_cos_bitmap; - cos_cfg_out->port_cos_bitmap = (u8)nic_dev->port_dft_cos_bitmap; - cos_cfg_out->func_max_cos_num = nic_dev->cos_config_num_max; + cos_cfg_out->func_cos_bitmap = (u8)dcb->func_dft_cos_bitmap; + cos_cfg_out->port_cos_bitmap = (u8)dcb->port_dft_cos_bitmap; + cos_cfg_out->func_max_cos_num = dcb->cos_config_num_max; return 0; } @@ -803,7 +873,13 @@ static int get_inter_num(struct hinic3_nic_dev *nic_dev, const void *buf_in, intr_num = hinic3_intr_num(nic_dev->hwdev); - if (!buf_out || !out_size || *out_size != sizeof(u16)) { + if (!buf_out || !out_size) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Buf_out or out_size is NULL.\n"); + return -EINVAL; + } + + if (*out_size != sizeof(u16)) { nicif_err(nic_dev, drv, nic_dev->netdev, "Unexpect out buf size from user :%u, expect: %lu\n", *out_size, sizeof(u16)); @@ -817,20 +893,27 @@ static int get_inter_num(struct hinic3_nic_dev *nic_dev, const void *buf_in, static int get_netdev_name(struct hinic3_nic_dev *nic_dev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size) { - if (!buf_out || !out_size || *out_size != IFNAMSIZ) { + if (!buf_out || !out_size) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Buf_out or out_size is NULL.\n"); + return -EINVAL; + } + + if (*out_size != IFNAMSIZ) { nicif_err(nic_dev, drv, nic_dev->netdev, "Unexpect out buf size from user :%u, expect: %u\n", *out_size, IFNAMSIZ); return -EFAULT; } - strlcpy(buf_out, nic_dev->netdev->name, IFNAMSIZ); + strscpy(buf_out, nic_dev->netdev->name, IFNAMSIZ); return 0; } -static int get_netdev_tx_timeout(struct hinic3_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) +static int get_netdev_tx_timeout(struct hinic3_nic_dev *nic_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) { struct net_device *net_dev = nic_dev->netdev; int *tx_timeout = buf_out; @@ -839,7 +922,8 @@ static int get_netdev_tx_timeout(struct hinic3_nic_dev *nic_dev, const void *buf return -EINVAL; if (*out_size != sizeof(int)) { - nicif_err(nic_dev, drv, net_dev, "Unexpect buf size from user, out_size: %u, expect: %lu\n", + nicif_err(nic_dev, drv, net_dev, + "Unexpect buf size from user, out_size: %u, expect: %lu\n", *out_size, sizeof(int)); return -EINVAL; } @@ -849,8 +933,9 @@ static int get_netdev_tx_timeout(struct hinic3_nic_dev *nic_dev, const void *buf return 0; } -static int set_netdev_tx_timeout(struct hinic3_nic_dev *nic_dev, const void 
*buf_in, - u32 in_size, void *buf_out, u32 *out_size) +static int set_netdev_tx_timeout(struct hinic3_nic_dev *nic_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) { struct net_device *net_dev = nic_dev->netdev; const int *tx_timeout = buf_in; @@ -859,13 +944,15 @@ static int set_netdev_tx_timeout(struct hinic3_nic_dev *nic_dev, const void *buf return -EINVAL; if (in_size != sizeof(int)) { - nicif_err(nic_dev, drv, net_dev, "Unexpect buf size from user, in_size: %u, expect: %lu\n", + nicif_err(nic_dev, drv, net_dev, + "Unexpect buf size from user, in_size: %u, expect: %lu\n", in_size, sizeof(int)); return -EINVAL; } net_dev->watchdog_timeo = *tx_timeout * HZ; - nicif_info(nic_dev, drv, net_dev, "Set tx timeout check period to %ds\n", *tx_timeout); + nicif_info(nic_dev, drv, net_dev, + "Set tx timeout check period to %ds\n", *tx_timeout); return 0; } @@ -891,6 +978,34 @@ static int get_xsfp_present(struct hinic3_nic_dev *nic_dev, const void *buf_in, return 0; } +static int get_xsfp_tlv_info(struct hinic3_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct drv_mag_cmd_get_xsfp_tlv_rsp *sfp_tlv_info = buf_out; + const struct mag_cmd_get_xsfp_tlv_req *sfp_tlv_info_req = buf_in; + int err; + + if ((buf_in == NULL) || (buf_out == NULL) || (out_size == NULL)) + return -EINVAL; + + if (*out_size != sizeof(*sfp_tlv_info) || + in_size != sizeof(*sfp_tlv_info_req)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %lu\n", + in_size, *out_size, sizeof(*sfp_tlv_info)); + return -EINVAL; + } + + err = hinic3_get_sfp_tlv_info(nic_dev->hwdev, + sfp_tlv_info, sfp_tlv_info_req); + if (err != 0) { + sfp_tlv_info->head.status = MT_EIO; + return 0; + } + + return 0; +} + static int get_xsfp_info(struct hinic3_nic_dev *nic_dev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size) { @@ -928,7 +1043,7 @@ static const struct nic_drv_module_handle nic_driv_module_cmd_handle[] = { {GET_LOOPBACK_MODE, get_loopback_mode}, {SET_LOOPBACK_MODE, set_loopback_mode}, {SET_LINK_MODE, set_link_mode}, - {SET_PF_BW_LIMIT, set_pf_bw_limit}, + {SET_TX_PF_BW_LIMIT, set_pf_bw_limit}, {GET_PF_BW_LIMIT, get_pf_bw_limit}, {GET_SSET_COUNT, get_sset_count}, {GET_SSET_ITEMS, get_sset_stats}, @@ -940,17 +1055,22 @@ static const struct nic_drv_module_handle nic_driv_module_cmd_handle[] = { {SET_TX_TIMEOUT, set_netdev_tx_timeout}, {GET_XSFP_PRESENT, get_xsfp_present}, {GET_XSFP_INFO, get_xsfp_info}, + {GET_XSFP_INFO_COMP_CMIS, get_xsfp_tlv_info}, + {SET_RX_PF_BW_LIMIT, set_rx_pf_bw_limit} }; static int send_to_nic_driver(struct hinic3_nic_dev *nic_dev, u32 cmd, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size) { - int index, num_cmds = sizeof(nic_driv_module_cmd_handle) / - sizeof(nic_driv_module_cmd_handle[0]); + int index, num_cmds = (int)(sizeof(nic_driv_module_cmd_handle) / + sizeof(nic_driv_module_cmd_handle[0])); enum driver_cmd_type cmd_type = (enum driver_cmd_type)cmd; int err = 0; + if (cmd_type == DCB_STATE || cmd_type == QOS_DEV) + rtnl_lock(); + mutex_lock(&nic_dev->nic_mutex); for (index = 0; index < num_cmds; index++) { if (cmd_type == @@ -963,6 +1083,9 @@ static int send_to_nic_driver(struct hinic3_nic_dev *nic_dev, } mutex_unlock(&nic_dev->nic_mutex); + if (cmd_type == DCB_STATE || cmd_type == QOS_DEV) + rtnl_unlock(); + if (index == num_cmds) { pr_err("Can't find callback for %d\n", cmd_type); return -EINVAL; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_dcb.c 
b/drivers/net/ethernet/huawei/hinic3/hinic3_dcb.c index a1fb4afb323ec6d750842804f3b1401ca577a9d5..aa53c1961ac2925e40d8afcedb47e3f6e73c17ca 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_dcb.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_dcb.c @@ -24,82 +24,80 @@ u8 hinic3_get_dev_user_cos_num(struct hinic3_nic_dev *nic_dev) { - if (nic_dev->hw_dcb_cfg.trust == 0) - return nic_dev->hw_dcb_cfg.pcp_user_cos_num; - if (nic_dev->hw_dcb_cfg.trust == 1) - return nic_dev->hw_dcb_cfg.dscp_user_cos_num; + struct hinic3_dcb *dcb = nic_dev->dcb; + + if (dcb->hw_dcb_cfg.trust == HINIC3_DCB_PCP) + return dcb->hw_dcb_cfg.pcp_user_cos_num; + if (dcb->hw_dcb_cfg.trust == HINIC3_DCB_DSCP) + return dcb->hw_dcb_cfg.dscp_user_cos_num; return 0; } u8 hinic3_get_dev_valid_cos_map(struct hinic3_nic_dev *nic_dev) { - if (nic_dev->hw_dcb_cfg.trust == 0) - return nic_dev->hw_dcb_cfg.pcp_valid_cos_map; - if (nic_dev->hw_dcb_cfg.trust == 1) - return nic_dev->hw_dcb_cfg.dscp_valid_cos_map; + struct hinic3_dcb *dcb = nic_dev->dcb; + + if (dcb->hw_dcb_cfg.trust == HINIC3_DCB_PCP) + return dcb->hw_dcb_cfg.pcp_valid_cos_map; + if (dcb->hw_dcb_cfg.trust == HINIC3_DCB_DSCP) + return dcb->hw_dcb_cfg.dscp_valid_cos_map; return 0; } void hinic3_update_qp_cos_cfg(struct hinic3_nic_dev *nic_dev, u8 num_cos) { - struct hinic3_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; - u8 i, remainder, num_sq_per_cos, cur_cos_num = 0; + struct hinic3_dcb_config *hw_dcb_cfg = &nic_dev->dcb->hw_dcb_cfg; + struct hinic3_dcb_config *wanted_dcb_cfg = + &nic_dev->dcb->wanted_dcb_cfg; u8 valid_cos_map = hinic3_get_dev_valid_cos_map(nic_dev); + u8 cos_qp_num, cos_qp_offset = 0; + u8 i, remainder, num_qp_per_cos; - if (num_cos == 0) - return; - - num_sq_per_cos = (u8)(nic_dev->q_params.num_qps / num_cos); - if (num_sq_per_cos == 0) + if (num_cos == 0 || nic_dev->q_params.num_qps == 0) return; - remainder = nic_dev->q_params.num_qps % num_sq_per_cos; + num_qp_per_cos = (u8)(nic_dev->q_params.num_qps / num_cos); + remainder = nic_dev->q_params.num_qps % num_cos; - memset(dcb_cfg->cos_qp_offset, 0, sizeof(dcb_cfg->cos_qp_offset)); - memset(dcb_cfg->cos_qp_num, 0, sizeof(dcb_cfg->cos_qp_num)); + memset(hw_dcb_cfg->cos_qp_offset, 0, sizeof(hw_dcb_cfg->cos_qp_offset)); + memset(hw_dcb_cfg->cos_qp_num, 0, sizeof(hw_dcb_cfg->cos_qp_num)); for (i = 0; i < PCP_MAX_UP; i++) { if (BIT(i) & valid_cos_map) { - u8 cos_qp_num = num_sq_per_cos; - u8 cos_qp_offset = (u8)(cur_cos_num * num_sq_per_cos); - - if (cur_cos_num < remainder) { - cos_qp_num++; - cos_qp_offset += cur_cos_num; - } else { - cos_qp_offset += remainder; - } - - cur_cos_num++; - valid_cos_map -= (u8)BIT(i); + cos_qp_num = num_qp_per_cos + ((remainder > 0) ? 
+ (remainder--, 1) : 0); - dcb_cfg->cos_qp_offset[i] = cos_qp_offset; - dcb_cfg->cos_qp_num[i] = cos_qp_num; + hw_dcb_cfg->cos_qp_offset[i] = cos_qp_offset; + hw_dcb_cfg->cos_qp_num[i] = cos_qp_num; hinic3_info(nic_dev, drv, "cos %u, cos_qp_offset=%u cos_qp_num=%u\n", i, cos_qp_offset, cos_qp_num); + + cos_qp_offset += cos_qp_num; + valid_cos_map -= (int)BIT(i); } } - memcpy(nic_dev->wanted_dcb_cfg.cos_qp_offset, dcb_cfg->cos_qp_offset, - sizeof(dcb_cfg->cos_qp_offset)); - memcpy(nic_dev->wanted_dcb_cfg.cos_qp_num, dcb_cfg->cos_qp_num, - sizeof(dcb_cfg->cos_qp_num)); + memcpy(wanted_dcb_cfg->cos_qp_offset, hw_dcb_cfg->cos_qp_offset, + sizeof(hw_dcb_cfg->cos_qp_offset)); + memcpy(wanted_dcb_cfg->cos_qp_num, hw_dcb_cfg->cos_qp_num, + sizeof(hw_dcb_cfg->cos_qp_num)); } void hinic3_update_tx_db_cos(struct hinic3_nic_dev *nic_dev, u8 dcb_en) { + struct hinic3_dcb_config *hw_dcb_cfg = &nic_dev->dcb->hw_dcb_cfg; u8 i; u16 start_qid, q_num; hinic3_set_txq_cos(nic_dev, 0, nic_dev->q_params.num_qps, - nic_dev->hw_dcb_cfg.default_cos); + hw_dcb_cfg->default_cos); if (!dcb_en) return; for (i = 0; i < NIC_DCB_COS_MAX; i++) { - q_num = (u16)nic_dev->hw_dcb_cfg.cos_qp_num[i]; + q_num = (u16)hw_dcb_cfg->cos_qp_num[i]; if (q_num) { - start_qid = (u16)nic_dev->hw_dcb_cfg.cos_qp_offset[i]; + start_qid = (u16)hw_dcb_cfg->cos_qp_offset[i]; hinic3_set_txq_cos(nic_dev, start_qid, q_num, i); hinic3_info(nic_dev, drv, "update tx db cos, start_qid %u, q_num=%u cos=%u\n", @@ -110,30 +108,29 @@ void hinic3_update_tx_db_cos(struct hinic3_nic_dev *nic_dev, u8 dcb_en) static int hinic3_set_tx_cos_state(struct hinic3_nic_dev *nic_dev, u8 dcb_en) { - struct hinic3_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; + struct hinic3_dcb *dcb = nic_dev->dcb; + struct hinic3_dcb_config *hw_dcb_cfg = &dcb->hw_dcb_cfg; struct hinic3_dcb_state dcb_state = {0}; u8 i; int err; - if (HINIC3_FUNC_IS_VF(nic_dev->hwdev)) { - /* VF does not support DCB, use the default cos */ - dcb_cfg->default_cos = (u8)fls(nic_dev->func_dft_cos_bitmap) - 1; - - return 0; - } + u32 pcp2cos_size = sizeof(dcb_state.pcp2cos); + u32 dscp2cos_size = sizeof(dcb_state.dscp2cos); dcb_state.dcb_on = dcb_en; - dcb_state.default_cos = dcb_cfg->default_cos; - dcb_state.trust = dcb_cfg->trust; + dcb_state.default_cos = hw_dcb_cfg->default_cos; + dcb_state.trust = hw_dcb_cfg->trust; if (dcb_en) { for (i = 0; i < NIC_DCB_COS_MAX; i++) - dcb_state.pcp2cos[i] = dcb_cfg->pcp2cos[i]; + dcb_state.pcp2cos[i] = hw_dcb_cfg->pcp2cos[i]; for (i = 0; i < NIC_DCB_IP_PRI_MAX; i++) - dcb_state.dscp2cos[i] = dcb_cfg->dscp2cos[i]; + dcb_state.dscp2cos[i] = hw_dcb_cfg->dscp2cos[i]; } else { - memset(dcb_state.pcp2cos, dcb_cfg->default_cos, sizeof(dcb_state.pcp2cos)); - memset(dcb_state.dscp2cos, dcb_cfg->default_cos, sizeof(dcb_state.dscp2cos)); + memset(dcb_state.pcp2cos, hw_dcb_cfg->default_cos, + pcp2cos_size); + memset(dcb_state.dscp2cos, hw_dcb_cfg->default_cos, + dscp2cos_size); } err = hinic3_set_dcb_state(nic_dev->hwdev, &dcb_state); @@ -143,7 +140,7 @@ static int hinic3_set_tx_cos_state(struct hinic3_nic_dev *nic_dev, u8 dcb_en) return err; } -static int hinic3_configure_dcb_hw(struct hinic3_nic_dev *nic_dev, u8 dcb_en) +int hinic3_configure_dcb_hw(struct hinic3_nic_dev *nic_dev, u8 dcb_en) { int err; u8 user_cos_num = hinic3_get_dev_user_cos_num(nic_dev); @@ -169,11 +166,13 @@ static int hinic3_configure_dcb_hw(struct hinic3_nic_dev *nic_dev, u8 dcb_en) goto rx_configure_fail; } - if (dcb_en) + if (dcb_en) { set_bit(HINIC3_DCB_ENABLE, &nic_dev->flags); - else + 
set_bit(HINIC3_DCB_ENABLE, &nic_dev->nic_vram->flags); + } else { clear_bit(HINIC3_DCB_ENABLE, &nic_dev->flags); - + clear_bit(HINIC3_DCB_ENABLE, &nic_dev->nic_vram->flags); + } return 0; rx_configure_fail: hinic3_set_tx_cos_state(nic_dev, dcb_en ? 0 : 1); @@ -188,6 +187,7 @@ static int hinic3_configure_dcb_hw(struct hinic3_nic_dev *nic_dev, u8 dcb_en) int hinic3_setup_cos(struct net_device *netdev, u8 cos, u8 netif_run) { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dcb *dcb = nic_dev->dcb; int err; if (cos && test_bit(HINIC3_SAME_RXTX, &nic_dev->flags)) { @@ -195,9 +195,10 @@ int hinic3_setup_cos(struct net_device *netdev, u8 cos, u8 netif_run) return -EOPNOTSUPP; } - if (cos > nic_dev->cos_config_num_max) { - nicif_err(nic_dev, drv, netdev, "Invalid num_tc: %u, max cos: %u\n", - cos, nic_dev->cos_config_num_max); + if (cos > dcb->cos_config_num_max) { + nicif_err(nic_dev, drv, netdev, + "Invalid num_tc: %u, max cos: %u\n", + cos, dcb->cos_config_num_max); return -EINVAL; } @@ -223,36 +224,48 @@ static u8 get_cos_num(u8 hw_valid_cos_bitmap) static void hinic3_sync_dcb_cfg(struct hinic3_nic_dev *nic_dev, const struct hinic3_dcb_config *dcb_cfg) { - struct hinic3_dcb_config *hw_cfg = &nic_dev->hw_dcb_cfg; + struct hinic3_dcb_config *hw_dcb_cfg = &nic_dev->dcb->hw_dcb_cfg; - memcpy(hw_cfg, dcb_cfg, sizeof(struct hinic3_dcb_config)); + memcpy(hw_dcb_cfg, dcb_cfg, sizeof(struct hinic3_dcb_config)); } static int init_default_dcb_cfg(struct hinic3_nic_dev *nic_dev, struct hinic3_dcb_config *dcb_cfg) { + struct hinic3_dcb *dcb = nic_dev->dcb; u8 i, hw_dft_cos_map, port_cos_bitmap, dscp_ind; int err; + int is_in_kexec; - err = hinic3_cos_valid_bitmap(nic_dev->hwdev, &hw_dft_cos_map, &port_cos_bitmap); + err = hinic3_cos_valid_bitmap(nic_dev->hwdev, + &hw_dft_cos_map, &port_cos_bitmap); if (err) { hinic3_err(nic_dev, drv, "None cos supported\n"); return -EFAULT; } - nic_dev->func_dft_cos_bitmap = hw_dft_cos_map; - nic_dev->port_dft_cos_bitmap = port_cos_bitmap; - nic_dev->cos_config_num_max = get_cos_num(hw_dft_cos_map); + is_in_kexec = vram_get_kexec_flag(); - dcb_cfg->trust = DCB_PCP; - dcb_cfg->pcp_user_cos_num = nic_dev->cos_config_num_max; - dcb_cfg->dscp_user_cos_num = nic_dev->cos_config_num_max; - dcb_cfg->default_cos = (u8)fls(nic_dev->func_dft_cos_bitmap) - 1; + dcb->func_dft_cos_bitmap = hw_dft_cos_map; + dcb->port_dft_cos_bitmap = port_cos_bitmap; + + dcb->cos_config_num_max = get_cos_num(hw_dft_cos_map); + + if (is_in_kexec == 0) { + dcb_cfg->trust = HINIC3_DCB_PCP; + dcb_cfg->default_cos = (u8)fls(dcb->func_dft_cos_bitmap) - 1; + } else { + dcb_cfg->trust = nic_dev->dcb->hw_dcb_cfg.trust; + dcb_cfg->default_cos = nic_dev->dcb->hw_dcb_cfg.default_cos; + } + dcb_cfg->pcp_user_cos_num = dcb->cos_config_num_max; + dcb_cfg->dscp_user_cos_num = dcb->cos_config_num_max; dcb_cfg->pcp_valid_cos_map = hw_dft_cos_map; dcb_cfg->dscp_valid_cos_map = hw_dft_cos_map; for (i = 0; i < NIC_DCB_COS_MAX; i++) { - dcb_cfg->pcp2cos[i] = hw_dft_cos_map & BIT(i) ? i : dcb_cfg->default_cos; + dcb_cfg->pcp2cos[i] = hw_dft_cos_map & BIT(i) + ? i : (u8)fls(dcb->func_dft_cos_bitmap) - 1; for (dscp_ind = 0; dscp_ind < NIC_DCB_COS_MAX; dscp_ind++) dcb_cfg->dscp2cos[i * NIC_DCB_DSCP_NUM + dscp_ind] = dcb_cfg->pcp2cos[i]; } @@ -276,58 +289,121 @@ int hinic3_configure_dcb(struct net_device *netdev) int err; err = hinic3_sync_dcb_state(nic_dev->hwdev, 1, - test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags) ? 1 : 0); + test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags) + ? 
1 : 0); if (err) { hinic3_err(nic_dev, drv, "Set dcb state failed\n"); return err; } if (test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags)) - hinic3_sync_dcb_cfg(nic_dev, &nic_dev->wanted_dcb_cfg); + hinic3_sync_dcb_cfg(nic_dev, &nic_dev->dcb->wanted_dcb_cfg); else hinic3_dcb_reset_hw_config(nic_dev); return 0; } +static int hinic3_dcb_alloc(struct hinic3_nic_dev *nic_dev) +{ + u16 func_id; + int is_use_vram; + int ret; + + is_use_vram = get_use_vram_flag(); + if (is_use_vram) { + func_id = hinic3_global_func_id(nic_dev->hwdev); + ret = snprintf(nic_dev->dcb_name, VRAM_NAME_MAX_LEN, + "%s%u%s", VRAM_CQM_GLB_FUNC_BASE, func_id, + VRAM_NIC_DCB); + if (ret < 0) { + hinic3_err(nic_dev, drv, "Nic dcb snprintf failed, ret:%d.\n", ret); + return ret; + } + + nic_dev->dcb = (struct hinic3_dcb *)hi_vram_kalloc(nic_dev->dcb_name, + sizeof(*nic_dev->dcb)); + if (!nic_dev->dcb) { + hinic3_err(nic_dev, drv, "Failed to vram alloc dcb.\n"); + return -EFAULT; + } + } else { + nic_dev->dcb = kzalloc(sizeof(*nic_dev->dcb), GFP_KERNEL); + if (!nic_dev->dcb) { + hinic3_err(nic_dev, drv, "Failed to create dcb.\n"); + return -EFAULT; + } + } + + return 0; +} + +static void hinic3_dcb_free(struct hinic3_nic_dev *nic_dev) +{ + int is_use_vram; + + is_use_vram = get_use_vram_flag(); + if (is_use_vram) + hi_vram_kfree((void *)nic_dev->dcb, nic_dev->dcb_name, sizeof(*nic_dev->dcb)); + else + kfree(nic_dev->dcb); + nic_dev->dcb = NULL; +} + +void hinic3_dcb_deinit(struct hinic3_nic_dev *nic_dev) +{ + if (test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags)) + hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0); + + hinic3_dcb_free(nic_dev); +} + int hinic3_dcb_init(struct hinic3_nic_dev *nic_dev) { - struct hinic3_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; + struct hinic3_dcb_config *hw_dcb_cfg = NULL; int err; u8 dcb_en = test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags) ? 
1 : 0; - if (HINIC3_FUNC_IS_VF(nic_dev->hwdev)) - return hinic3_set_tx_cos_state(nic_dev, dcb_en); + err = hinic3_dcb_alloc(nic_dev); + if (err != 0) { + hinic3_err(nic_dev, drv, "Dcb alloc failed.\n"); + return err; + } - err = init_default_dcb_cfg(nic_dev, dcb_cfg); + hw_dcb_cfg = &nic_dev->dcb->hw_dcb_cfg; + err = init_default_dcb_cfg(nic_dev, hw_dcb_cfg); if (err) { - hinic3_err(nic_dev, drv, "Initialize dcb configuration failed\n"); + hinic3_err(nic_dev, drv, + "Initialize dcb configuration failed\n"); + hinic3_dcb_free(nic_dev); return err; } - memcpy(&nic_dev->wanted_dcb_cfg, &nic_dev->hw_dcb_cfg, sizeof(struct hinic3_dcb_config)); + memcpy(&nic_dev->dcb->wanted_dcb_cfg, hw_dcb_cfg, + sizeof(struct hinic3_dcb_config)); hinic3_info(nic_dev, drv, "Support num cos %u, default cos %u\n", - nic_dev->cos_config_num_max, dcb_cfg->default_cos); + nic_dev->dcb->cos_config_num_max, hw_dcb_cfg->default_cos); err = hinic3_set_tx_cos_state(nic_dev, dcb_en); if (err) { hinic3_err(nic_dev, drv, "Set tx cos state failed\n"); + hinic3_dcb_free(nic_dev); return err; } - sema_init(&nic_dev->dcb_sem, 1); - return 0; } -static int change_qos_cfg(struct hinic3_nic_dev *nic_dev, const struct hinic3_dcb_config *dcb_cfg) +static int change_qos_cfg(struct hinic3_nic_dev *nic_dev, + const struct hinic3_dcb_config *dcb_cfg) { struct net_device *netdev = nic_dev->netdev; + struct hinic3_dcb *dcb = nic_dev->dcb; int err = 0; u8 user_cos_num = hinic3_get_dev_user_cos_num(nic_dev); - if (test_and_set_bit(HINIC3_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) { + if (test_and_set_bit(HINIC3_DCB_UP_COS_SETTING, &dcb->dcb_flags)) { nicif_warn(nic_dev, drv, netdev, "Cos_up map setting in inprocess, please try again later\n"); return -EFAULT; @@ -337,40 +413,44 @@ static int change_qos_cfg(struct hinic3_nic_dev *nic_dev, const struct hinic3_dc hinic3_update_qp_cos_cfg(nic_dev, user_cos_num); - clear_bit(HINIC3_DCB_UP_COS_SETTING, &nic_dev->dcb_flags); + clear_bit(HINIC3_DCB_UP_COS_SETTING, &dcb->dcb_flags); return err; } int hinic3_dcbcfg_set_up_bitmap(struct hinic3_nic_dev *nic_dev) { + struct hinic3_dcb *dcb = nic_dev->dcb; int err, rollback_err; u8 netif_run = 0; struct hinic3_dcb_config old_dcb_cfg; u8 user_cos_num = hinic3_get_dev_user_cos_num(nic_dev); - memcpy(&old_dcb_cfg, &nic_dev->hw_dcb_cfg, sizeof(struct hinic3_dcb_config)); + memcpy(&old_dcb_cfg, &dcb->hw_dcb_cfg, + sizeof(struct hinic3_dcb_config)); - if (!memcmp(&nic_dev->wanted_dcb_cfg, &old_dcb_cfg, sizeof(struct hinic3_dcb_config))) { + if (!memcmp(&dcb->wanted_dcb_cfg, &old_dcb_cfg, + sizeof(struct hinic3_dcb_config))) { nicif_info(nic_dev, drv, nic_dev->netdev, "Same valid up bitmap, don't need to change anything\n"); return 0; } - rtnl_lock(); if (netif_running(nic_dev->netdev)) { netif_run = 1; hinic3_vport_down(nic_dev); } - err = change_qos_cfg(nic_dev, &nic_dev->wanted_dcb_cfg); + err = change_qos_cfg(nic_dev, &dcb->wanted_dcb_cfg); if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Set cos_up map to hw failed\n"); + nicif_err(nic_dev, drv, nic_dev->netdev, + "Set cos_up map to hw failed\n"); goto change_qos_cfg_fail; } if (test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags)) { - err = hinic3_setup_cos(nic_dev->netdev, user_cos_num, netif_run); + err = hinic3_setup_cos(nic_dev->netdev, + user_cos_num, netif_run); if (err) goto set_err; } @@ -381,13 +461,12 @@ int hinic3_dcbcfg_set_up_bitmap(struct hinic3_nic_dev *nic_dev) goto vport_up_fail; } - rtnl_unlock(); - return 0; vport_up_fail: if (test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags)) - 
hinic3_setup_cos(nic_dev->netdev, user_cos_num ? 0 : user_cos_num, netif_run); + hinic3_setup_cos(nic_dev->netdev, user_cos_num + ? 0 : user_cos_num, netif_run); set_err: rollback_err = change_qos_cfg(nic_dev, &old_dcb_cfg); @@ -399,7 +478,5 @@ int hinic3_dcbcfg_set_up_bitmap(struct hinic3_nic_dev *nic_dev) if (netif_run) hinic3_vport_up(nic_dev); - rtnl_unlock(); - return err; } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_dcb.h b/drivers/net/ethernet/huawei/hinic3/hinic3_dcb.h index 7987f563cfff368cbd955fcc16e2be680257d726..e0b35cbd4f09bf08e0a23b67f9ad190ecd14e1c1 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_dcb.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_dcb.h @@ -24,11 +24,6 @@ struct hinic3_tc_cfg { u16 rsvd; }; -enum HINIC3_DCB_TRUST { - DCB_PCP, - DCB_DSCP, -}; - #define PCP_MAX_UP 8 #define DSCP_MAC_UP 64 #define DBG_DFLT_DSCP_VAL 0xFF @@ -50,6 +45,7 @@ struct hinic3_dcb_config { u8 hinic3_get_dev_user_cos_num(struct hinic3_nic_dev *nic_dev); u8 hinic3_get_dev_valid_cos_map(struct hinic3_nic_dev *nic_dev); int hinic3_dcb_init(struct hinic3_nic_dev *nic_dev); +void hinic3_dcb_deinit(struct hinic3_nic_dev *nic_dev); void hinic3_dcb_reset_hw_config(struct hinic3_nic_dev *nic_dev); int hinic3_configure_dcb(struct net_device *netdev); int hinic3_setup_cos(struct net_device *netdev, u8 cos, u8 netif_run); @@ -75,4 +71,5 @@ void hinic3_update_tx_db_cos(struct hinic3_nic_dev *nic_dev, u8 dcb_en); void hinic3_update_qp_cos_cfg(struct hinic3_nic_dev *nic_dev, u8 num_cos); void hinic3_vport_down(struct hinic3_nic_dev *nic_dev); int hinic3_vport_up(struct hinic3_nic_dev *nic_dev); +int hinic3_configure_dcb_hw(struct hinic3_nic_dev *nic_dev, u8 dcb_en); #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c index 2b3561e5bca12a9df7d21cd975909ffbc79cc0c6..548d67d807cf77915e046960db47f9ce69ba39a3 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c @@ -44,9 +44,9 @@ static void hinic3_get_drvinfo(struct net_device *netdev, u8 mgmt_ver[HINIC3_MGMT_VERSION_MAX_LEN] = {0}; int err; - strlcpy(info->driver, HINIC3_NIC_DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, HINIC3_NIC_DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); + strscpy(info->driver, HINIC3_NIC_DRV_NAME, sizeof(info->driver)); + strscpy(info->version, HINIC3_NIC_DRV_VERSION, sizeof(info->version)); + strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); err = hinic3_get_mgmt_version(nic_dev->hwdev, mgmt_ver, HINIC3_MGMT_VERSION_MAX_LEN, @@ -123,10 +123,15 @@ static int hinic3_nway_reset(struct net_device *netdev) return err; } +#ifdef HAVE_ETHTOOL_RINGPARAM_EXTACK static void hinic3_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) +#else +static void hinic3_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); @@ -176,10 +181,15 @@ static int check_ringparam_valid(struct net_device *netdev, return 0; } +#ifdef HAVE_ETHTOOL_RINGPARAM_EXTACK static int hinic3_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) +#else +static int hinic3_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif { 
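	/* Editorial note on the #ifdef wrappers above: the patch keeps two
	 * prototypes for the get/set ringparam handlers and picks one at build
	 * time. HAVE_ETHTOOL_RINGPARAM_EXTACK is assumed to come from the
	 * driver's kernel-compatibility layer (it is not defined in this hunk);
	 * a minimal sketch of how such a guard is typically derived is:
	 *
	 *	#include <linux/version.h>
	 *	#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)
	 *	#define HAVE_ETHTOOL_RINGPARAM_EXTACK
	 *	#endif
	 *
	 * When the guard is set, the four-argument form (taking
	 * struct kernel_ethtool_ringparam and struct netlink_ext_ack) is built;
	 * otherwise the legacy two-argument form is used. The function body
	 * below is shared by both prototypes.
	 */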
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); struct hinic3_dyna_txrxq_params q_params = {0}; @@ -273,7 +283,7 @@ static int get_coalesce(struct net_device *netdev, static int set_queue_coalesce(struct hinic3_nic_dev *nic_dev, u16 q_id, struct hinic3_intr_coal_info *coal) { - struct hinic3_intr_coal_info *intr_coal; + struct hinic3_intr_coal_info *intr_coal = NULL; struct interrupt_info info = {0}; struct net_device *netdev = nic_dev->netdev; int err; @@ -367,24 +377,11 @@ static int is_coalesce_exceed_limit(struct net_device *netdev, return 0; } -static int is_coalesce_legal(struct net_device *netdev, - const struct ethtool_coalesce *coal) +static int is_coalesce_allowed_change(struct net_device *netdev, + const struct ethtool_coalesce *coal) { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); struct ethtool_coalesce tmp_coal = {0}; - int err; - - if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) { - nicif_err(nic_dev, drv, netdev, - "tx-usecs must be equal to rx-usecs\n"); - return -EINVAL; - } - - if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) { - nicif_err(nic_dev, drv, netdev, - "tx-frames must be equal to rx-frames\n"); - return -EINVAL; - } tmp_coal.cmd = coal->cmd; tmp_coal.rx_coalesce_usecs = coal->rx_coalesce_usecs; @@ -409,6 +406,31 @@ static int is_coalesce_legal(struct net_device *netdev, return -EOPNOTSUPP; } + return 0; +} + +static int is_coalesce_legal(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) { + nicif_err(nic_dev, drv, netdev, + "tx-usecs must be equal to rx-usecs\n"); + return -EINVAL; + } + + if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) { + nicif_err(nic_dev, drv, netdev, + "tx-frames must be equal to rx-frames\n"); + return -EINVAL; + } + + err = is_coalesce_allowed_change(netdev, coal); + if (err) + return err; + err = is_coalesce_exceed_limit(netdev, coal); if (err) return err; @@ -491,19 +513,10 @@ static int set_hw_coal_param(struct hinic3_nic_dev *nic_dev, return 0; } -static int set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal, u16 queue) +static void check_coalesce_align(struct net_device *netdev, + struct ethtool_coalesce *coal) { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); - struct hinic3_intr_coal_info intr_coal = {0}; - struct hinic3_intr_coal_info *ori_intr_coal = NULL; - u32 last_adaptive_rx; - char obj_str[32] = {0}; - int err = 0; - - err = is_coalesce_legal(netdev, coal); - if (err) - return err; CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT); CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames, @@ -516,6 +529,14 @@ static int set_coalesce(struct net_device *netdev, COALESCE_TIMER_CFG_UNIT); CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low, COALESCE_PENDING_LIMIT_UNIT); +} + +static int check_coalesce_change(struct net_device *netdev, + u16 queue, struct ethtool_coalesce *coal) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_intr_coal_info *ori_intr_coal = NULL; + char obj_str[32] = {0}; if (queue == COALESCE_ALL_QUEUE) { ori_intr_coal = &nic_dev->intr_coalesce[0]; @@ -524,6 +545,7 @@ static int set_coalesce(struct net_device *netdev, ori_intr_coal = &nic_dev->intr_coalesce[queue]; snprintf(obj_str, sizeof(obj_str), "for queue %u", queue); } + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT, ori_intr_coal->coalesce_timer_cfg, 
obj_str); CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames, @@ -545,28 +567,52 @@ static int set_coalesce(struct net_device *netdev, CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low, COALESCE_PENDING_LIMIT_UNIT, ori_intr_coal->rx_pending_limt_low, obj_str); + return 0; +} - intr_coal.coalesce_timer_cfg = +static void init_intr_coal_params(struct hinic3_intr_coal_info *intr_coal, + struct ethtool_coalesce *coal) +{ + intr_coal->coalesce_timer_cfg = (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); - intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames / - COALESCE_PENDING_LIMIT_UNIT); + intr_coal->pending_limt = (u8)(coal->rx_max_coalesced_frames / + COALESCE_PENDING_LIMIT_UNIT); - last_adaptive_rx = nic_dev->adaptive_rx_coal; - nic_dev->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; - - intr_coal.pkt_rate_high = coal->pkt_rate_high; - intr_coal.rx_usecs_high = + intr_coal->pkt_rate_high = coal->pkt_rate_high; + intr_coal->rx_usecs_high = (u8)(coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT); - intr_coal.rx_pending_limt_high = + intr_coal->rx_pending_limt_high = (u8)(coal->rx_max_coalesced_frames_high / COALESCE_PENDING_LIMIT_UNIT); - intr_coal.pkt_rate_low = coal->pkt_rate_low; - intr_coal.rx_usecs_low = + intr_coal->pkt_rate_low = coal->pkt_rate_low; + intr_coal->rx_usecs_low = (u8)(coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT); - intr_coal.rx_pending_limt_low = + intr_coal->rx_pending_limt_low = (u8)(coal->rx_max_coalesced_frames_low / COALESCE_PENDING_LIMIT_UNIT); +} + +static int set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_intr_coal_info intr_coal = {0}; + u32 last_adaptive_rx; + int err = 0; + + err = is_coalesce_legal(netdev, coal); + if (err) + return err; + + check_coalesce_align(netdev, coal); + + check_coalesce_change(netdev, queue, coal); + + init_intr_coal_params(&intr_coal, coal); + + last_adaptive_rx = nic_dev->adaptive_rx_coal; + nic_dev->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; /* coalesce timer or pending set to zero will disable coalesce */ if (!nic_dev->adaptive_rx_coal && @@ -588,18 +634,28 @@ static int set_coalesce(struct net_device *netdev, return set_hw_coal_param(nic_dev, &intr_coal, queue); } +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK static int hinic3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) +#else +static int hinic3_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +#endif { return get_coalesce(netdev, coal, COALESCE_ALL_QUEUE); } +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK static int hinic3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) +#else +static int hinic3_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +#endif { return set_coalesce(netdev, coal, COALESCE_ALL_QUEUE); } @@ -762,6 +818,14 @@ static int hinic3_get_module_info(struct net_device *netdev, modinfo->type = ETH_MODULE_SFF_8636; modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; break; + case MODULE_TYPE_DSFP: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + break; + case MODULE_TYPE_QSFP_CMIS: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + break; default: nicif_warn(nic_dev, drv, netdev, "Optical 
module unknown: 0x%x\n", sfp_type); @@ -784,6 +848,9 @@ static int hinic3_get_module_eeprom(struct net_device *netdev, memset(data, 0, ee->len); err = hinic3_get_sfp_eeprom(nic_dev->hwdev, (u8 *)sfp_data, ee->len); + if (err == HINIC3_MGMT_CMD_UNSUPPORTED) + err = hinic3_get_tlv_xsfp_eeprom(nic_dev->hwdev, (u8 *)sfp_data, sizeof(sfp_data)); + if (err) return err; @@ -814,7 +881,7 @@ static u32 hinic3_get_priv_flags(struct net_device *netdev) return priv_flags; } -int hinic3_set_rxq_recovery_flag(struct net_device *netdev, u32 priv_flags) +static int hinic3_set_rxq_recovery_flag(struct net_device *netdev, u32 priv_flags) { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); @@ -844,12 +911,14 @@ static int hinic3_set_symm_rss_flag(struct net_device *netdev, u32 priv_flags) if (priv_flags & HINIC3_PRIV_FLAGS_SYMM_RSS) { if (test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, "Failed to open Symmetric RSS while DCB is enabled\n"); + nicif_err(nic_dev, drv, netdev, + "Failed to open Symmetric RSS while DCB is enabled\n"); return -EOPNOTSUPP; } if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, "Failed to open Symmetric RSS while RSS is disabled\n"); + nicif_err(nic_dev, drv, netdev, + "Failed to open Symmetric RSS while RSS is disabled\n"); return -EOPNOTSUPP; } @@ -1129,10 +1198,61 @@ static void hinic3_diag_test(struct net_device *netdev, hinic3_lp_test(netdev, eth_test, data, 0); } +#if defined(ETHTOOL_GFECPARAM) && defined(ETHTOOL_SFECPARAM) +static int hinic3_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u8 advertised_fec = 0; + u8 supported_fec = 0; + int err; + + if (fecparam->cmd != ETHTOOL_GFECPARAM) { + nicif_err(nic_dev, drv, netdev, + "get fecparam cmd err.exp:0x%x,real:0x%x\n", + ETHTOOL_GFECPARAM, fecparam->cmd); + return -EINVAL; + } + + err = get_fecparam(nic_dev->hwdev, &advertised_fec, &supported_fec); + if (err) { + nicif_err(nic_dev, drv, netdev, "Get fec param failed\n"); + return err; + } + fecparam->active_fec = (u32)advertised_fec; + fecparam->fec = (u32)supported_fec; + + nicif_info(nic_dev, drv, netdev, "Get fec param success\n"); + return 0; +} + +static int hinic3_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (fecparam->cmd != ETHTOOL_SFECPARAM) { + nicif_err(nic_dev, drv, netdev, "Set fecparam cmd err.exp:0x%x,real:0x%x\n", ETHTOOL_SFECPARAM, fecparam->cmd); + return -EINVAL; + } + + err = set_fecparam(nic_dev->hwdev, (u8)fecparam->fec); + if (err) { + nicif_err(nic_dev, drv, netdev, "Set fec param failed\n"); + return err; + } + + nicif_info(nic_dev, drv, netdev, "Set fec param success\n"); + return 0; +} +#endif + static const struct ethtool_ops hinic3_ethtool_ops = { #ifdef SUPPORTED_COALESCE_PARAMS .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_PKT_RATE_RX_USECS, + ETHTOOL_COALESCE_PKT_RATE_RX_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_LOW_HIGH | + ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH, #endif #ifdef ETHTOOL_GLINKSETTINGS #ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS @@ -1179,6 +1299,11 @@ static const struct ethtool_ops hinic3_ethtool_ops = { .set_per_queue_coalesce = hinic3_set_per_queue_coalesce, #endif +#if defined(ETHTOOL_GFECPARAM) && defined(ETHTOOL_SFECPARAM) + .get_fecparam = hinic3_get_fecparam, + .set_fecparam = hinic3_set_fecparam, +#endif + 
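	/* Editorial note: the .get_fecparam/.set_fecparam callbacks registered
	 * above (guarded by ETHTOOL_GFECPARAM/ETHTOOL_SFECPARAM) pass the FEC
	 * mode between ethtool and the hwdev layer via get_fecparam() and
	 * set_fecparam(). A usage sketch, assuming the interface is named eth0:
	 *
	 *	ethtool --show-fec eth0              # -> hinic3_get_fecparam()
	 *	ethtool --set-fec eth0 encoding rs   # -> hinic3_set_fecparam()
	 *
	 * The same pair is added to hinic3vf_ethtool_ops further below.
	 */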
.get_rxnfc = hinic3_get_rxnfc, .set_rxnfc = hinic3_set_rxnfc, .get_priv_flags = hinic3_get_priv_flags, @@ -1239,7 +1364,10 @@ static const struct ethtool_ops_ext hinic3_ethtool_ops_ext = { static const struct ethtool_ops hinic3vf_ethtool_ops = { #ifdef SUPPORTED_COALESCE_PARAMS .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_PKT_RATE_RX_USECS, + ETHTOOL_COALESCE_PKT_RATE_RX_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_LOW_HIGH | + ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH, #endif #ifdef ETHTOOL_GLINKSETTINGS #ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS @@ -1266,6 +1394,11 @@ static const struct ethtool_ops hinic3vf_ethtool_ops = { .set_per_queue_coalesce = hinic3_set_per_queue_coalesce, #endif +#if defined(ETHTOOL_GFECPARAM) && defined(ETHTOOL_SFECPARAM) + .get_fecparam = hinic3_get_fecparam, + .set_fecparam = hinic3_set_fecparam, +#endif + .get_rxnfc = hinic3_get_rxnfc, .set_rxnfc = hinic3_set_rxnfc, .get_priv_flags = hinic3_get_priv_flags, diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool_stats.c b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool_stats.c index de59b7668254650778a16fd98a2795132375e815..ec89f62226604ec40df3948c73bd667c6832e767 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool_stats.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool_stats.c @@ -24,11 +24,24 @@ #include "hinic3_tx.h" #include "hinic3_rx.h" -#define FPGA_PORT_COUNTER 0 -#define EVB_PORT_COUNTER 1 -u16 mag_support_mode = EVB_PORT_COUNTER; -module_param(mag_support_mode, ushort, 0444); -MODULE_PARM_DESC(mag_support_mode, "Set mag port counter support mode, 0:FPGA 1:EVB, default is 1"); +#define HINIC_SET_LINK_STR_LEN 128 +#define HINIC_ETHTOOL_FEC_INFO_LEN 6 +#define HINIC_SUPPORTED_FEC_CMD 0 +#define HINIC_ADVERTISED_FEC_CMD 1 + +struct hinic3_ethtool_fec { + u8 hinic_fec_offset; + u8 ethtool_bit_offset; +}; + +static struct hinic3_ethtool_fec hinic3_ethtool_fec_info[HINIC_ETHTOOL_FEC_INFO_LEN] = { + {PORT_FEC_NOT_SET, 0xFF}, /* The ethtool does not have the corresponding enumeration variable */ + {PORT_FEC_RSFEC, 0x32}, /* ETHTOOL_LINK_MODE_FEC_RS_BIT */ + {PORT_FEC_BASEFEC, 0x33}, /* ETHTOOL_LINK_MODE_FEC_BASER_BIT */ + {PORT_FEC_NOFEC, 0x31}, /* ETHTOOL_LINK_MODE_FEC_NONE_BIT */ + {PORT_FEC_LLRSFEC, 0x4A}, /* ETHTOOL_LINK_MODE_FEC_LLRS_BIT: Available only in later versions */ + {PORT_FEC_AUTO, 0XFF} /* The ethtool does not have the corresponding enumeration variable */ +}; struct hinic3_stats { char name[ETH_GSTRING_LEN]; @@ -36,6 +49,20 @@ struct hinic3_stats { int offset; }; +struct hinic3_netdev_link_count_str { + u64 link_down_events_phy; +}; + +#define HINIC3_NETDEV_LINK_COUNT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct hinic3_netdev_link_count_str, _stat_item), \ + .offset = offsetof(struct hinic3_netdev_link_count_str, _stat_item) \ +} + +static struct hinic3_stats hinic3_netdev_link_count[] = { + HINIC3_NETDEV_LINK_COUNT(link_down_events_phy), +}; + #define HINIC3_NETDEV_STAT(_stat_item) { \ .name = #_stat_item, \ .size = FIELD_SIZEOF(struct rtnl_link_stats64, _stat_item), \ @@ -94,7 +121,6 @@ static struct hinic3_stats hinic3_nic_dev_stats_extern[] = { .offset = offsetof(struct hinic3_txq_stats, _stat_item) \ } -/*lint -save -e786*/ static struct hinic3_stats hinic3_rx_queue_stats[] = { HINIC3_RXQ_STAT(packets), HINIC3_RXQ_STAT(bytes), @@ -135,8 +161,6 @@ static struct hinic3_stats hinic3_tx_queue_stats_extern[] = { HINIC3_TXQ_STAT(rsvd2), }; -/*lint -restore*/ - #define HINIC3_FUNC_STAT(_stat_item) { \ 
.name = #_stat_item, \ .size = FIELD_SIZEOF(struct hinic3_vport_stats, _stat_item), \ @@ -260,49 +284,22 @@ static struct hinic3_stats hinic3_port_stats[] = { HINIC3_PORT_STAT(mac_rx_unfilter_pkt_num), }; +#define HINIC3_RSFEC_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct mag_cmd_rsfec_stats, _stat_item), \ + .offset = offsetof(struct mag_cmd_rsfec_stats, _stat_item) \ +} + +static struct hinic3_stats g_hinic3_rsfec_stats[] = { + HINIC3_RSFEC_STAT(rx_err_lane_phy), +}; + #define HINIC3_FGPA_PORT_STAT(_stat_item) { \ .name = #_stat_item, \ .size = FIELD_SIZEOF(struct hinic3_phy_fpga_port_stats, _stat_item), \ .offset = offsetof(struct hinic3_phy_fpga_port_stats, _stat_item) \ } -static struct hinic3_stats g_hinic3_fpga_port_stats[] = { - HINIC3_FGPA_PORT_STAT(mac_rx_total_octs_port), - HINIC3_FGPA_PORT_STAT(mac_tx_total_octs_port), - HINIC3_FGPA_PORT_STAT(mac_rx_under_frame_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_frag_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_64_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_127_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_255_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_511_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_1023_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_max_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_over_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_tx_64_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_tx_127_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_tx_255_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_tx_511_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_tx_1023_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_tx_max_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_tx_over_oct_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_good_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_crc_error_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_broadcast_ok_port), - HINIC3_FGPA_PORT_STAT(mac_rx_multicast_ok_port), - HINIC3_FGPA_PORT_STAT(mac_rx_mac_frame_ok_port), - HINIC3_FGPA_PORT_STAT(mac_rx_length_err_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_vlan_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_pause_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_rx_unknown_mac_frame_port), - HINIC3_FGPA_PORT_STAT(mac_tx_good_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_tx_broadcast_ok_port), - HINIC3_FGPA_PORT_STAT(mac_tx_multicast_ok_port), - HINIC3_FGPA_PORT_STAT(mac_tx_underrun_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_tx_mac_frame_ok_port), - HINIC3_FGPA_PORT_STAT(mac_tx_vlan_pkts_port), - HINIC3_FGPA_PORT_STAT(mac_tx_pause_pkts_port), -}; - static char g_hinic_priv_flags_strings[][ETH_GSTRING_LEN] = { "Symmetric-RSS", "Force-Link-up", @@ -313,12 +310,12 @@ u32 hinic3_get_io_stats_size(const struct hinic3_nic_dev *nic_dev) { u32 count; - count = ARRAY_LEN(hinic3_nic_dev_stats) + + count = (u32)(ARRAY_LEN(hinic3_nic_dev_stats) + ARRAY_LEN(hinic3_nic_dev_stats_extern) + (ARRAY_LEN(hinic3_tx_queue_stats) + ARRAY_LEN(hinic3_tx_queue_stats_extern) + ARRAY_LEN(hinic3_rx_queue_stats) + - ARRAY_LEN(hinic3_rx_queue_stats_extern)) * nic_dev->max_qps; + ARRAY_LEN(hinic3_rx_queue_stats_extern)) * nic_dev->max_qps); return count; } @@ -342,46 +339,81 @@ u32 hinic3_get_io_stats_size(const struct hinic3_nic_dev *nic_dev) } \ } while (0) -#define QUEUE_STATS_PACK(items, item_idx, array, stats_ptr, qid) do { \ - int j; \ - for (j = 0; j < ARRAY_LEN(array); j++) { \ - memcpy((items)[item_idx].name, (array)[j].name, \ - HINIC3_SHOW_ITEM_LEN); \ - snprintf((items)[item_idx].name, HINIC3_SHOW_ITEM_LEN, \ - (array)[j].name, (qid)); \ - (items)[item_idx].hexadecimal = 0; \ - 
(items)[item_idx].value = \ - GET_VALUE_OF_PTR((array)[j].size, \ - (char *)(stats_ptr) + (array)[j].offset); \ - (item_idx)++; \ - } \ -} while (0) +int hinic3_rx_queue_stat_pack(struct hinic3_show_item *item, + struct hinic3_stats *stat, struct hinic3_rxq_stats *rxq_stats, u16 qid) +{ + int ret; -void hinic3_get_io_stats(const struct hinic3_nic_dev *nic_dev, void *stats) + ret = snprintf(item->name, HINIC3_SHOW_ITEM_LEN, stat->name, qid); + if (ret < 0) + return -EINVAL; + + item->hexadecimal = 0; + item->value = GET_VALUE_OF_PTR(stat->size, (char *)(rxq_stats) + stat->offset); + + return 0; +} + +int hinic3_tx_queue_stat_pack(struct hinic3_show_item *item, + struct hinic3_stats *stat, struct hinic3_txq_stats *txq_stats, u16 qid) +{ + int ret; + + ret = snprintf(item->name, HINIC3_SHOW_ITEM_LEN, stat->name, qid); + if (ret < 0) + return -EINVAL; + + item->hexadecimal = 0; + item->value = GET_VALUE_OF_PTR(stat->size, (char *)(txq_stats) + stat->offset); + + return 0; +} + +int hinic3_get_io_stats(const struct hinic3_nic_dev *nic_dev, void *stats) { struct hinic3_show_item *items = stats; int item_idx = 0; u16 qid; + int idx; + int ret; DEV_STATS_PACK(items, item_idx, hinic3_nic_dev_stats, &nic_dev->stats); - DEV_STATS_PACK(items, item_idx, hinic3_nic_dev_stats_extern, - &nic_dev->stats); + DEV_STATS_PACK(items, item_idx, hinic3_nic_dev_stats_extern, &nic_dev->stats); for (qid = 0; qid < nic_dev->max_qps; qid++) { - QUEUE_STATS_PACK(items, item_idx, hinic3_tx_queue_stats, - &nic_dev->txqs[qid].txq_stats, qid); - QUEUE_STATS_PACK(items, item_idx, hinic3_tx_queue_stats_extern, - &nic_dev->txqs[qid].txq_stats, qid); + for (idx = 0; idx < ARRAY_LEN(hinic3_tx_queue_stats); idx++) { + ret = hinic3_tx_queue_stat_pack(&items[item_idx++], &hinic3_tx_queue_stats[idx], + &nic_dev->txqs[qid].txq_stats, qid); + if (ret != 0) + return -EINVAL; + } + + for (idx = 0; idx < ARRAY_LEN(hinic3_tx_queue_stats_extern); idx++) { + ret = hinic3_tx_queue_stat_pack(&items[item_idx++], &hinic3_tx_queue_stats_extern[idx], + &nic_dev->txqs[qid].txq_stats, qid); + if (ret != 0) + return -EINVAL; + } } for (qid = 0; qid < nic_dev->max_qps; qid++) { - QUEUE_STATS_PACK(items, item_idx, hinic3_rx_queue_stats, - &nic_dev->rxqs[qid].rxq_stats, qid); - QUEUE_STATS_PACK(items, item_idx, hinic3_rx_queue_stats_extern, - &nic_dev->rxqs[qid].rxq_stats, qid); + for (idx = 0; idx < ARRAY_LEN(hinic3_rx_queue_stats); idx++) { + ret = hinic3_rx_queue_stat_pack(&items[item_idx++], &hinic3_rx_queue_stats[idx], + &nic_dev->rxqs[qid].rxq_stats, qid); + if (ret != 0) + return -EINVAL; + } + + for (idx = 0; idx < ARRAY_LEN(hinic3_rx_queue_stats_extern); idx++) { + ret = hinic3_rx_queue_stat_pack(&items[item_idx++], &hinic3_rx_queue_stats_extern[idx], + &nic_dev->rxqs[qid].rxq_stats, qid); + if (ret != 0) + return -EINVAL; + } } -} + return 0; +} static char g_hinic3_test_strings[][ETH_GSTRING_LEN] = { "Internal lb test (on/offline)", "External lb test (external_lb)", @@ -398,16 +430,15 @@ int hinic3_get_sset_count(struct net_device *netdev, int sset) case ETH_SS_STATS: q_num = nic_dev->q_params.num_qps; count = ARRAY_LEN(hinic3_netdev_stats) + - ARRAY_LEN(hinic3_nic_dev_stats) + + ARRAY_LEN(hinic3_nic_dev_stats) + + ARRAY_LEN(hinic3_netdev_link_count) + ARRAY_LEN(hinic3_function_stats) + (ARRAY_LEN(hinic3_tx_queue_stats) + ARRAY_LEN(hinic3_rx_queue_stats)) * q_num; if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) { - if (mag_support_mode == FPGA_PORT_COUNTER) - count += ARRAY_LEN(g_hinic3_fpga_port_stats); - else - count += 
ARRAY_LEN(hinic3_port_stats); + count += ARRAY_LEN(hinic3_port_stats); + count += ARRAY_LEN(g_hinic3_rsfec_stats); } return count; @@ -452,9 +483,9 @@ static void get_drv_queue_stats(struct hinic3_nic_dev *nic_dev, u64 *data) } } -static u16 get_fpga_port_stats(struct hinic3_nic_dev *nic_dev, u64 *data) +static u16 get_ethtool_port_stats(struct hinic3_nic_dev *nic_dev, u64 *data) { - struct hinic3_phy_fpga_port_stats *port_stats = NULL; + struct mag_cmd_port_stats *port_stats = NULL; char *p = NULL; u16 i = 0, j = 0; int err; @@ -464,19 +495,19 @@ static u16 get_fpga_port_stats(struct hinic3_nic_dev *nic_dev, u64 *data) nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to malloc port stats\n"); memset(&data[i], 0, - ARRAY_LEN(g_hinic3_fpga_port_stats) * sizeof(*data)); - i += ARRAY_LEN(g_hinic3_fpga_port_stats); + ARRAY_LEN(hinic3_port_stats) * sizeof(*data)); + i += ARRAY_LEN(hinic3_port_stats); return i; } - err = hinic3_get_fpga_phy_port_stats(nic_dev->hwdev, port_stats); + err = hinic3_get_phy_port_stats(nic_dev->hwdev, port_stats); if (err) nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get port stats from fw\n"); - for (j = 0; j < ARRAY_LEN(g_hinic3_fpga_port_stats); j++, i++) { - p = (char *)(port_stats) + g_hinic3_fpga_port_stats[j].offset; - data[i] = (g_hinic3_fpga_port_stats[j].size == + for (j = 0; j < ARRAY_LEN(hinic3_port_stats); j++, i++) { + p = (char *)(port_stats) + hinic3_port_stats[j].offset; + data[i] = (hinic3_port_stats[j].size == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } @@ -485,35 +516,32 @@ static u16 get_fpga_port_stats(struct hinic3_nic_dev *nic_dev, u64 *data) return i; } -static u16 get_ethtool_port_stats(struct hinic3_nic_dev *nic_dev, u64 *data) +static u16 get_ethtool_rsfec_stats(struct hinic3_nic_dev *nic_dev, u64 *data) { - struct mag_cmd_port_stats *port_stats = NULL; + struct mag_cmd_rsfec_stats *port_stats = NULL; char *p = NULL; u16 i = 0, j = 0; int err; - if (mag_support_mode == FPGA_PORT_COUNTER) - return get_fpga_port_stats(nic_dev, data); - port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); if (!port_stats) { nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to malloc port stats\n"); memset(&data[i], 0, - ARRAY_LEN(hinic3_port_stats) * sizeof(*data)); - i += ARRAY_LEN(hinic3_port_stats); + ARRAY_LEN(g_hinic3_rsfec_stats) * sizeof(*data)); + i += ARRAY_LEN(g_hinic3_rsfec_stats); return i; - } + } - err = hinic3_get_phy_port_stats(nic_dev->hwdev, port_stats); + err = hinic3_get_phy_rsfec_stats(nic_dev->hwdev, port_stats); if (err) nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to get port stats from fw\n"); + "Failed to get rsfec stats from fw\n"); - for (j = 0; j < ARRAY_LEN(hinic3_port_stats); j++, i++) { - p = (char *)(port_stats) + hinic3_port_stats[j].offset; - data[i] = (hinic3_port_stats[j].size == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + for (j = 0; j < ARRAY_LEN(g_hinic3_rsfec_stats); j++, i++) { + p = (char *)(port_stats) + g_hinic3_rsfec_stats[j].offset; + data[i] = (g_hinic3_rsfec_stats[j].size == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } kfree(port_stats); @@ -537,6 +565,8 @@ void hinic3_get_ethtool_stats(struct net_device *netdev, u16 i = 0, j = 0; char *p = NULL; int err; + int link_down_events_phy_tmp = 0; + struct hinic3_netdev_link_count_str link_count = {0}; #ifdef HAVE_NDO_GET_STATS64 net_stats = dev_get_stats(netdev, &temp); @@ -554,6 +584,14 @@ void hinic3_get_ethtool_stats(struct net_device *netdev, data[i] = GET_VALUE_OF_PTR(hinic3_nic_dev_stats[j].size, p); } + err = hinic3_get_link_event_stats(nic_dev->hwdev, &link_down_events_phy_tmp); + + link_count.link_down_events_phy = (u64)link_down_events_phy_tmp; + for (j = 0; j < ARRAY_LEN(hinic3_netdev_link_count); j++, i++) { + p = (char *)(&link_count) + hinic3_netdev_link_count[j].offset; + data[i] = GET_VALUE_OF_PTR(hinic3_netdev_link_count[j].size, p); + } + err = hinic3_get_vport_stats(nic_dev->hwdev, hinic3_global_func_id(nic_dev->hwdev), &vport_stats); if (err) @@ -565,8 +603,10 @@ void hinic3_get_ethtool_stats(struct net_device *netdev, data[i] = GET_VALUE_OF_PTR(hinic3_function_stats[j].size, p); } - if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) + if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) { i += get_ethtool_port_stats(nic_dev, data + i); + i += get_ethtool_rsfec_stats(nic_dev, data + i); + } get_drv_queue_stats(nic_dev, data + i); } @@ -588,6 +628,12 @@ static u16 get_drv_dev_strings(struct hinic3_nic_dev *nic_dev, char *p) cnt++; } + for (i = 0; i < ARRAY_LEN(hinic3_netdev_link_count); i++) { + memcpy(p, hinic3_netdev_link_count[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + return cnt; } @@ -603,18 +649,16 @@ static u16 get_hw_stats_strings(struct hinic3_nic_dev *nic_dev, char *p) } if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) { - if (mag_support_mode == FPGA_PORT_COUNTER) { - for (i = 0; i < ARRAY_LEN(g_hinic3_fpga_port_stats); i++) { - memcpy(p, g_hinic3_fpga_port_stats[i].name, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - cnt++; - } - } else { - for (i = 0; i < ARRAY_LEN(hinic3_port_stats); i++) { - memcpy(p, hinic3_port_stats[i].name, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - cnt++; - } + for (i = 0; i < ARRAY_LEN(hinic3_port_stats); i++) { + memcpy(p, hinic3_port_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + for (i = 0; i < ARRAY_LEN(g_hinic3_rsfec_stats); i++) { + memcpy(p, g_hinic3_rsfec_stats[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; } } @@ -758,7 +802,6 @@ struct hw2ethtool_link_mode { u32 speed; }; -/*lint -save -e26 */ static const struct hw2ethtool_link_mode hw2ethtool_link_mode_table[LINK_MODE_MAX_NUMBERS] = { [LINK_MODE_GE] = { @@ -818,8 +861,6 @@ static const struct hw2ethtool_link_mode }, }; -/*lint -restore */ - #define GET_SUPPORTED_MODE 0 #define GET_ADVERTISED_MODE 1 @@ -838,29 +879,33 @@ struct cmd_link_settings { #define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \ set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->advertising) -#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \ -do { \ - u32 i; \ - for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) { \ - if (hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i] >= \ - __ETHTOOL_LINK_MODE_MASK_NBITS) \ - continue; \ - set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], \ - (ecmd)->supported); \ - } \ -} while (0) +static void ethtool_add_supported_speed_link_mode(struct cmd_link_settings *link_settings, + u32 mode) +{ + u32 i; -#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \ -do { \ - u32 i; \ - for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) { \ 
- if (hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i] >= \ - __ETHTOOL_LINK_MODE_MASK_NBITS) \ - continue; \ - set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], \ - (ecmd)->advertising); \ - } \ -} while (0) + for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) { + if (hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i] >= + __ETHTOOL_LINK_MODE_MASK_NBITS) + continue; + set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], + link_settings->supported); + } +} + +static void ethtool_add_advertised_speed_link_mode(struct cmd_link_settings *link_settings, + u32 mode) +{ + u32 i; + + for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) { + if (hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i] >= + __ETHTOOL_LINK_MODE_MASK_NBITS) + continue; + set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], + link_settings->advertising); + } +} /* Related to enum mag_cmd_port_speed */ static u32 hw_to_ethtool_speed[] = { @@ -888,11 +933,11 @@ static void hinic3_add_ethtool_link_mode(struct cmd_link_settings *link_settings for (link_mode = 0; link_mode < LINK_MODE_MAX_NUMBERS; link_mode++) { if (hw_link_mode & BIT(link_mode)) { if (name == GET_SUPPORTED_MODE) - ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE - (link_settings, link_mode); + ethtool_add_supported_speed_link_mode( + link_settings, link_mode); else - ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE - (link_settings, link_mode); + ethtool_add_advertised_speed_link_mode( + link_settings, link_mode); } } } @@ -915,10 +960,15 @@ static int hinic3_link_speed_set(struct hinic3_nic_dev *nic_dev, err = hinic3_get_link_state(nic_dev->hwdev, &link_state); if (!err && link_state) { - link_settings->speed = - port_info->speed < ARRAY_LEN(hw_to_ethtool_speed) ? - hw_to_ethtool_speed[port_info->speed] : - (u32)SPEED_UNKNOWN; + if (hinic3_get_bond_create_mode(nic_dev->hwdev)) { + link_settings->speed = port_info->bond_speed; + } else { + link_settings->speed = + port_info->speed < + ARRAY_LEN(hw_to_ethtool_speed) ? + hw_to_ethtool_speed[port_info->speed] : + (u32)SPEED_UNKNOWN; + } link_settings->duplex = port_info->duplex; } else { @@ -993,6 +1043,39 @@ static int get_link_pause_settings(struct hinic3_nic_dev *nic_dev, return 0; } +static bool is_bit_offset_defined(u8 bit_offset) +{ + if (bit_offset < __ETHTOOL_LINK_MODE_MASK_NBITS) + return true; + return false; +} + +static void +ethtool_add_supported_advertised_fec(struct cmd_link_settings *link_settings, + u32 fec, u8 cmd) +{ + u8 i; + for (i = 0; i < HINIC_ETHTOOL_FEC_INFO_LEN; i++) { + if ((fec & BIT(hinic3_ethtool_fec_info[i].hinic_fec_offset)) == 0) + continue; + if ((is_bit_offset_defined(hinic3_ethtool_fec_info[i].ethtool_bit_offset) == true) && + (cmd == HINIC_ADVERTISED_FEC_CMD)) { + set_bit(hinic3_ethtool_fec_info[i].ethtool_bit_offset, link_settings->advertising); + return; /* There can be only one advertised fec mode. 
*/ + } + if ((is_bit_offset_defined(hinic3_ethtool_fec_info[i].ethtool_bit_offset) == true) && + (cmd == HINIC_SUPPORTED_FEC_CMD)) + set_bit(hinic3_ethtool_fec_info[i].ethtool_bit_offset, link_settings->supported); + } +} + +static void hinic3_link_fec_type(struct cmd_link_settings *link_settings, + u32 fec, u32 supported_fec) +{ + ethtool_add_supported_advertised_fec(link_settings, supported_fec, HINIC_SUPPORTED_FEC_CMD); + ethtool_add_supported_advertised_fec(link_settings, fec, HINIC_ADVERTISED_FEC_CMD); +} + static int get_link_settings(struct net_device *netdev, struct cmd_link_settings *link_settings) { @@ -1013,6 +1096,9 @@ static int get_link_settings(struct net_device *netdev, hinic3_link_port_type(link_settings, port_info.port_type); + hinic3_link_fec_type(link_settings, BIT(port_info.fec), + port_info.supported_fec_mode); + link_settings->autoneg = port_info.autoneg_state == PORT_CFG_AN_ON ? AUTONEG_ENABLE : AUTONEG_DISABLE; if (port_info.autoneg_cap) @@ -1138,10 +1224,11 @@ static int hinic3_set_settings_to_hw(struct hinic3_nic_dev *nic_dev, struct net_device *netdev = nic_dev->netdev; struct hinic3_link_ksettings settings = {0}; int speed_level = 0; - char set_link_str[128] = {0}; + char set_link_str[HINIC_SET_LINK_STR_LEN] = {0}; + char link_info[HINIC_SET_LINK_STR_LEN] = {0}; int err = 0; - err = snprintf(set_link_str, sizeof(set_link_str) - 1, "%s", + err = snprintf(link_info, sizeof(link_info), "%s", (bool)(set_settings & HILINK_LINK_SET_AUTONEG) ? ((bool)autoneg ? "autong enable " : "autong disable ") : ""); if (err < 0) @@ -1149,8 +1236,8 @@ static int hinic3_set_settings_to_hw(struct hinic3_nic_dev *nic_dev, if (set_settings & HILINK_LINK_SET_SPEED) { speed_level = hinic3_ethtool_to_hw_speed_level(speed); - err = snprintf(set_link_str, sizeof(set_link_str) - 1, - "%sspeed %u ", set_link_str, speed); + err = snprintf(set_link_str, sizeof(set_link_str), + "%sspeed %u ", link_info, speed); if (err < 0) return -EINVAL; } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c b/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c index 70346d6393de5f5bd1f4963a4886f9da22149c46..2daa7f9f578abe6066ccb985d1ff3e20590ba27c 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c @@ -82,7 +82,7 @@ static struct hinic3_mac_filter *hinic3_add_filter(struct hinic3_nic_dev *nic_de struct list_head *mac_filter_list, u8 *addr) { - struct hinic3_mac_filter *f; + struct hinic3_mac_filter *f = NULL; f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) @@ -117,7 +117,7 @@ static void hinic3_del_filter(struct hinic3_nic_dev *nic_dev, static struct hinic3_mac_filter *hinic3_mac_filter_entry_clone(const struct hinic3_mac_filter *src) { - struct hinic3_mac_filter *f; + struct hinic3_mac_filter *f = NULL; f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h index ef8c62b3648f24fbc9f43eeeafda6442f2d5be7d..7fed1c16d403bc7e873dc4cac2c35c2b3420f4bb 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h @@ -40,6 +40,7 @@ enum hinic3_channel_id { HINIC3_CHANNEL_DSW, HINIC3_CHANNEL_MIG, HINIC3_CHANNEL_CRYPT, + HINIC3_CHANNEL_VROCE, HINIC3_CHANNEL_MAX = 32, }; @@ -397,6 +398,16 @@ void hinic3_free_cmd_buf(void *hwdev, struct hinic3_cmd_buf *cmd_buf); **/ int hinic3_sm_ctr_rd16(void *hwdev, u8 node, u8 instance, u32 ctr_id, u16 *value); +/** + * hinic3_sm_ctr_rd16_clear - small single 16 
counter read clear + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic3_sm_ctr_rd16_clear(void *hwdev, u8 node, u8 instance, u32 ctr_id, u16 *value); + /** * @brief hinic3_sm_ctr_rd32 - small single 32 counter read * @param hwdev: device pointer to hwdev @@ -810,6 +821,13 @@ int hinic3_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance, */ void hinic3_link_event_stats(void *dev, u8 link); +/** + * @brief hinic3_get_link_event_stats - link event stats + * @param hwdev: device pointer to hwdev + * @param link: link status + */ +int hinic3_get_link_event_stats(void *dev, int *link_state); + /** * @brief hinic3_get_hw_pf_infos - get pf infos * @param hwdev: device pointer to hwdev diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c index 3c835ff95e89ba1eba56e557a65e5c01b7bb0d5a..7a2644c9ea356ea1e351cb8901e479e16115a4d8 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c @@ -12,6 +12,7 @@ #include #include +#include "ossl_knl.h" #include "hinic3_hw.h" #include "hinic3_crm.h" #include "hinic3_nic_io.h" @@ -47,12 +48,16 @@ static void qp_add_napi(struct hinic3_irq *irq_cfg) netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll, nic_dev->poll_weight); napi_enable(&irq_cfg->napi); + irq_cfg->napi_reign = NAPI_IS_REGIN; } -static void qp_del_napi(struct hinic3_irq *irq_cfg) +void qp_del_napi(struct hinic3_irq *irq_cfg) { - napi_disable(&irq_cfg->napi); - netif_napi_del(&irq_cfg->napi); + if (irq_cfg->napi_reign == NAPI_IS_REGIN) { + napi_disable(&irq_cfg->napi); + netif_napi_del(&irq_cfg->napi); + irq_cfg->napi_reign = NAPI_NOT_REGIN; + } } static irqreturn_t qp_irq(int irq, void *data) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h deleted file mode 100644 index 2482c5fac5fcea0b7056bdf543aec7de0a1c1f22..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h +++ /dev/null @@ -1,205 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ - -#ifndef HINIC3_LLD_H -#define HINIC3_LLD_H - -#include "hinic3_crm.h" - -struct hinic3_lld_dev { - struct pci_dev *pdev; - void *hwdev; -}; - -struct hinic3_uld_info { - /* When the function does not need to initialize the corresponding uld, - * @probe needs to return 0 and uld_dev is set to NULL; - * if uld_dev is NULL, @remove will not be called when uninstalling - */ - int (*probe)(struct hinic3_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name); - void (*remove)(struct hinic3_lld_dev *lld_dev, void *uld_dev); - int (*suspend)(struct hinic3_lld_dev *lld_dev, void *uld_dev, pm_message_t state); - int (*resume)(struct hinic3_lld_dev *lld_dev, void *uld_dev); - void (*event)(struct hinic3_lld_dev *lld_dev, void *uld_dev, - struct hinic3_event_info *event); - int (*ioctl)(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size, - void *buf_out, u32 *out_size); -}; - -/* hinic3_register_uld - register an upper-layer driver - * @type: uld service type - * @uld_info: uld callback - * - * Registers an upper-layer driver. - * Traverse existing devices and call @probe to initialize the uld device. 
- */ -int hinic3_register_uld(enum hinic3_service_type type, struct hinic3_uld_info *uld_info); - -/** - * hinic3_unregister_uld - unregister an upper-layer driver - * @type: uld service type - * - * Traverse existing devices and call @remove to uninstall the uld device. - * Unregisters an existing upper-layer driver. - */ -void hinic3_unregister_uld(enum hinic3_service_type type); - -void lld_hold(void); -void lld_put(void); - -/** - * @brief hinic3_get_lld_dev_by_chip_name - get lld device by chip name - * @param chip_name: chip name - * - * The value of lld_dev reference increases when lld_dev is obtained. The caller needs - * to release the reference by calling lld_dev_put. - **/ -struct hinic3_lld_dev *hinic3_get_lld_dev_by_chip_name(const char *chip_name); - -/** - * @brief lld_dev_hold - get reference to lld_dev - * @param dev: lld device - * - * Hold reference to device to keep it from being freed - **/ -void lld_dev_hold(struct hinic3_lld_dev *dev); - -/** - * @brief lld_dev_put - release reference to lld_dev - * @param dev: lld device - * - * Release reference to device to allow it to be freed - **/ -void lld_dev_put(struct hinic3_lld_dev *dev); - -/** - * @brief hinic3_get_lld_dev_by_dev_name - get lld device by uld device name - * @param dev_name: uld device name - * @param type: uld service type, When the type is SERVICE_T_MAX, try to match - * all ULD names to get uld_dev - * - * The value of lld_dev reference increases when lld_dev is obtained. The caller needs - * to release the reference by calling lld_dev_put. - **/ -struct hinic3_lld_dev *hinic3_get_lld_dev_by_dev_name(const char *dev_name, - enum hinic3_service_type type); - -/** - * @brief hinic3_get_lld_dev_by_dev_name_unsafe - get lld device by uld device name - * @param dev_name: uld device name - * @param type: uld service type, When the type is SERVICE_T_MAX, try to match - * all ULD names to get uld_dev - * - * hinic3_get_lld_dev_by_dev_name_unsafe() is completely analogous to - * hinic3_get_lld_dev_by_dev_name(), The only difference is that the reference - * of lld_dev is not increased when lld_dev is obtained. - * - * The caller must ensure that lld_dev will not be freed during the remove process - * when using lld_dev. - **/ -struct hinic3_lld_dev *hinic3_get_lld_dev_by_dev_name_unsafe(const char *dev_name, - enum hinic3_service_type type); - -/** - * @brief hinic3_get_lld_dev_by_chip_and_port - get lld device by chip name and port id - * @param chip_name: chip name - * @param port_id: port id - **/ -struct hinic3_lld_dev *hinic3_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id); - -/** - * @brief hinic3_get_ppf_lld_dev - get ppf lld device by current function's lld device - * @param lld_dev: current function's lld device - * - * The value of lld_dev reference increases when lld_dev is obtained. The caller needs - * to release the reference by calling lld_dev_put. - **/ -struct hinic3_lld_dev *hinic3_get_ppf_lld_dev(struct hinic3_lld_dev *lld_dev); - -/** - * @brief hinic3_get_ppf_lld_dev_unsafe - get ppf lld device by current function's lld device - * @param lld_dev: current function's lld device - * - * hinic3_get_ppf_lld_dev_unsafe() is completely analogous to hinic3_get_ppf_lld_dev(), - * The only difference is that the reference of lld_dev is not increased when lld_dev is obtained. - * - * The caller must ensure that ppf's lld_dev will not be freed during the remove process - * when using ppf lld_dev. 
- **/ -struct hinic3_lld_dev *hinic3_get_ppf_lld_dev_unsafe(struct hinic3_lld_dev *lld_dev); - -/** - * @brief uld_dev_hold - get reference to uld_dev - * @param lld_dev: lld device - * @param type: uld service type - * - * Hold reference to uld device to keep it from being freed - **/ -void uld_dev_hold(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); - -/** - * @brief uld_dev_put - release reference to lld_dev - * @param dev: lld device - * @param type: uld service type - * - * Release reference to uld device to allow it to be freed - **/ -void uld_dev_put(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); - -/** - * @brief hinic3_get_uld_dev - get uld device by lld device - * @param lld_dev: lld device - * @param type: uld service type - * - * The value of uld_dev reference increases when uld_dev is obtained. The caller needs - * to release the reference by calling uld_dev_put. - **/ -void *hinic3_get_uld_dev(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); - -/** - * @brief hinic3_get_uld_dev_unsafe - get uld device by lld device - * @param lld_dev: lld device - * @param type: uld service type - * - * hinic3_get_uld_dev_unsafe() is completely analogous to hinic3_get_uld_dev(), - * The only difference is that the reference of uld_dev is not increased when uld_dev is obtained. - * - * The caller must ensure that uld_dev will not be freed during the remove process - * when using uld_dev. - **/ -void *hinic3_get_uld_dev_unsafe(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); - -/** - * @brief hinic3_get_chip_name - get chip name by lld device - * @param lld_dev: lld device - * @param chip_name: String for storing the chip name - * @param max_len: Maximum number of characters to be copied for chip_name - **/ -int hinic3_get_chip_name(struct hinic3_lld_dev *lld_dev, char *chip_name, u16 max_len); - -struct card_node *hinic3_get_chip_node_by_lld(struct hinic3_lld_dev *lld_dev); - -struct hinic3_hwdev *hinic3_get_sdk_hwdev_by_lld(struct hinic3_lld_dev *lld_dev); - -bool hinic3_get_vf_service_load(struct pci_dev *pdev, u16 service); - -int hinic3_set_vf_service_load(struct pci_dev *pdev, u16 service, - bool vf_srv_load); - -int hinic3_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, - u16 service, bool en); - -bool hinic3_get_vf_load_state(struct pci_dev *pdev); - -int hinic3_set_vf_load_state(struct pci_dev *pdev, bool vf_load_state); - -int hinic3_attach_nic(struct hinic3_lld_dev *lld_dev); - -void hinic3_detach_nic(const struct hinic3_lld_dev *lld_dev); - -int hinic3_attach_service(const struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); -void hinic3_detach_service(const struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); -const char **hinic3_get_uld_names(void); -int hinic3_lld_init(void); -void hinic3_lld_exit(void); -#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c index 2fe808b8356dff486fef9d630619fb814c62a162..8cd891e26a132534859a15cb2ce06a616b4b5bd4 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c @@ -17,12 +17,22 @@ #include "ossl_knl.h" #include "hinic3_crm.h" #include "hinic3_hw.h" +#include "mag_mpu_cmd.h" +#include "mag_mpu_cmd_defs.h" #include "hinic3_nic_io.h" #include "hinic3_nic_cfg.h" #include "hinic3_srv_nic.h" #include "hinic3_nic.h" #include "hinic3_common.h" +#define BIFUR_RESOURCE_PF_SSID 0x5a1 +#define CAP_INFO_MAX_LEN 512 
+#define DEVICE_VENDOR_MAX_LEN 17 +#define READ_RSFEC_REGISTER_DELAY_TIME_MS 500 + +struct parse_tlv_info g_page_info = {0}; +struct drv_mag_cmd_get_xsfp_tlv_rsp g_xsfp_tlv_info = {0}; + static int mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); static int mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, @@ -78,8 +88,10 @@ int hinic3_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats) return -ENOMEM; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_io) - return -EINVAL; + if (!nic_io) { + err = -EINVAL; + goto out; + } memset(&stats_info, 0, sizeof(stats_info)); stats_info.port_id = hinic3_physical_port_id(hwdev); @@ -104,6 +116,64 @@ int hinic3_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats) } EXPORT_SYMBOL(hinic3_get_phy_port_stats); +int hinic3_get_phy_rsfec_stats(void *hwdev, struct mag_cmd_rsfec_stats *stats) +{ + struct mag_cmd_get_mag_cnt *port_stats = NULL; + struct mag_cmd_get_mag_cnt stats_info; + u16 out_size = sizeof(*port_stats); + struct hinic3_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !stats) + return -EINVAL; + + port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); + if (!port_stats) + return -ENOMEM; + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + err = -EINVAL; + goto out; + } + + memset(&stats_info, 0, sizeof(stats_info)); + stats_info.port_id = hinic3_physical_port_id(hwdev); + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_MAG_CNT, + &stats_info, sizeof(stats_info), + port_stats, &out_size); + if (err || !out_size || port_stats->head.status) { + nic_err(nic_io->dev_hdl, + "Failed to get rsfec statistics, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_stats->head.status, out_size); + err = -EIO; + goto out; + } + /* Read twice to clear the residual error counters */ + msleep(READ_RSFEC_REGISTER_DELAY_TIME_MS); + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_MAG_CNT, &stats_info, + sizeof(stats_info), + port_stats, &out_size); + if (err || !out_size || port_stats->head.status) { + nic_err(nic_io->dev_hdl, + "Failed to get rsfec statistics, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_stats->head.status, out_size); + err = -EIO; + goto out; + } + + memcpy(stats, &port_stats->mag_csr[MAG_RX_RSFEC_ERR_CW_CNT], + sizeof(u32)); + +out: + kfree(port_stats); + + return err; +} +EXPORT_SYMBOL(hinic3_get_phy_rsfec_stats); + int hinic3_set_port_funcs_state(void *hwdev, bool enable) { return 0; @@ -180,6 +250,8 @@ int hinic3_get_loopback_mode(void *hwdev, u8 *mode, u8 *enable) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; return hinic3_cfg_loopback_mode(nic_io, MGMT_MSG_CMD_OP_GET, mode, enable); @@ -195,6 +267,8 @@ int hinic3_set_loopback_mode(void *hwdev, u8 mode, u8 enable) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) { nic_err(nic_io->dev_hdl, "Invalid loopback mode %u to set\n", @@ -218,6 +292,9 @@ int hinic3_set_led_status(void *hwdev, enum mag_led_type type, return -EFAULT; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + memset(&led_info, 0, sizeof(led_info)); led_info.function_id = hinic3_global_func_id(hwdev); @@ -249,6 +326,8 @@ int hinic3_get_port_info(void *hwdev, struct nic_port_info *port_info, memset(&port_msg, 0, sizeof(port_msg)); nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) +
return -EINVAL; port_msg.port_id = hinic3_physical_port_id(hwdev); @@ -268,9 +347,12 @@ int hinic3_get_port_info(void *hwdev, struct nic_port_info *port_info, port_info->port_type = port_msg.wire_type; port_info->speed = port_msg.speed; port_info->fec = port_msg.fec; + port_info->lanes = port_msg.lanes; port_info->supported_mode = port_msg.supported_mode; port_info->advertised_mode = port_msg.advertised_mode; - + port_info->supported_fec_mode = port_msg.supported_fec_mode; + /* switch Gbps to Mbps */ + port_info->bond_speed = (u32)port_msg.bond_speed * RATE_MBPS_TO_GBPS; return 0; } @@ -306,6 +388,8 @@ int hinic3_set_link_settings(void *hwdev, memset(&info, 0, sizeof(info)); nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; info.port_id = hinic3_physical_port_id(hwdev); info.config_bitmap = settings->valid_bitmap; @@ -335,6 +419,8 @@ int hinic3_get_link_state(void *hwdev, u8 *link_state) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; memset(&get_link, 0, sizeof(get_link)); get_link.port_id = hinic3_physical_port_id(hwdev); @@ -364,9 +450,10 @@ void hinic3_notify_vf_link_status(struct hinic3_nic_io *nic_io, if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) { link.status = link_status; link.port_id = hinic3_physical_port_id(nic_io->hwdev); - err = hinic3_mbox_to_vf(nic_io->hwdev, vf_id, HINIC3_MOD_HILINK, + err = hinic3_mbox_to_vf_no_ack(nic_io->hwdev, vf_id, + HINIC3_MOD_HILINK, MAG_CMD_GET_LINK_STATUS, &link, - sizeof(link), &link, &out_size, 0, + sizeof(link), &link, &out_size, HINIC3_CHANNEL_NIC); if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) { nic_warn(nic_io->dev_hdl, "VF%d not initialized, disconnect it\n", @@ -387,6 +474,9 @@ void hinic3_notify_all_vfs_link_changed(void *hwdev, u8 link_status) u16 i; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; + nic_io->link_status = link_status; for (i = 1; i <= nic_io->max_vfs; i++) { if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced) @@ -394,6 +484,167 @@ void hinic3_notify_all_vfs_link_changed(void *hwdev, u8 link_status) } } +static char *g_hw_to_char_fec[HILINK_FEC_MAX_TYPE] = { + "not set", "rsfec", "basefec", + "nofec", "llrsfec"}; +static char *g_hw_to_speed_info[PORT_SPEED_UNKNOWN] = { + "not set", "10MB", "100MB", "1GB", "10GB", + "25GB", "40GB", "50GB", "100GB", "200GB"}; +static char *g_hw_to_an_state_info[PORT_CFG_AN_OFF + 1] = { + "not set", "on", "off"}; + +struct port_type_table { + u32 port_type; + char *port_type_name; +}; + +static const struct port_type_table port_optical_type_table_s[] = { + {LINK_PORT_UNKNOWN, "UNKNOWN"}, + {LINK_PORT_OPTICAL_MM, "optical_sr"}, + {LINK_PORT_OPTICAL_SM, "optical_lr"}, + {LINK_PORT_PAS_COPPER, "copper"}, + {LINK_PORT_ACC, "ACC"}, + {LINK_PORT_BASET, "baset"}, + {LINK_PORT_AOC, "AOC"}, + {LINK_PORT_ELECTRIC, "electric"}, + {LINK_PORT_BACKBOARD_INTERFACE, "interface"}, +}; + +static char *get_port_type_name(u32 type) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(port_optical_type_table_s); i++) { + if (type == port_optical_type_table_s[i].port_type) + return port_optical_type_table_s[i].port_type_name; + } + return "UNKNOWN TYPE"; +} + +static void get_port_type(struct hinic3_nic_io *nic_io, + struct mag_cmd_event_port_info *info, + char **port_type) +{ + if (info->port_type <= LINK_PORT_BACKBOARD_INTERFACE) + *port_type = get_port_type_name(info->port_type); + else + sdk_info(nic_io->dev_hdl, "Unknown port type: %u\n", + info->port_type); +} + +static int 
get_port_temperature_power(struct mag_cmd_event_port_info *info, + char *str) +{ + char cap_info[CAP_INFO_MAX_LEN]; + + memset(cap_info, 0, sizeof(cap_info)); + snprintf(cap_info, CAP_INFO_MAX_LEN, "%s, %s, Temperature: %u", str, + info->sfp_type ? "QSFP" : "SFP", info->cable_temp); + + if (info->sfp_type) + snprintf(str, CAP_INFO_MAX_LEN, "%s, rx power: %uuW %uuW %uuW %uuW", + cap_info, info->power[0x0], info->power[0x1], + info->power[0x2], info->power[0x3]); + else + snprintf(str, CAP_INFO_MAX_LEN, "%s, rx power: %uuW, tx power: %uuW", + cap_info, info->power[0x0], info->power[0x1]); + + return 0; +} + +static void print_cable_info(struct hinic3_nic_io *nic_io, + struct mag_cmd_event_port_info *info) +{ + char tmp_str[CAP_INFO_MAX_LEN] = {0}; + char tmp_vendor[DEVICE_VENDOR_MAX_LEN] = {0}; + char *port_type = "Unknown port type"; + int i; + int err = 0; + + if (info->gpio_insert) { + sdk_info(nic_io->dev_hdl, "Cable not present\n"); + return; + } + + get_port_type(nic_io, info, &port_type); + + for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) { + if (info->vendor_name[i] == ' ') + info->vendor_name[i] = '\0'; + else + break; + } + + memcpy(tmp_vendor, info->vendor_name, sizeof(info->vendor_name)); + snprintf(tmp_str, CAP_INFO_MAX_LEN, "Vendor: %s, %s, length: %um, max_speed: %uGbps", + tmp_vendor, port_type, info->cable_length, info->max_speed); + + if (info->port_type == LINK_PORT_OPTICAL_MM || + info->port_type == LINK_PORT_AOC) { + err = get_port_temperature_power(info, tmp_str); + if (err) + return; + } + + sdk_info(nic_io->dev_hdl, "Cable information: %s\n", tmp_str); +} + +static void print_link_info(struct hinic3_nic_io *nic_io, + struct mag_cmd_event_port_info *info, + enum hinic3_nic_event_type type) +{ + char *fec = "None"; + char *speed = "None"; + char *an_state = "None"; + + if (info->fec < HILINK_FEC_MAX_TYPE) + fec = g_hw_to_char_fec[info->fec]; + else + sdk_info(nic_io->dev_hdl, "Unknown fec type: %u\n", info->fec); + + if (info->an_state > PORT_CFG_AN_OFF) { + sdk_info(nic_io->dev_hdl, "an_state %u is invalid", + info->an_state); + return; + } + + an_state = g_hw_to_an_state_info[info->an_state]; + + if (info->speed >= PORT_SPEED_UNKNOWN) { + sdk_info(nic_io->dev_hdl, "speed %u is invalid", info->speed); + return; + } + + speed = g_hw_to_speed_info[info->speed]; + sdk_info(nic_io->dev_hdl, "Link information: speed %s, %s, autoneg %s", + speed, fec, an_state); +} + +void print_port_info(struct hinic3_nic_io *nic_io, + struct mag_cmd_event_port_info *port_info, + enum hinic3_nic_event_type type) +{ + print_cable_info(nic_io, port_info); + + print_link_info(nic_io, port_info, type); + + if (type == EVENT_NIC_LINK_UP) + return; + + sdk_info(nic_io->dev_hdl, "PMA ctrl: %s, tx %s, rx %s, PMA fifo reg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x\n", + port_info->pma_ctrl == 1 ? "off" : "on", + port_info->tx_enable ? "enable" : "disable", + port_info->rx_enable ?
"enable" : "disable", port_info->pma_fifo_reg, + port_info->pma_signal_ok_reg, port_info->rf_lf); + sdk_info(nic_io->dev_hdl, "alos: %u, rx_los: %u, PCS 64 66b reg: 0x%x, PCS link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x\n", + port_info->alos, port_info->rx_los, port_info->pcs_64_66b_reg, + port_info->pcs_link, port_info->pcs_mac_link, + port_info->pcs_err_cnt); + sdk_info(nic_io->dev_hdl, "his_link_machine_state = 0x%08x, cur_link_machine_state = 0x%08x\n", + port_info->his_link_machine_state, + port_info->cur_link_machine_state); +} + static int hinic3_get_vf_link_status_msg_handler(struct hinic3_nic_io *nic_io, u16 vf_id, void *buf_in, u16 in_size, void *buf_out, @@ -433,6 +684,10 @@ static void get_port_info(void *hwdev, int err; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is NULL\n"); + return; + } if (hinic3_func_type(hwdev) != TYPE_VF && link_status->status) { err = hinic3_get_port_info(hwdev, &port_info, HINIC3_CHANNEL_NIC); if (err) { @@ -457,8 +712,17 @@ static void link_status_event_handler(void *hwdev, void *buf_in, struct hinic3_event_info event_info = {0}; struct hinic3_event_link_info *link_info = (void *)event_info.event_data; struct hinic3_nic_io *nic_io = NULL; + struct pci_dev *pdev = NULL; + + /* Ignore link change event */ + if (hinic3_is_bm_slave_host(hwdev)) + return; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is NULL\n"); + return; + } link_status = buf_in; sdk_info(nic_io->dev_hdl, "Link status report received, func_id: %u, status: %u\n", @@ -475,6 +739,13 @@ static void link_status_event_handler(void *hwdev, void *buf_in, hinic3_event_callback(hwdev, &event_info); + if (nic_io->pcidev_hdl != NULL) { + pdev = nic_io->pcidev_hdl; + if (pdev->subsystem_device == BIFUR_RESOURCE_PF_SSID) { + return; + } + } + if (hinic3_func_type(hwdev) != TYPE_VF) { hinic3_notify_all_vfs_link_changed(hwdev, link_status->status); ret_link_status = buf_out; @@ -483,20 +754,142 @@ static void link_status_event_handler(void *hwdev, void *buf_in, } } +static void port_info_event_printf(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct mag_cmd_event_port_info *port_info = buf_in; + struct hinic3_nic_io *nic_io = NULL; + struct hinic3_event_info event_info; + enum hinic3_nic_event_type type; + + if (!hwdev) { + pr_err("hwdev is NULL\n"); + return; + } + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is NULL\n"); + return; + } + + if (in_size != sizeof(*port_info)) { + sdk_info(nic_io->dev_hdl, "Invalid port info message size %u, should be %lu\n", + in_size, sizeof(*port_info)); + return; + } + + ((struct mag_cmd_event_port_info *)buf_out)->head.status = 0; + + type = port_info->event_type; + if (type < EVENT_NIC_LINK_DOWN || type > EVENT_NIC_LINK_UP) { + sdk_info(nic_io->dev_hdl, "Invalid hilink info report, type: %d\n", + type); + return; + } + + print_port_info(nic_io, port_info, type); + + memset(&event_info, 0, sizeof(event_info)); + event_info.service = EVENT_SRV_NIC; + event_info.type = type; + + *out_size = sizeof(*port_info); + + hinic3_event_callback(hwdev, &event_info); +} + +void hinic3_notify_vf_bond_status(struct hinic3_nic_io *nic_io, + u16 vf_id, u8 bond_status) +{ + struct mag_cmd_get_bond_status bond; + struct vf_data_storage *vf_infos = nic_io->vf_infos; + u16 out_size = sizeof(bond); + int err; + + memset(&bond, 0, sizeof(bond)); + if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) { + bond.status = 
bond_status; + err = hinic3_mbox_to_vf_no_ack(nic_io->hwdev, vf_id, + HINIC3_MOD_HILINK, + MAG_CMD_GET_BOND_STATUS, &bond, + sizeof(bond), &bond, &out_size, + HINIC3_CHANNEL_NIC); + if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) { + nic_warn(nic_io->dev_hdl, "VF %hu not initialized, disconnect it\n", + HW_VF_ID_TO_OS(vf_id)); + hinic3_unregister_vf(nic_io, vf_id); + return; + } + if (err || !out_size || bond.head.status) + nic_err(nic_io->dev_hdl, + "Send bond change event to VF %hu failed, err: %d, status: 0x%x, out_size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, bond.head.status, + out_size); + } +} + +void hinic3_notify_all_vfs_bond_changed(void *hwdev, u8 bond_status) +{ + struct hinic3_nic_io *nic_io = NULL; + u16 i; + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + nic_io->link_status = bond_status; + for (i = 1; i <= nic_io->max_vfs; i++) + hinic3_notify_vf_bond_status(nic_io, i, bond_status); +} + +static void bond_status_event_handler(void *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct mag_cmd_get_bond_status *bond_status = NULL; + struct hinic3_event_info event_info = {}; + struct hinic3_nic_io *nic_io = NULL; + struct mag_cmd_get_bond_status *ret_bond_status = NULL; + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + + bond_status = (struct mag_cmd_get_bond_status *)buf_in; + sdk_info(nic_io->dev_hdl, "bond status report received, func_id: %u, status: %u\n", + hinic3_global_func_id(hwdev), bond_status->status); + + event_info.service = EVENT_SRV_NIC; + event_info.type = bond_status->status ? + EVENT_NIC_BOND_UP : EVENT_NIC_BOND_DOWN; + + hinic3_event_callback(hwdev, &event_info); + + if (hinic3_func_type(hwdev) != TYPE_VF) { + hinic3_notify_all_vfs_bond_changed(hwdev, bond_status->status); + ret_bond_status = buf_out; + ret_bond_status->head.status = 0; + *out_size = sizeof(*ret_bond_status); + } +} + static void cable_plug_event(void *hwdev, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct mag_cmd_wire_event *plug_event = buf_in; struct hinic3_port_routine_cmd *rt_cmd = NULL; + struct hinic3_port_routine_cmd_extern *rt_cmd_ext = NULL; struct hinic3_nic_io *nic_io = NULL; struct hinic3_event_info event_info; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is NULL\n"); + return; + } + rt_cmd = &nic_io->nic_cfg.rt_cmd; + rt_cmd_ext = &nic_io->nic_cfg.rt_cmd_ext; mutex_lock(&nic_io->nic_cfg.sfp_mutex); rt_cmd->mpu_send_sfp_abs = false; rt_cmd->mpu_send_sfp_info = false; + rt_cmd_ext->mpu_send_xsfp_tlv_info = false; mutex_unlock(&nic_io->nic_cfg.sfp_mutex); memset(&event_info, 0, sizeof(event_info)); @@ -518,20 +911,54 @@ static void port_sfp_info_event(void *hwdev, void *buf_in, u16 in_size, { struct mag_cmd_get_xsfp_info *sfp_info = buf_in; struct hinic3_port_routine_cmd *rt_cmd = NULL; + struct hinic3_port_routine_cmd_extern *rt_cmd_ext = NULL; struct hinic3_nic_io *nic_io = NULL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; if (in_size != sizeof(*sfp_info)) { - sdk_err(nic_io->dev_hdl, "Invalid sfp info cmd, length: %u, should be %ld\n", + sdk_err(nic_io->dev_hdl, "Invalid sfp info cmd, length: %u, should be %lu\n", in_size, sizeof(*sfp_info)); return; } rt_cmd = &nic_io->nic_cfg.rt_cmd; + rt_cmd_ext = &nic_io->nic_cfg.rt_cmd_ext; mutex_lock(&nic_io->nic_cfg.sfp_mutex); memcpy(&rt_cmd->std_sfp_info, sfp_info, sizeof(struct mag_cmd_get_xsfp_info)); rt_cmd->mpu_send_sfp_info = true; + rt_cmd_ext->mpu_send_xsfp_tlv_info = false; + 
mutex_unlock(&nic_io->nic_cfg.sfp_mutex); +} + +static void port_xsfp_tlv_info_event(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct mag_cmd_get_xsfp_tlv_rsp *xsfp_tlv_info = buf_in; + struct hinic3_port_routine_cmd *rt_cmd = NULL; + struct hinic3_port_routine_cmd_extern *rt_cmd_ext = NULL; + struct hinic3_nic_io *nic_io = NULL; + size_t cpy_len = in_size - sizeof(struct mgmt_msg_head) - + XSFP_TLV_PRE_INFO_LEN; + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (nic_io == NULL) + return; + + if (cpy_len > XSFP_CMIS_INFO_MAX_SIZE) { + sdk_err(nic_io->dev_hdl, "invalid cpy_len(%lu)\n", cpy_len); + return; + } + rt_cmd = &nic_io->nic_cfg.rt_cmd; + rt_cmd_ext = &nic_io->nic_cfg.rt_cmd_ext; + mutex_lock(&nic_io->nic_cfg.sfp_mutex); + rt_cmd_ext->std_xsfp_tlv_info.port_id = xsfp_tlv_info->port_id; + memcpy(&(rt_cmd_ext->std_xsfp_tlv_info.tlv_buf[0]), + &(xsfp_tlv_info->tlv_buf[0]), cpy_len); + rt_cmd->mpu_send_sfp_info = false; + rt_cmd_ext->mpu_send_xsfp_tlv_info = true; mutex_unlock(&nic_io->nic_cfg.sfp_mutex); } @@ -543,8 +970,10 @@ static void port_sfp_abs_event(void *hwdev, void *buf_in, u16 in_size, struct hinic3_nic_io *nic_io = NULL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; if (in_size != sizeof(*sfp_abs)) { - sdk_err(nic_io->dev_hdl, "Invalid sfp absent cmd, length: %u, should be %ld\n", + sdk_err(nic_io->dev_hdl, "Invalid sfp absent cmd, length: %u, should be %lu\n", in_size, sizeof(*sfp_abs)); return; } @@ -564,9 +993,11 @@ bool hinic3_if_sfp_absent(void *hwdev) u8 port_id = hinic3_physical_port_id(hwdev); u16 out_size = sizeof(sfp_abs); int err; - bool sfp_abs_status; + bool sfp_abs_status = 0; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return true; memset(&sfp_abs, 0, sizeof(sfp_abs)); rt_cmd = &nic_io->nic_cfg.rt_cmd; @@ -597,10 +1028,187 @@ bool hinic3_if_sfp_absent(void *hwdev) return (sfp_abs.abs_status == 0 ? 
false : true); } +int hinic3_get_sfp_tlv_info(void *hwdev, struct drv_mag_cmd_get_xsfp_tlv_rsp + *sfp_tlv_info, + const struct mag_cmd_get_xsfp_tlv_req + *sfp_tlv_info_req) +{ + struct hinic3_nic_io *nic_io = NULL; + struct hinic3_port_routine_cmd_extern *rt_cmd_ext = NULL; + u16 out_size = sizeof(*sfp_tlv_info); + int err; + + if ((hwdev == NULL) || (sfp_tlv_info == NULL)) + return -EINVAL; + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (nic_io == NULL) + return -EINVAL; + + rt_cmd_ext = &nic_io->nic_cfg.rt_cmd_ext; + mutex_lock(&nic_io->nic_cfg.sfp_mutex); + if (rt_cmd_ext->mpu_send_xsfp_tlv_info == true) { + if (rt_cmd_ext->std_xsfp_tlv_info.head.status != 0) { + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + return -EIO; + } + + memcpy(sfp_tlv_info, &rt_cmd_ext->std_xsfp_tlv_info, + sizeof(*sfp_tlv_info)); + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + return 0; + } + + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_XSFP_TLV_INFO, + (void *)sfp_tlv_info_req, + sizeof(*sfp_tlv_info_req), + sfp_tlv_info, &out_size); + if ((sfp_tlv_info->head.status != 0) || (err != 0) || (out_size == 0)) { + nic_err(nic_io->dev_hdl, + "Failed to get port%u tlv sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n", + hinic3_physical_port_id(hwdev), err, + sfp_tlv_info->head.status, out_size); + return -EIO; + } + + return 0; +} + +static int hinic3_trans_cmis_get_page_pos(u32 page_id, u32 content_len, u32 *pos) +{ + if (page_id <= QSFP_CMIS_PAGE_03H) { + *pos = (page_id * content_len); + return 0; + } + + if (page_id == QSFP_CMIS_PAGE_11H) { + *pos = (QSFP_CMIS_PAGE_04H * content_len); + return 0; + } + + if (page_id == QSFP_CMIS_PAGE_12H) { + *pos = (QSFP_CMIS_PAGE_05H * content_len); + return 0; + } + + return -EINVAL; +} + +static int hinic3_get_page_key_info(struct mgmt_tlv_info *tlv_info, + struct parse_tlv_info *page_info, u8 idx, + u32 *total_len) +{ + u8 *src_addr = NULL; + u8 *dst_addr = NULL; + u8 *tmp_addr = NULL; + u32 page_id = 0; + u32 content_len = 0; + u32 src_pos = 0; + int ret; + + page_id = MGMT_TLV_GET_U32(tlv_info->value); + content_len = tlv_info->length - MGMT_TLV_U32_SIZE; + if (page_id == QSFP_CMIS_PAGE_00H) { + tmp_addr = (u8 *)(tlv_info + 1); + page_info->id = *(tmp_addr + MGMT_TLV_U32_SIZE); + } + + ret = hinic3_trans_cmis_get_page_pos(page_id, content_len, &src_pos); + if (ret != 0) + return ret; + + src_addr = page_info->tlv_page_info + src_pos; + tmp_addr = (u8 *)(tlv_info + 1); + dst_addr = tmp_addr + MGMT_TLV_U32_SIZE; + memcpy(src_addr, dst_addr, content_len); + if (ret != 0) + return ret; + + if (idx < XSFP_CMIS_PARSE_PAGE_NUM) + page_info->tlv_page_num[idx] = page_id; + + *total_len += content_len; + + return 0; +} + +static int hinic3_trans_cmis_tlv_info_to_buf(u8 *sfp_tlv_info, + struct parse_tlv_info *page_info) +{ + struct mgmt_tlv_info *tlv_info = NULL; + u8 *tlv_buf = sfp_tlv_info; + u8 idx = 0; + u32 total_len = 0; + int ret = 0; + bool need_continue = true; + + if ((sfp_tlv_info == NULL) || (page_info == NULL)) + return -EIO; + + while (need_continue) { + tlv_info = (struct mgmt_tlv_info *)tlv_buf; + switch (tlv_info->type) { + case MAG_XSFP_TYPE_PAGE: + ret = hinic3_get_page_key_info( + tlv_info, page_info, idx, &total_len); + if (ret != 0) { + pr_err("lib_get_page_key_info fail,ret:0x%x.\n", + ret); + break; + } + idx++; + break; + + case MAG_XSFP_TYPE_WIRE_TYPE: + page_info->wire_type = + MGMT_TLV_GET_U32(&(tlv_info->value[0])); + break; + + case MAG_XSFP_TYPE_END: + need_continue = 
false; + break; + + default: + break; + } + + tlv_buf += (sizeof(struct mgmt_tlv_info) + tlv_info->length); + } + + page_info->tlv_page_info_len = total_len; + + return 0; +} + +int hinic3_get_tlv_xsfp_eeprom(void *hwdev, u8 *data, u32 len) +{ + int err = 0; + struct mag_cmd_get_xsfp_tlv_req xsfp_tlv_info_req = {0}; + + xsfp_tlv_info_req.rsp_buf_len = XSFP_CMIS_INFO_MAX_SIZE; + xsfp_tlv_info_req.port_id = hinic3_physical_port_id(hwdev); + err = hinic3_get_sfp_tlv_info(hwdev, &g_xsfp_tlv_info, + &xsfp_tlv_info_req); + if (err != 0) + return err; + + err = hinic3_trans_cmis_tlv_info_to_buf(g_xsfp_tlv_info.tlv_buf, + &g_page_info); + if (err) + return -ENOMEM; + + memcpy(data, g_page_info.tlv_page_info, len); + + return 0; +} + int hinic3_get_sfp_info(void *hwdev, struct mag_cmd_get_xsfp_info *sfp_info) { struct hinic3_nic_io *nic_io = NULL; struct hinic3_port_routine_cmd *rt_cmd = NULL; + u8 sfp_info_status = 0; u16 out_size = sizeof(*sfp_info); int err; @@ -608,13 +1216,17 @@ int hinic3_get_sfp_info(void *hwdev, struct mag_cmd_get_xsfp_info *sfp_info) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; rt_cmd = &nic_io->nic_cfg.rt_cmd; + sfp_info_status = rt_cmd->std_sfp_info.head.status; mutex_lock(&nic_io->nic_cfg.sfp_mutex); if (rt_cmd->mpu_send_sfp_info) { - if (rt_cmd->std_sfp_info.head.status) { + if (sfp_info_status != 0) { mutex_unlock(&nic_io->nic_cfg.sfp_mutex); - return -EIO; + return (sfp_info_status == HINIC3_MGMT_CMD_UNSUPPORTED) + ? HINIC3_MGMT_CMD_UNSUPPORTED : -EIO; } memcpy(sfp_info, &rt_cmd->std_sfp_info, sizeof(*sfp_info)); @@ -626,7 +1238,11 @@ int hinic3_get_sfp_info(void *hwdev, struct mag_cmd_get_xsfp_info *sfp_info) sfp_info->port_id = hinic3_physical_port_id(hwdev); err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_XSFP_INFO, sfp_info, sizeof(*sfp_info), sfp_info, &out_size); - if (sfp_info->head.status || err || !out_size) { + if (sfp_info->head.status == HINIC3_MGMT_CMD_UNSUPPORTED) { + return HINIC3_MGMT_CMD_UNSUPPORTED; + } + + if ((sfp_info->head.status != 0) || (err != 0) || (out_size == 0)) { nic_err(nic_io->dev_hdl, "Failed to get port%u sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n", hinic3_physical_port_id(hwdev), err, @@ -642,7 +1261,7 @@ int hinic3_get_sfp_eeprom(void *hwdev, u8 *data, u32 len) struct mag_cmd_get_xsfp_info sfp_info; int err; - if (!hwdev || !data) + if (!hwdev || !data || len > PAGE_SIZE) return -EINVAL; if (hinic3_if_sfp_absent(hwdev)) @@ -654,7 +1273,7 @@ int hinic3_get_sfp_eeprom(void *hwdev, u8 *data, u32 len) if (err) return err; - memcpy(data, sfp_info.sfp_info, len); + memcpy(data, sfp_info.sfp_info, sizeof(sfp_info.sfp_info)); return 0; } @@ -664,7 +1283,7 @@ int hinic3_get_sfp_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext) struct hinic3_nic_io *nic_io = NULL; struct hinic3_port_routine_cmd *rt_cmd = NULL; u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; - int err; + int err = 0; if (!hwdev || !sfp_type || !sfp_type_ext) return -EINVAL; @@ -673,24 +1292,41 @@ int hinic3_get_sfp_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext) return -ENXIO; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; rt_cmd = &nic_io->nic_cfg.rt_cmd; mutex_lock(&nic_io->nic_cfg.sfp_mutex); if (rt_cmd->mpu_send_sfp_info) { - if (rt_cmd->std_sfp_info.head.status) { + if (rt_cmd->std_sfp_info.head.status == 0) { + *sfp_type =
rt_cmd->std_sfp_info.sfp_info[0]; + *sfp_type_ext = rt_cmd->std_sfp_info.sfp_info[1]; + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + return 0; + } + + if (rt_cmd->std_sfp_info.head.status != HINIC3_MGMT_CMD_UNSUPPORTED) { mutex_unlock(&nic_io->nic_cfg.sfp_mutex); return -EIO; } - *sfp_type = rt_cmd->std_sfp_info.sfp_info[0]; - *sfp_type_ext = rt_cmd->std_sfp_info.sfp_info[1]; - mutex_unlock(&nic_io->nic_cfg.sfp_mutex); - return 0; + err = HINIC3_MGMT_CMD_UNSUPPORTED; /* cmis */ } mutex_unlock(&nic_io->nic_cfg.sfp_mutex); - err = hinic3_get_sfp_eeprom(hwdev, (u8 *)sfp_data, - STD_SFP_INFO_MAX_SIZE); + if (err == 0) { + err = hinic3_get_sfp_eeprom(hwdev, (u8 *)sfp_data, + STD_SFP_INFO_MAX_SIZE); + } else { + /* mpu_send_sfp_info is false */ + err = hinic3_get_tlv_xsfp_eeprom(hwdev, (u8 *)sfp_data, + STD_SFP_INFO_MAX_SIZE); + } + + if (err == HINIC3_MGMT_CMD_UNSUPPORTED) + err = hinic3_get_tlv_xsfp_eeprom(hwdev, (u8 *)sfp_data, + STD_SFP_INFO_MAX_SIZE); + if (err) return err; @@ -796,12 +1432,20 @@ int hinic3_set_pf_bw_limit(void *hwdev, u32 bw_limit) return -EINVAL; } - old_bw_limit = nic_io->nic_cfg.pf_bw_limit; - nic_io->nic_cfg.pf_bw_limit = bw_limit; + if (nic_io->direct == HINIC3_NIC_TX) { + old_bw_limit = nic_io->nic_cfg.pf_bw_tx_limit; + nic_io->nic_cfg.pf_bw_tx_limit = bw_limit; + } else { + old_bw_limit = nic_io->nic_cfg.pf_bw_rx_limit; + nic_io->nic_cfg.pf_bw_rx_limit = bw_limit; + } err = hinic3_update_pf_bw(hwdev); if (err) { - nic_io->nic_cfg.pf_bw_limit = old_bw_limit; + if (nic_io->direct == HINIC3_NIC_TX) + nic_io->nic_cfg.pf_bw_tx_limit = old_bw_limit; + else + nic_io->nic_cfg.pf_bw_rx_limit = old_bw_limit; return err; } @@ -828,6 +1472,8 @@ int hinic3_pf_mag_mbox_handler(void *hwdev, u16 vf_id, return -EFAULT; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; for (index = 0; index < cmd_size; index++) { handler = &vf_mag_cmd_handler[index]; @@ -848,6 +1494,11 @@ static struct nic_event_handler mag_cmd_handler[] = { .handler = link_status_event_handler, }, + { + .cmd = MAG_CMD_EVENT_PORT_INFO, + .handler = port_info_event_printf, + }, + { .cmd = MAG_CMD_WIRE_EVENT, .handler = cable_plug_event, @@ -862,6 +1513,16 @@ static struct nic_event_handler mag_cmd_handler[] = { .cmd = MAG_CMD_GET_XSFP_PRESENT, .handler = port_sfp_abs_event, }, + + { + .cmd = MAG_CMD_GET_BOND_STATUS, + .handler = bond_status_event_handler, + }, + + { + .cmd = MAG_CMD_GET_XSFP_TLV_INFO, + .handler = port_xsfp_tlv_info_event, + }, }; static int hinic3_mag_event_handler(void *hwdev, u16 cmd, @@ -877,6 +1538,9 @@ static int hinic3_mag_event_handler(void *hwdev, u16 cmd, *out_size = 0; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + for (i = 0; i < size; i++) { if (cmd == mag_cmd_handler[i].cmd) { mag_cmd_handler[i].handler(hwdev, buf_in, in_size, @@ -917,7 +1581,8 @@ static int _mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u32 i, cmd_cnt = ARRAY_LEN(vf_mag_cmd_handler); bool cmd_to_pf = false; - if (hinic3_func_type(hwdev) == TYPE_VF) { + if (hinic3_func_type(hwdev) == TYPE_VF && + !hinic3_is_slave_host(hwdev)) { for (i = 0; i < cmd_cnt; i++) { if (cmd == vf_mag_cmd_handler[i].cmd) { cmd_to_pf = true; @@ -949,3 +1614,124 @@ static int mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, return _mag_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size, channel); } + +#if defined(ETHTOOL_GFECPARAM) && defined(ETHTOOL_SFECPARAM) +struct fecparam_value_map { + u8 hinic3_fec_offset; + u8 
hinic3_fec_value; + u8 ethtool_fec_value; +}; + +static void fecparam_convert(u32 opcode, u8 in_fec_param, u8 *out_fec_param) +{ + u8 i; + u8 fec_value_table_lenth; + struct fecparam_value_map fec_value_table[] = { + {PORT_FEC_NOT_SET, BIT(PORT_FEC_NOT_SET), ETHTOOL_FEC_NONE}, + {PORT_FEC_RSFEC, BIT(PORT_FEC_RSFEC), ETHTOOL_FEC_RS}, + {PORT_FEC_BASEFEC, BIT(PORT_FEC_BASEFEC), ETHTOOL_FEC_BASER}, + {PORT_FEC_NOFEC, BIT(PORT_FEC_NOFEC), ETHTOOL_FEC_OFF}, +#ifdef ETHTOOL_FEC_LLRS + {PORT_FEC_LLRSFEC, BIT(PORT_FEC_LLRSFEC), ETHTOOL_FEC_LLRS}, +#endif + {PORT_FEC_AUTO, BIT(PORT_FEC_AUTO), ETHTOOL_FEC_AUTO} + }; + + *out_fec_param = 0; + fec_value_table_lenth = (u8)(sizeof(fec_value_table) / sizeof(struct fecparam_value_map)); + + if (opcode == MAG_CMD_OPCODE_SET) { + for (i = 0; i < fec_value_table_lenth; i++) { + if ((in_fec_param & + fec_value_table[i].ethtool_fec_value) != 0) + /* The MPU uses the offset to determine the FEC mode. */ + *out_fec_param = + fec_value_table[i].hinic3_fec_offset; + } + } + + if (opcode == MAG_CMD_OPCODE_GET) { + for (i = 0; i < fec_value_table_lenth; i++) { + if ((in_fec_param & + fec_value_table[i].hinic3_fec_value) != 0) + *out_fec_param |= + fec_value_table[i].ethtool_fec_value; + } + } +} + +/* When the ethtool is used to set the FEC mode */ +static bool check_fecparam_is_valid(u8 fec_param) +{ + if ( +#ifdef ETHTOOL_FEC_LLRS + (fec_param == ETHTOOL_FEC_LLRS) || +#endif + (fec_param == ETHTOOL_FEC_RS) || + (fec_param == ETHTOOL_FEC_BASER) || + (fec_param == ETHTOOL_FEC_OFF)) { + return true; + } + return false; +} + +int set_fecparam(void *hwdev, u8 fecparam) +{ + struct mag_cmd_cfg_fec_mode fec_msg = {0}; + struct hinic3_nic_io *nic_io = NULL; + u16 out_size = sizeof(fec_msg); + u8 advertised_fec = 0; + int err; + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (check_fecparam_is_valid(fecparam) == false) { + nic_err(nic_io->dev_hdl, "fec param is invalid, failed to set fec param\n"); + return -EINVAL; + } + fecparam_convert(MAG_CMD_OPCODE_SET, fecparam, &advertised_fec); + fec_msg.opcode = MAG_CMD_OPCODE_SET; + fec_msg.port_id = hinic3_physical_port_id(hwdev); + fec_msg.advertised_fec = advertised_fec; + err = mag_msg_to_mgmt_sync_ch(hwdev, MAG_CMD_CFG_FEC_MODE, + &fec_msg, sizeof(fec_msg), + &fec_msg, &out_size, HINIC3_CHANNEL_NIC); + if ((err != 0) || (fec_msg.head.status != 0)) + return -EINVAL; + return 0; +} + +int get_fecparam(void *hwdev, u8 *advertised_fec, u8 *supported_fec) +{ + struct mag_cmd_cfg_fec_mode fec_msg = {0}; + struct hinic3_nic_io *nic_io = NULL; + u16 out_size = sizeof(fec_msg); + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + fec_msg.opcode = MAG_CMD_OPCODE_GET; + fec_msg.port_id = hinic3_physical_port_id(hwdev); + err = mag_msg_to_mgmt_sync_ch(hwdev, MAG_CMD_CFG_FEC_MODE, + &fec_msg, sizeof(fec_msg), + &fec_msg, &out_size, HINIC3_CHANNEL_NIC); + if ((err != 0) || (fec_msg.head.status != 0)) + return -EINVAL; + + /* fec_msg.advertised_fec: bit offset, + *value is BIT(fec_msg.advertised_fec); fec_msg.supported_fec: value + */ + fecparam_convert(MAG_CMD_OPCODE_GET, BIT(fec_msg.advertised_fec), + advertised_fec); + fecparam_convert(MAG_CMD_OPCODE_GET, fec_msg.supported_fec, + supported_fec); + return 0; +} +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c index 
307ad8d0e8cc1e7d3f184723c3ac41d6bb24b7a1..7790ae2ea56d3ee8177c001b42100dfd16bf7c21 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c @@ -20,6 +20,9 @@ #include #include "ossl_knl.h" +#if defined(HAVE_NDO_UDP_TUNNEL_ADD) || defined(HAVE_UDP_TUNNEL_NIC_INFO) +#include +#endif /* HAVE_NDO_UDP_TUNNEL_ADD || HAVE_UDP_TUNNEL_NIC_INFO */ #include "hinic3_hw.h" #include "hinic3_crm.h" #include "hinic3_mt.h" @@ -35,8 +38,8 @@ #include "hinic3_dcb.h" #include "hinic3_nic_prof.h" #include "hinic3_profile.h" +#include "hinic3_bond.h" -/*lint -e806*/ #define DEFAULT_POLL_WEIGHT 64 static unsigned int poll_weight = DEFAULT_POLL_WEIGHT; module_param(poll_weight, uint, 0444); @@ -68,7 +71,9 @@ static unsigned char set_link_status_follow = HINIC3_LINK_FOLLOW_STATUS_MAX; module_param(set_link_status_follow, byte, 0444); MODULE_PARM_DESC(set_link_status_follow, "Set link status follow port status (0=default,1=follow,2=separate,3=unset"); -/*lint +e806*/ +static bool page_pool_enabled = true; +module_param(page_pool_enabled, bool, 0444); +MODULE_PARM_DESC(page_pool_enabled, "enable/disable page_pool feature for rxq page management (default enable)"); #define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq" @@ -80,6 +85,8 @@ MODULE_PARM_DESC(set_link_status_follow, "Set link status follow port status (0= #define HINIC3_SQ_DEPTH 1024 #define HINIC3_RQ_DEPTH 1024 +#define LRO_ENABLE 1 + enum hinic3_rx_buff_len { RX_BUFF_VALID_2KB = 2, RX_BUFF_VALID_4KB = 4, @@ -88,6 +95,9 @@ enum hinic3_rx_buff_len { }; #define CONVERT_UNIT 1024 +#define NIC_MAX_PF_NUM 32 + +#define BIFUR_RESOURCE_PF_SSID 0x5a1 #ifdef HAVE_MULTI_VLAN_OFFLOAD_EN static int hinic3_netdev_event(struct notifier_block *notifier, unsigned long event, void *ptr); @@ -99,6 +109,17 @@ static struct notifier_block hinic3_netdev_notifier = { .notifier_call = hinic3_netdev_event, }; +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +static const struct udp_tunnel_nic_info hinic3_udp_tunnels = { + .set_port = hinic3_udp_tunnel_set_port, + .unset_port = hinic3_udp_tunnel_unset_port, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ + static void hinic3_register_notifier(struct hinic3_nic_dev *nic_dev) { int err; @@ -264,6 +285,11 @@ static void netdev_feature_init(struct net_device *netdev) netdev->features |= dft_fts | cso_fts | tso_fts | vlan_fts; netdev->vlan_features |= dft_fts | cso_fts | tso_fts; + if (nic_dev->nic_cap.lro_enable == LRO_ENABLE) { + netdev->features |= NETIF_F_LRO; + netdev->vlan_features |= NETIF_F_LRO; + } + #ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT hw_features |= get_netdev_hw_features(netdev); #else @@ -350,6 +376,7 @@ static int hinic3_init_intr_coalesce(struct hinic3_nic_dev *nic_dev) static void hinic3_free_intr_coalesce(struct hinic3_nic_dev *nic_dev) { kfree(nic_dev->intr_coalesce); + nic_dev->intr_coalesce = NULL; } static int hinic3_alloc_txrxqs(struct hinic3_nic_dev *nic_dev) @@ -404,8 +431,85 @@ static void hinic3_sw_deinit(struct hinic3_nic_dev *nic_dev) HINIC3_CHANNEL_NIC); hinic3_clear_rss_config(nic_dev); - if (test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags)) - hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0); + hinic3_dcb_deinit(nic_dev); +} + +static void hinic3_netdev_mtu_init(struct net_device *netdev) +{ + /* MTU range: 384 - 9600 */ +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + netdev->min_mtu = HINIC3_MIN_MTU_SIZE; + netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE; +#endif + 
+#ifdef HAVE_NETDEVICE_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = HINIC3_MIN_MTU_SIZE; + netdev->extended->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE; +#endif +} + +static int hinic3_set_default_mac(struct hinic3_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u8 mac_addr[ETH_ALEN]; + int err = 0; + + err = hinic3_get_default_mac(nic_dev->hwdev, mac_addr); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to get MAC address\n"); + return err; + } + + ether_addr_copy(netdev->dev_addr, mac_addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) { + nic_err(&nic_dev->pdev->dev, + "Invalid MAC address %pM\n", + netdev->dev_addr); + return -EIO; + } + + nic_info(&nic_dev->pdev->dev, + "Invalid MAC address %pM, using random\n", + netdev->dev_addr); + eth_hw_addr_random(netdev); + } + + err = hinic3_set_mac(nic_dev->hwdev, netdev->dev_addr, 0, + hinic3_global_func_id(nic_dev->hwdev), + HINIC3_CHANNEL_NIC); + /* When this is VF driver, we must consider that PF has already set VF + * MAC, and we can't consider this condition is error status during + * driver probe procedure. + */ + if (err && err != HINIC3_PF_SET_VF_ALREADY) { + nic_err(&nic_dev->pdev->dev, "Failed to set default MAC\n"); + } + + if (err == HINIC3_PF_SET_VF_ALREADY) + return 0; + + return err; +} + +static void hinic3_outband_cfg_init(struct hinic3_nic_dev *nic_dev) +{ + u16 outband_default_vid = 0; + int err = 0; + + if (!nic_dev->nic_cap.outband_vlan_cfg_en) + return; + + err = hinic3_get_outband_vlan_cfg(nic_dev->hwdev, &outband_default_vid); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to get_outband_cfg, err: %d\n", err); + return; + } + + nic_dev->outband_cfg.outband_default_vid = outband_default_vid; + + return; } static int hinic3_sw_init(struct hinic3_nic_dev *nic_dev) @@ -421,8 +525,6 @@ static int hinic3_sw_init(struct hinic3_nic_dev *nic_dev) nic_features &= NIC_DRV_DEFAULT_FEATURE; hinic3_update_nic_feature(nic_dev->hwdev, nic_features); - sema_init(&nic_dev->port_state_sem, 1); - err = hinic3_dcb_init(nic_dev); if (err) { nic_err(&nic_dev->pdev->dev, "Failed to init dcb\n"); @@ -434,47 +536,12 @@ static int hinic3_sw_init(struct hinic3_nic_dev *nic_dev) hinic3_try_to_enable_rss(nic_dev); - err = hinic3_get_default_mac(nic_dev->hwdev, netdev->dev_addr); + err = hinic3_set_default_mac(nic_dev); if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to get MAC address\n"); - goto get_mac_err; - } - - if (!is_valid_ether_addr(netdev->dev_addr)) { - if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) { - nic_err(&nic_dev->pdev->dev, "Invalid MAC address %pM\n", - netdev->dev_addr); - err = -EIO; - goto err_mac; - } - - nic_info(&nic_dev->pdev->dev, "Invalid MAC address %pM, using random\n", - netdev->dev_addr); - eth_hw_addr_random(netdev); - } - - err = hinic3_set_mac(nic_dev->hwdev, netdev->dev_addr, 0, - hinic3_global_func_id(nic_dev->hwdev), - HINIC3_CHANNEL_NIC); - /* When this is VF driver, we must consider that PF has already set VF - * MAC, and we can't consider this condition is error status during - * driver probe procedure. 
- */ - if (err && err != HINIC3_PF_SET_VF_ALREADY) { - nic_err(&nic_dev->pdev->dev, "Failed to set default MAC\n"); goto set_mac_err; } - /* MTU range: 384 - 9600 */ -#ifdef HAVE_NETDEVICE_MIN_MAX_MTU - netdev->min_mtu = HINIC3_MIN_MTU_SIZE; - netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE; -#endif - -#ifdef HAVE_NETDEVICE_EXTENDED_MIN_MAX_MTU - netdev->extended->min_mtu = HINIC3_MIN_MTU_SIZE; - netdev->extended->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE; -#endif + hinic3_netdev_mtu_init(netdev); err = hinic3_alloc_txrxqs(nic_dev); if (err) { @@ -482,6 +549,8 @@ static int hinic3_sw_init(struct hinic3_nic_dev *nic_dev) goto alloc_qps_err; } + hinic3_outband_cfg_init(nic_dev); + return 0; alloc_qps_err: @@ -490,8 +559,6 @@ static int hinic3_sw_init(struct hinic3_nic_dev *nic_dev) HINIC3_CHANNEL_NIC); set_mac_err: -err_mac: -get_mac_err: hinic3_clear_rss_config(nic_dev); return err; @@ -690,20 +757,51 @@ static void hinic3_periodic_work_handler(struct work_struct *work) queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ); } +static void hinic3_vport_stats_work_handler(struct work_struct *work) +{ + int err; + struct hinic3_vport_stats vport_stats = {0}; + struct delayed_work *delay = to_delayed_work(work); + struct hinic3_nic_dev *nic_dev = container_of(delay, struct hinic3_nic_dev, vport_stats_work); + err = hinic3_get_vport_stats(nic_dev->hwdev, hinic3_global_func_id(nic_dev->hwdev), &vport_stats); + if (err) + nic_err(&nic_dev->pdev->dev, "Failed to get dropped stats from fw\n"); + else + nic_dev->vport_stats.rx_discard_vport = vport_stats.rx_discard_vport; + queue_delayed_work(nic_dev->workq, &nic_dev->vport_stats_work, HZ); +} + +static void free_nic_dev_vram(struct hinic3_nic_dev *nic_dev) +{ + int is_use_vram = get_use_vram_flag(); + if (is_use_vram != 0) + hi_vram_kfree((void *)nic_dev->nic_vram, nic_dev->nic_vram_name, + sizeof(struct hinic3_vram)); + else + kfree(nic_dev->nic_vram); + nic_dev->nic_vram = NULL; +} + static void free_nic_dev(struct hinic3_nic_dev *nic_dev) { hinic3_deinit_nic_prof_adapter(nic_dev); destroy_workqueue(nic_dev->workq); kfree(nic_dev->vlan_bitmap); + nic_dev->vlan_bitmap = NULL; + free_nic_dev_vram(nic_dev); } static int setup_nic_dev(struct net_device *netdev, struct hinic3_lld_dev *lld_dev) { struct pci_dev *pdev = lld_dev->pdev; - struct hinic3_nic_dev *nic_dev; - char *netdev_name_fmt; + struct hinic3_nic_dev *nic_dev = NULL; + char *netdev_name_fmt = NULL; u32 page_num; + u16 func_id; + int ret; + int is_in_kexec = vram_get_kexec_flag(); + int is_use_vram = get_use_vram_flag(); nic_dev = (struct hinic3_nic_dev *)netdev_priv(netdev); nic_dev->netdev = netdev; @@ -718,24 +816,65 @@ static int setup_nic_dev(struct net_device *netdev, nic_dev->dma_rx_buff_size = RX_BUFF_NUM_PER_PAGE * nic_dev->rx_buff_len; page_num = nic_dev->dma_rx_buff_size / PAGE_SIZE; nic_dev->page_order = page_num > 0 ? 
ilog2(page_num) : 0; + nic_dev->page_pool_enabled = page_pool_enabled; + nic_dev->outband_cfg.outband_default_vid = 0; + + // value other than 0 indicates hot replace + if (is_use_vram != 0) { + func_id = hinic3_global_func_id(nic_dev->hwdev); + ret = snprintf(nic_dev->nic_vram_name, + VRAM_NAME_MAX_LEN, + "%s%u", VRAM_NIC_VRAM, func_id); + if (ret < 0) { + nic_err(&pdev->dev, "NIC vram name snprintf failed, ret:%d.\n", + ret); + return -EINVAL; + } + + nic_dev->nic_vram = (struct hinic3_vram *)hi_vram_kalloc(nic_dev->nic_vram_name, + sizeof(struct hinic3_vram)); + if (!nic_dev->nic_vram) { + nic_err(&pdev->dev, "Failed to allocate nic vram\n"); + return -ENOMEM; + } + + if (is_in_kexec == 0) + nic_dev->nic_vram->vram_mtu = netdev->mtu; + else + netdev->mtu = nic_dev->nic_vram->vram_mtu; + } else { + nic_dev->nic_vram = kzalloc(sizeof(struct hinic3_vram), + GFP_KERNEL); + if (!nic_dev->nic_vram) { + nic_err(&pdev->dev, "Failed to allocate nic vram\n"); + return -ENOMEM; + } + nic_dev->nic_vram->vram_mtu = netdev->mtu; + } mutex_init(&nic_dev->nic_mutex); nic_dev->vlan_bitmap = kzalloc(VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL); if (!nic_dev->vlan_bitmap) { nic_err(&pdev->dev, "Failed to allocate vlan bitmap\n"); - return -ENOMEM; + ret = -ENOMEM; + goto vlan_bitmap_error; } nic_dev->workq = create_singlethread_workqueue(HINIC3_NIC_DEV_WQ_NAME); if (!nic_dev->workq) { nic_err(&pdev->dev, "Failed to initialize nic workqueue\n"); - kfree(nic_dev->vlan_bitmap); - return -ENOMEM; + ret = -ENOMEM; + goto create_workq_error; } - INIT_DELAYED_WORK(&nic_dev->periodic_work, hinic3_periodic_work_handler); - INIT_DELAYED_WORK(&nic_dev->rxq_check_work, hinic3_rxq_check_work_handler); + INIT_DELAYED_WORK(&nic_dev->periodic_work, + hinic3_periodic_work_handler); + INIT_DELAYED_WORK(&nic_dev->rxq_check_work, + hinic3_rxq_check_work_handler); + if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) + INIT_DELAYED_WORK(&nic_dev->vport_stats_work, + hinic3_vport_stats_work_handler); INIT_LIST_HEAD(&nic_dev->uc_filter_list); INIT_LIST_HEAD(&nic_dev->mc_filter_list); @@ -748,10 +887,23 @@ static int setup_nic_dev(struct net_device *netdev, hinic3_init_nic_prof_adapter(nic_dev); netdev_name_fmt = hinic3_get_dft_netdev_name_fmt(nic_dev); - if (netdev_name_fmt) - strncpy(netdev->name, netdev_name_fmt, IFNAMSIZ); + if (netdev_name_fmt) { + ret = strscpy(netdev->name, netdev_name_fmt, IFNAMSIZ); + if (ret < 0) + goto get_netdev_name_error; + } return 0; + +get_netdev_name_error: + hinic3_deinit_nic_prof_adapter(nic_dev); + destroy_workqueue(nic_dev->workq); +create_workq_error: + kfree(nic_dev->vlan_bitmap); + nic_dev->vlan_bitmap = NULL; +vlan_bitmap_error: + free_nic_dev_vram(nic_dev); + return ret; } static int hinic3_set_default_hw_feature(struct hinic3_nic_dev *nic_dev) @@ -790,6 +942,32 @@ static int hinic3_set_default_hw_feature(struct hinic3_nic_dev *nic_dev) return 0; } +static void hinic3_bond_init(struct hinic3_nic_dev *nic_dev) +{ + u32 bond_id = HINIC3_INVALID_BOND_ID; + int err = hinic3_create_bond(nic_dev->hwdev, &bond_id); + if (err != 0) { + goto bond_init_failed; + } + + /* bond id does not change, means this pf is not bond active pf, no log is generated */ + if (bond_id == HINIC3_INVALID_BOND_ID) { + return; + } + + err = hinic3_open_close_bond(nic_dev->hwdev, true); + if (err != 0) { + hinic3_delete_bond(nic_dev->hwdev); + goto bond_init_failed; + } + + nic_info(&nic_dev->pdev->dev, "Bond %d init success\n", bond_id); + return; + +bond_init_failed: + nic_err(&nic_dev->pdev->dev, "Bond init failed\n"); +} + static 
int nic_probe(struct hinic3_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name) { @@ -820,6 +998,12 @@ static int nic_probe(struct hinic3_lld_dev *lld_dev, void **uld_dev, goto err_out; } + err = hinic3_get_dev_cap(lld_dev->hwdev); + if (err != 0) { + nic_err(&pdev->dev, "Failed to get dev cap\n"); + goto err_out; + } + max_qps = hinic3_func_max_nic_qnum(lld_dev->hwdev); netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps); if (!netdev) { @@ -851,23 +1035,35 @@ static int nic_probe(struct hinic3_lld_dev *lld_dev, void **uld_dev, hinic3_assign_netdev_ops(nic_dev); netdev_feature_init(netdev); - +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + netdev->udp_tunnel_nic_info = &hinic3_udp_tunnels; +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ err = hinic3_set_default_hw_feature(nic_dev); if (err) goto set_features_err; + if (hinic3_get_bond_create_mode(lld_dev->hwdev) != 0) { + hinic3_bond_init(nic_dev); + } + #ifdef HAVE_MULTI_VLAN_OFFLOAD_EN hinic3_register_notifier(nic_dev); #endif - err = register_netdev(netdev); - if (err) { - nic_err(&pdev->dev, "Failed to register netdev\n"); - err = -ENOMEM; - goto netdev_err; + if (pdev->subsystem_device != BIFUR_RESOURCE_PF_SSID) { + err = register_netdev(netdev); + if (err) { + nic_err(&pdev->dev, "Failed to register netdev\n"); + err = -ENOMEM; + goto netdev_err; + } } queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ); + if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) + queue_delayed_work(nic_dev->workq, + &nic_dev->vport_stats_work, HZ); + netif_carrier_off(netdev); *uld_dev = nic_dev; @@ -900,6 +1096,27 @@ static int nic_probe(struct hinic3_lld_dev *lld_dev, void **uld_dev, return err; } +static void hinic3_bond_deinit(struct hinic3_nic_dev *nic_dev) +{ + int ret = 0; + + ret = hinic3_open_close_bond(nic_dev->hwdev, false); + if (ret != 0) { + goto bond_deinit_failed; + } + + ret = hinic3_delete_bond(nic_dev->hwdev); + if (ret != 0) { + goto bond_deinit_failed; + } + + nic_info(&nic_dev->pdev->dev, "Bond deinit success\n"); + return; + +bond_deinit_failed: + nic_err(&nic_dev->pdev->dev, "Bond deinit failed\n"); +} + static void nic_remove(struct hinic3_lld_dev *lld_dev, void *adapter) { struct hinic3_nic_dev *nic_dev = adapter; @@ -912,11 +1129,16 @@ static void nic_remove(struct hinic3_lld_dev *lld_dev, void *adapter) netdev = nic_dev->netdev; - unregister_netdev(netdev); + if (lld_dev->pdev->subsystem_device != BIFUR_RESOURCE_PF_SSID) { + unregister_netdev(netdev); + } #ifdef HAVE_MULTI_VLAN_OFFLOAD_EN hinic3_unregister_notifier(nic_dev); #endif + if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) + cancel_delayed_work_sync(&nic_dev->vport_stats_work); + cancel_delayed_work_sync(&nic_dev->periodic_work); cancel_delayed_work_sync(&nic_dev->rxq_check_work); cancel_work_sync(&nic_dev->rx_mode_work); @@ -924,6 +1146,10 @@ static void nic_remove(struct hinic3_lld_dev *lld_dev, void *adapter) hinic3_flush_rx_flow_rule(nic_dev); + if (hinic3_get_bond_create_mode(lld_dev->hwdev) != 0) { + hinic3_bond_deinit(nic_dev); + } + hinic3_update_nic_feature(nic_dev->hwdev, 0); hinic3_set_nic_feature_to_hw(nic_dev->hwdev); @@ -933,6 +1159,7 @@ static void nic_remove(struct hinic3_lld_dev *lld_dev, void *adapter) hinic3_deinit_nic_prof_adapter(nic_dev); kfree(nic_dev->vlan_bitmap); + nic_dev->vlan_bitmap = NULL; free_netdev(netdev); @@ -981,6 +1208,120 @@ static void hinic3_port_module_event_handler(struct hinic3_nic_dev *nic_dev, } } +bool hinic3_need_proc_link_event(struct hinic3_lld_dev *lld_dev) +{ + int ret = 0; + u16 func_id; + u8 roce_enable = false; + bool is_slave_func = false; 
+	struct hinic3_hw_bond_infos hw_bond_infos = {0};
+
+	if (!lld_dev)
+		return false;
+
+	/* A non-slave function needs to handle the link down event */
+	ret = hinic3_is_slave_func(lld_dev->hwdev, &is_slave_func);
+	if (ret != 0) {
+		nic_err(&lld_dev->pdev->dev, "NIC get slave func info failed\n");
+		return true;
+	}
+
+	if (!is_slave_func)
+		return true;
+
+	/* If the vroce feature is not enabled, the link down event must be handled */
+	func_id = hinic3_global_func_id(lld_dev->hwdev);
+	ret = hinic3_get_func_vroce_enable(lld_dev->hwdev, func_id,
+					   &roce_enable);
+	if (ret != 0)
+		return true;
+
+	if (!roce_enable)
+		return true;
+
+	/* If no bond has been created, the link down event must be handled */
+	hw_bond_infos.bond_id = HINIC_OVS_BOND_DEFAULT_ID;
+
+	ret = hinic3_get_hw_bond_infos(lld_dev->hwdev, &hw_bond_infos,
+				       HINIC3_CHANNEL_COMM);
+	if (ret != 0) {
+		pr_err("[ROCE, ERR] Get chip bond info failed (%d)\n", ret);
+		return true;
+	}
+
+	if (!hw_bond_infos.valid)
+		return true;
+
+	return false;
+}
+
+bool hinic3_need_proc_bond_event(struct hinic3_lld_dev *lld_dev)
+{
+	return !hinic3_need_proc_link_event(lld_dev);
+}
+
+static void hinic_porc_bond_state_change(struct hinic3_lld_dev *lld_dev,
+					 void *adapter,
+					 struct hinic3_event_info *event)
+{
+	struct hinic3_nic_dev *nic_dev = adapter;
+
+	if (!nic_dev || !event || !hinic3_support_nic(lld_dev->hwdev, NULL))
+		return;
+
+	switch (HINIC3_SRV_EVENT_TYPE(event->service, event->type)) {
+	case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_NIC, EVENT_NIC_BOND_DOWN):
+		if (!hinic3_need_proc_bond_event(lld_dev)) {
+			nic_info(&lld_dev->pdev->dev, "NIC don't need proc bond event\n");
+			return;
+		}
+		nic_info(&lld_dev->pdev->dev, "NIC proc bond down\n");
+		hinic3_link_status_change(nic_dev, false);
+		break;
+	case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_NIC, EVENT_NIC_BOND_UP):
+		if (!hinic3_need_proc_bond_event(lld_dev)) {
+			nic_info(&lld_dev->pdev->dev, "NIC don't need proc bond event\n");
+			return;
+		}
+		nic_info(&lld_dev->pdev->dev, "NIC proc bond up\n");
+		hinic3_link_status_change(nic_dev, true);
+		break;
+	default:
+		break;
+	}
+}
+
+static void hinic3_outband_cfg_event_handler(struct hinic3_nic_dev *nic_dev,
+					     struct hinic3_outband_cfg_info *info)
+{
+	int err = 0;
+	if (!nic_dev || !info || !hinic3_support_nic(nic_dev->hwdev, NULL)) {
+		pr_err("Outband cfg event invalid param\n");
+		return;
+	}
+
+	if (hinic3_func_type(nic_dev->hwdev) != TYPE_VF &&
+	    info->func_id >= NIC_MAX_PF_NUM) {
+		err = hinic3_notify_vf_outband_cfg(nic_dev->hwdev,
+						   info->func_id,
+						   info->outband_default_vid);
+		if (err)
+			nic_err(&nic_dev->pdev->dev, "Outband cfg event notify vf err: %d, "
+				"func_id: 0x%x, vid: 0x%x\n",
+				err, info->func_id, info->outband_default_vid);
+		return;
+	}
+
+	nic_info(&nic_dev->pdev->dev,
+		 "Change outband default vid from %u to %u\n",
+		 nic_dev->outband_cfg.outband_default_vid,
+		 info->outband_default_vid);
+
+	nic_dev->outband_cfg.outband_default_vid = info->outband_default_vid;
+
+	return;
+}
+
 static void nic_event(struct hinic3_lld_dev *lld_dev, void *adapter,
 		      struct hinic3_event_info *event)
 {
@@ -992,14 +1333,25 @@ static void nic_event(struct hinic3_lld_dev *lld_dev, void *adapter,
 	switch (HINIC3_SRV_EVENT_TYPE(event->service, event->type)) {
 	case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_NIC, EVENT_NIC_LINK_DOWN):
+		if (!hinic3_need_proc_link_event(lld_dev)) {
+			nic_info(&lld_dev->pdev->dev, "NIC don't need proc link event\n");
+			return;
+		}
 		hinic3_link_status_change(nic_dev, false);
 		break;
 	case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_NIC, EVENT_NIC_LINK_UP):
 		hinic3_link_status_change(nic_dev, true);
 		break;
+	case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_NIC, EVENT_NIC_BOND_DOWN):
+
case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_NIC, EVENT_NIC_BOND_UP): + hinic_porc_bond_state_change(lld_dev, adapter, event); + break; case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_NIC, EVENT_NIC_PORT_MODULE_EVENT): hinic3_port_module_event_handler(nic_dev, event); break; + case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_NIC, EVENT_NIC_OUTBAND_CFG): + hinic3_outband_cfg_event_handler(nic_dev, (void *)event->event_data); + break; case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_COMM, EVENT_COMM_SRIOV_STATE_CHANGE): sriov_state_change(nic_dev, (void *)event->event_data); break; @@ -1059,7 +1411,7 @@ struct hinic3_uld_info g_nic_uld_info = { .resume = NULL, .event = nic_event, .ioctl = nic_ioctl, -}; /*lint -e766*/ +}; struct hinic3_uld_info *get_nic_uld_info(void) { diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h index 774193a319b0517bc79dbcaeeda29dc3caf329b1..7bc9376e3a16d9a95a8800a83252348081612cee 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h @@ -4,11 +4,12 @@ #ifndef HINIC3_MT_H #define HINIC3_MT_H -#define HINIC3_DRV_NAME "hisdk3" +#define HINIC3_DRV_NAME "hinic3" #define HINIC3_CHIP_NAME "hinic" /* Interrupt at most records, interrupt will be recorded in the FFM */ #define NICTOOL_CMD_TYPE (0x18) +#define HINIC3_CARD_NAME_MAX_LEN (128) struct api_cmd_rd { u32 pf_id; @@ -110,7 +111,8 @@ enum dbgtool_cmd { DBGTOOL_CMD_NUM }; -#define PF_MAX_SIZE (16) +#define HINIC_PF_MAX_SIZE (16) +#define HINIC_VF_MAX_SIZE (4096) #define BUSINFO_LEN (32) enum module_name { @@ -132,7 +134,9 @@ enum module_name { SEND_TO_MIGRATE_DRIVER, SEND_TO_PPA_DRIVER, SEND_TO_CUSTOM_DRIVER = SEND_TO_SRV_DRV_BASE + 11, - SEND_TO_DRIVER_MAX = SEND_TO_SRV_DRV_BASE + 15, /* reserved */ + SEND_TO_VSOCK_DRIVER = SEND_TO_SRV_DRV_BASE + 14, + SEND_TO_BIFUR_DRIVER, + SEND_TO_DRIVER_MAX = SEND_TO_SRV_DRV_BASE + 16, /* reserved */ }; enum driver_cmd_type { @@ -159,7 +163,7 @@ enum driver_cmd_type { GET_CHIP_FAULT_STATS, NIC_RSVD1, NIC_RSVD2, - NIC_RSVD3, + GET_OS_HOT_REPLACE_INFO, GET_CHIP_ID, GET_SINGLE_CARD_INFO, GET_FIRMWARE_ACTIVE_STATUS, @@ -170,7 +174,7 @@ enum driver_cmd_type { GET_LOOPBACK_MODE = 32, SET_LOOPBACK_MODE, SET_LINK_MODE, - SET_PF_BW_LIMIT, + SET_TX_PF_BW_LIMIT, GET_PF_BW_LIMIT, ROCE_CMD, GET_POLL_WEIGHT, @@ -188,6 +192,7 @@ enum driver_cmd_type { GET_NIC_STATS_STRING, GET_NIC_STATS_INFO, GET_PF_ID, + GET_MBOX_CNT, NIC_RSVD4, NIC_RSVD5, DCB_QOS_INFO, @@ -204,16 +209,54 @@ enum driver_cmd_type { RSS_INDIR, PORT_ID, + SET_RX_PF_BW_LIMIT = 0x43, + GET_FUNC_CAP = 0x50, GET_XSFP_PRESENT = 0x51, GET_XSFP_INFO = 0x52, DEV_NAME_TEST = 0x53, + GET_XSFP_INFO_COMP_CMIS = 0x54, GET_WIN_STAT = 0x60, WIN_CSR_READ = 0x61, WIN_CSR_WRITE = 0x62, WIN_API_CMD_RD = 0x63, + GET_FUSION_Q = 0x64, + ROCE_CMD_SET_LDCP_PARAM = 0x70, + + ROCE_CMD_GET_QPC_FROM_CACHE = 0x80, + ROCE_CMD_GET_QPC_FROM_HOST = 0x81, + ROCE_CMD_GET_CQC_FROM_CACHE = 0x82, + ROCE_CMD_GET_CQC_FROM_HOST = 0x83, + ROCE_CMD_GET_SRQC_FROM_CACHE = 0x84, + ROCE_CMD_GET_SRQC_FROM_HOST = 0x85, + ROCE_CMD_GET_MPT_FROM_CACHE = 0x86, + ROCE_CMD_GET_MPT_FROM_HOST = 0x87, + ROCE_CMD_GET_GID_FROM_CACHE = 0x88, + ROCE_CMD_GET_QPC_CQC_PI_CI = 0x89, + ROCE_CMD_GET_QP_COUNT = 0x8a, + ROCE_CMD_GET_DEV_ALGO = 0x8b, + + ROCE_CMD_START_CAP_PACKET = 0x90, + ROCE_CMD_STOP_CAP_PACKET = 0x91, + ROCE_CMD_QUERY_CAP_INFO = 0x92, + ROCE_CMD_ENABLE_QP_CAP_PACKET = 0x93, + ROCE_CMD_DISABLE_QP_CAP_PACKET = 0x94, + ROCE_CMD_QUERY_QP_CAP_INFO = 0x95, + + ROCE_CMD_ENABLE_BW_CTRL = 0xa0, + 
ROCE_CMD_DISABLE_BW_CTRL = 0xa1, + ROCE_CMD_CHANGE_BW_CTRL_PARAM = 0xa2, + ROCE_CMD_QUERY_BW_CTRL_PARAM = 0xa3, + + ROCE_CMD_TIMEOUT_ALARM = 0xb0, + ROCE_CMD_PORT_TRAFFIC = 0Xb1, + ROCE_CMD_BOND_HASH_TYPE_SET = 0xb2, + + BIFUR_SET_ENABLE = 0xc0, + BIFUR_GET_ENABLE = 0xc1, + VM_COMPAT_TEST = 0xFF }; @@ -229,7 +272,8 @@ enum sm_cmd_type { SM_CTR_RD64, SM_CTR_RD32_CLEAR, SM_CTR_RD64_PAIR_CLEAR, - SM_CTR_RD64_CLEAR + SM_CTR_RD64_CLEAR, + SM_CTR_RD16_CLEAR, }; struct cqm_stats { @@ -317,10 +361,22 @@ struct pf_info { }; struct card_info { - struct pf_info pf[PF_MAX_SIZE]; + struct pf_info pf[HINIC_PF_MAX_SIZE]; u32 pf_num; }; +struct func_mbox_cnt_info { + char bus_info[BUSINFO_LEN]; + u64 send_cnt; + u64 ack_cnt; +}; + +struct card_mbox_cnt_info { + struct func_mbox_cnt_info func_info[HINIC_PF_MAX_SIZE + + HINIC_VF_MAX_SIZE]; + u32 func_num; +}; + struct hinic3_nic_loop_mode { u32 loop_mode; u32 loop_ctrl; @@ -661,6 +717,161 @@ struct get_card_bond_msg_s { struct bond_all_msg_s all_msg[MAX_BONDING_CNT_PER_CARD]; }; +#define MAX_FUSION_Q_STATS_STR_LEN 16 +#define MAX_FUSION_Q_NUM 256 +struct queue_status_s { + pid_t tgid; + char status[MAX_FUSION_Q_STATS_STR_LEN]; +}; +struct fusion_q_status_s { + u16 queue_num; + struct queue_status_s queue[MAX_FUSION_Q_NUM]; +}; + +struct fusion_q_tx_hw_page { + u64 phy_addr; + u64 *map_addr; +}; + +struct fusion_sq_info { + u16 q_id; + u16 pi; + u16 ci; /* sw_ci */ + u16 fi; /* hw_ci */ + u32 q_depth; + u16 pi_reverse; + u16 wqebb_size; + u8 priority; + u16 *ci_addr; + u64 cla_addr; + void *slq_handle; + struct fusion_q_tx_hw_page direct_wqe; + struct fusion_q_tx_hw_page doorbell; + u32 page_idx; + u32 glb_sq_id; +}; + +struct fusion_q_tx_wqe { + u32 data[4]; +}; + +struct fusion_rq_info { + u16 q_id; + u16 delta; + u16 hw_pi; + u16 ci; /* sw_ci */ + u16 sw_pi; + u16 wqebb_size; + u16 q_depth; + u16 buf_len; + + void *slq_handle; + u64 ci_wqe_page_addr; + u64 ci_cla_tbl_addr; + + u8 coalesc_timer_cfg; + u8 pending_limt; + u16 msix_idx; + u32 msix_vector; +}; + +struct fusion_q_rx_wqe { + u32 data[8]; +}; + +struct fusion_q_rx_cqe { + union { + struct { + unsigned int checksum_err : 16; + unsigned int lro_num : 8; + unsigned int rsvd1 : 7; + unsigned int rx_done : 1; + } bs; + unsigned int value; + } dw0; + + union { + struct { + unsigned int vlan : 16; + unsigned int length : 16; + } bs; + unsigned int value; + } dw1; + + union { + struct { + unsigned int pkt_types : 12; + unsigned int rsvd : 4; + unsigned int udp_0 : 1; + unsigned int ipv6_ex_add : 1; + unsigned int loopback : 1; + unsigned int umbcast : 2; + unsigned int vlan_offload_en : 1; + unsigned int tag_num : 2; + unsigned int rss_type : 8; + } bs; + unsigned int value; + } dw2; + + union { + struct { + unsigned int rss_hash_value; + } bs; + unsigned int value; + } dw3; + + union { + struct { + unsigned int tx_ts_seq : 16; + unsigned int message_1588_offset : 8; + unsigned int message_1588_type : 4; + unsigned int rsvd : 1; + unsigned int if_rx_ts : 1; + unsigned int if_tx_ts : 1; + unsigned int if_1588 : 1; + } bs; + unsigned int value; + } dw4; + + union { + struct { + unsigned int ts; + } bs; + unsigned int value; + } dw5; + + union { + struct { + unsigned int lro_ts; + } bs; + unsigned int value; + } dw6; + + union { + struct { + unsigned int rsvd0; + } bs; + unsigned int value; + } dw7; /* 16Bytes Align */ +}; + +struct os_hot_repalce_func_info { + char card_name[HINIC3_CARD_NAME_MAX_LEN]; + int bus_num; + int valid; + int bdf; + int partition; + int backup_pf; + int pf_idx; + int port_id; +}; 
+ +#define ALL_CARD_PF_NUM 2048 /* 64 card * 32 pf */ +struct os_hot_replace_info { + struct os_hot_repalce_func_info func_infos[ALL_CARD_PF_NUM]; + u32 func_cnt; +}; + int alloc_buff_in(void *hwdev, struct msg_module *nt_msg, u32 in_size, void **buf_in); int alloc_buff_out(void *hwdev, struct msg_module *nt_msg, u32 out_size, void **buf_out); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c index 81e4030f7f50ab42b120c6471c0c13b0f73240d0..7cd9e4d3612c0e132c6055c1b6c45b649b42a702 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c @@ -15,6 +15,9 @@ #include #include "ossl_knl.h" +#if defined(HAVE_NDO_UDP_TUNNEL_ADD) || defined(HAVE_UDP_TUNNEL_NIC_INFO) +#include +#endif /* HAVE_NDO_UDP_TUNNEL_ADD || HAVE_UDP_TUNNEL_NIC_INFO */ #ifdef HAVE_XDP_SUPPORT #include #endif @@ -28,6 +31,10 @@ #include "hinic3_dcb.h" #include "hinic3_nic_prof.h" +#include "nic_npu_cmd.h" + +#include "vram_common.h" + #define HINIC3_DEFAULT_RX_CSUM_OFFLOAD 0xFFF #define HINIC3_LRO_DEFAULT_COAL_PKT_SIZE 32 @@ -47,8 +54,64 @@ static void hinic3_nic_set_rx_mode(struct net_device *netdev) queue_work(nic_dev->workq, &nic_dev->rx_mode_work); } +static void hinic3_free_irq_vram(struct hinic3_nic_dev *nic_dev, struct hinic3_dyna_txrxq_params *in_q_params) +{ + u32 size; + int is_use_vram = get_use_vram_flag(); + struct hinic3_dyna_txrxq_params q_params = nic_dev->q_params; + + if (q_params.irq_cfg == NULL) + return; + + size = sizeof(struct hinic3_irq) * (q_params.num_qps); + + if (is_use_vram != 0) { + hi_vram_kfree((void *)q_params.irq_cfg, q_params.irq_cfg_vram_name, size); + q_params.irq_cfg = NULL; + } else { + kfree(in_q_params->irq_cfg); + in_q_params->irq_cfg = NULL; + } +} + +static int hinic3_alloc_irq_vram(struct hinic3_nic_dev *nic_dev, + struct hinic3_dyna_txrxq_params *q_params, bool is_up_eth) +{ + u32 size; + int is_use_vram = get_use_vram_flag(); + u16 func_id; + + size = sizeof(struct hinic3_irq) * q_params->num_qps; + + if (is_use_vram != 0) { + func_id = hinic3_global_func_id(nic_dev->hwdev); + snprintf(q_params->irq_cfg_vram_name, + VRAM_NAME_MAX_LEN, "%s%u", + VRAM_NIC_IRQ_VRAM, func_id); + q_params->irq_cfg = (struct hinic3_irq *)hi_vram_kalloc( + q_params->irq_cfg_vram_name, size); + if (q_params->irq_cfg == NULL) { + nicif_err(nic_dev, drv, nic_dev->netdev, "NIC irq vram alloc failed.\n"); + return -ENOMEM; + } + /* in order to clear napi stored in vram, irq need to init when eth up */ + if (is_up_eth) { + memset(q_params->irq_cfg, 0, size); + } + } else { + q_params->irq_cfg = kzalloc(size, GFP_KERNEL); + if (q_params->irq_cfg == NULL) { + nicif_err(nic_dev, drv, nic_dev->netdev, "NIC irq alloc failed.\n"); + return -ENOMEM; + } + } + + return 0; +} + static int hinic3_alloc_txrxq_resources(struct hinic3_nic_dev *nic_dev, - struct hinic3_dyna_txrxq_params *q_params) + struct hinic3_dyna_txrxq_params *q_params, + bool is_up_eth) { u32 size; int err; @@ -70,12 +133,9 @@ static int hinic3_alloc_txrxq_resources(struct hinic3_nic_dev *nic_dev, goto alloc_rxqs_res_arr_err; } - size = sizeof(*q_params->irq_cfg) * q_params->num_qps; - q_params->irq_cfg = kzalloc(size, GFP_KERNEL); - if (!q_params->irq_cfg) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc irq resource array\n"); - err = -ENOMEM; + err = hinic3_alloc_irq_vram(nic_dev, q_params, is_up_eth); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc irq 
resource array\n"); goto alloc_irq_cfg_err; } @@ -102,8 +162,7 @@ static int hinic3_alloc_txrxq_resources(struct hinic3_nic_dev *nic_dev, q_params->txqs_res); alloc_txqs_res_err: - kfree(q_params->irq_cfg); - q_params->irq_cfg = NULL; + hinic3_free_irq_vram(nic_dev, q_params); alloc_irq_cfg_err: kfree(q_params->rxqs_res); @@ -119,13 +178,14 @@ static int hinic3_alloc_txrxq_resources(struct hinic3_nic_dev *nic_dev, static void hinic3_free_txrxq_resources(struct hinic3_nic_dev *nic_dev, struct hinic3_dyna_txrxq_params *q_params) { + int is_in_kexec = vram_get_kexec_flag(); hinic3_free_rxqs_res(nic_dev, q_params->num_qps, q_params->rq_depth, q_params->rxqs_res); hinic3_free_txqs_res(nic_dev, q_params->num_qps, q_params->sq_depth, q_params->txqs_res); - kfree(q_params->irq_cfg); - q_params->irq_cfg = NULL; + if (is_in_kexec == 0) + hinic3_free_irq_vram(nic_dev, q_params); kfree(q_params->rxqs_res); q_params->rxqs_res = NULL; @@ -161,6 +221,7 @@ static int hinic3_configure_txrxqs(struct hinic3_nic_dev *nic_dev, static void config_dcb_qps_map(struct hinic3_nic_dev *nic_dev) { struct net_device *netdev = nic_dev->netdev; + struct hinic3_dcb *dcb = nic_dev->dcb; u8 num_cos; if (!test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags)) { @@ -171,12 +232,13 @@ static void config_dcb_qps_map(struct hinic3_nic_dev *nic_dev) num_cos = hinic3_get_dev_user_cos_num(nic_dev); hinic3_update_qp_cos_cfg(nic_dev, num_cos); /* For now, we don't support to change num_cos */ - if (num_cos > nic_dev->cos_config_num_max || + if (num_cos > dcb->cos_config_num_max || nic_dev->q_params.num_qps < num_cos) { nicif_err(nic_dev, drv, netdev, "Invalid num_cos: %u or num_qps: %u, disable DCB\n", num_cos, nic_dev->q_params.num_qps); nic_dev->q_params.num_cos = 0; clear_bit(HINIC3_DCB_ENABLE, &nic_dev->flags); + clear_bit(HINIC3_DCB_ENABLE, &nic_dev->nic_vram->flags); /* if we can't enable rss or get enough num_qps, * need to sync default configure to hw */ @@ -190,11 +252,14 @@ static int hinic3_configure(struct hinic3_nic_dev *nic_dev) { struct net_device *netdev = nic_dev->netdev; int err; + int is_in_kexec = vram_get_kexec_flag(); - err = hinic3_set_port_mtu(nic_dev->hwdev, (u16)netdev->mtu); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to set mtu\n"); - return err; + if (is_in_kexec == 0) { + err = hinic3_set_port_mtu(nic_dev->hwdev, (u16)netdev->mtu); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to set mtu\n"); + return err; + } } config_dcb_qps_map(nic_dev); @@ -256,10 +321,11 @@ static void config_dcb_num_qps(struct hinic3_nic_dev *nic_dev, const struct hinic3_dyna_txrxq_params *q_params, u16 max_qps) { + struct hinic3_dcb *dcb = nic_dev->dcb; u8 num_cos = q_params->num_cos; u8 user_cos_num = hinic3_get_dev_user_cos_num(nic_dev); - if (!num_cos || num_cos > nic_dev->cos_config_num_max || num_cos > max_qps) + if (!num_cos || num_cos > dcb->cos_config_num_max || num_cos > max_qps) return; /* will disable DCB in config_dcb_qps_map() */ hinic3_update_qp_cos_cfg(nic_dev, user_cos_num); @@ -334,57 +400,10 @@ static void hinic3_destroy_num_qps(struct hinic3_nic_dev *nic_dev) kfree(nic_dev->qps_irq_info); } -int hinic3_force_port_disable(struct hinic3_nic_dev *nic_dev) -{ - int err; - - down(&nic_dev->port_state_sem); - - err = hinic3_set_port_enable(nic_dev->hwdev, false, HINIC3_CHANNEL_NIC); - if (!err) - nic_dev->force_port_disable = true; - - up(&nic_dev->port_state_sem); - - return err; -} - -int hinic3_force_set_port_state(struct hinic3_nic_dev *nic_dev, bool enable) -{ - int err = 0; - - 
down(&nic_dev->port_state_sem); - - nic_dev->force_port_disable = false; - err = hinic3_set_port_enable(nic_dev->hwdev, enable, - HINIC3_CHANNEL_NIC); - - up(&nic_dev->port_state_sem); - - return err; -} - int hinic3_maybe_set_port_state(struct hinic3_nic_dev *nic_dev, bool enable) { - int err; - - down(&nic_dev->port_state_sem); - - /* Do nothing when force disable - * Port will disable when call force port disable - * and should not enable port when in force mode - */ - if (nic_dev->force_port_disable) { - up(&nic_dev->port_state_sem); - return 0; - } - - err = hinic3_set_port_enable(nic_dev->hwdev, enable, + return hinic3_set_port_enable(nic_dev->hwdev, enable, HINIC3_CHANNEL_NIC); - - up(&nic_dev->port_state_sem); - - return err; } static void hinic3_print_link_message(struct hinic3_nic_dev *nic_dev, @@ -401,7 +420,8 @@ static void hinic3_print_link_message(struct hinic3_nic_dev *nic_dev, static int hinic3_alloc_channel_resources(struct hinic3_nic_dev *nic_dev, struct hinic3_dyna_qp_params *qp_params, - struct hinic3_dyna_txrxq_params *trxq_params) + struct hinic3_dyna_txrxq_params *trxq_params, + bool is_up_eth) { int err; @@ -416,7 +436,7 @@ static int hinic3_alloc_channel_resources(struct hinic3_nic_dev *nic_dev, return err; } - err = hinic3_alloc_txrxq_resources(nic_dev, trxq_params); + err = hinic3_alloc_txrxq_resources(nic_dev, trxq_params, is_up_eth); if (err) { nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc txrxq resources\n"); hinic3_free_qps(nic_dev->hwdev, qp_params); @@ -544,9 +564,53 @@ int hinic3_vport_up(struct hinic3_nic_dev *nic_dev) return err; } +static int hinic3_flush_rq_and_check(struct hinic3_nic_dev *nic_dev, + u16 glb_func_id) +{ + struct hinic3_flush_rq *rq_flush_msg = NULL; + struct hinic3_cmd_buf *cmd_buf = NULL; + int out_buf_len = sizeof(struct hinic3_flush_rq); + u16 rq_id; + u64 out_param = 0; + int ret; + + cmd_buf = hinic3_alloc_cmd_buf(nic_dev->hwdev); + if (!cmd_buf) { + nic_err(&nic_dev->pdev->dev, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + cmd_buf->size = sizeof(struct hinic3_flush_rq); + rq_flush_msg = (struct hinic3_flush_rq *)cmd_buf->buf; + rq_flush_msg->dw.bs.func_id = glb_func_id; + for (rq_id = 0; rq_id < nic_dev->q_params.num_qps; rq_id++) { + rq_flush_msg->dw.bs.rq_id = rq_id; + hinic3_cpu_to_be32(rq_flush_msg, out_buf_len); + ret = hinic3_cmdq_direct_resp(nic_dev->hwdev, HINIC3_MOD_L2NIC, + HINIC3_UCODE_CHK_RQ_STOP, + cmd_buf, &out_param, 0, + HINIC3_CHANNEL_NIC); + if (ret != 0 || out_param != 0) { + nic_err(&nic_dev->pdev->dev, "Failed to flush rq, ret:%d, func:%u, rq:%u\n", + ret, glb_func_id, rq_id); + goto err; + } + hinic3_be32_to_cpu(rq_flush_msg, out_buf_len); + } + + nic_info(&nic_dev->pdev->dev, "Func:%u rq_num:%u flush rq success\n", + glb_func_id, nic_dev->q_params.num_qps); + hinic3_free_cmd_buf(nic_dev->hwdev, cmd_buf); + return 0; +err: + hinic3_free_cmd_buf(nic_dev->hwdev, cmd_buf); + return -1; +} + void hinic3_vport_down(struct hinic3_nic_dev *nic_dev) { u16 glb_func_id; + int is_in_kexec = vram_get_kexec_flag(); netif_carrier_off(nic_dev->netdev); netif_tx_disable(nic_dev->netdev); @@ -559,18 +623,21 @@ void hinic3_vport_down(struct hinic3_nic_dev *nic_dev) if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) hinic3_notify_all_vfs_link_changed(nic_dev->hwdev, 0); - hinic3_maybe_set_port_state(nic_dev, false); + if (is_in_kexec != 0) + nicif_info(nic_dev, drv, nic_dev->netdev, "Skip changing mag status!\n"); + else + hinic3_maybe_set_port_state(nic_dev, false); glb_func_id = 
hinic3_global_func_id(nic_dev->hwdev); hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false, HINIC3_CHANNEL_NIC); hinic3_flush_txqs(nic_dev->netdev); - /* After set vport disable 100ms, - * no packets will be send to host - * FPGA set 2000ms - */ - msleep(HINIC3_WAIT_FLUSH_QP_RESOURCE_TIMEOUT); + if (is_in_kexec == 0) { + msleep(HINIC3_WAIT_FLUSH_QP_RESOURCE_TIMEOUT); + } else { + (void)hinic3_flush_rq_and_check(nic_dev, glb_func_id); + } hinic3_flush_qps_res(nic_dev->hwdev); } } @@ -583,11 +650,12 @@ int hinic3_change_channel_settings(struct hinic3_nic_dev *nic_dev, struct hinic3_dyna_qp_params new_qp_params = {0}; struct hinic3_dyna_qp_params cur_qp_params = {0}; int err; + bool is_free_resources = false; hinic3_config_num_qps(nic_dev, trxq_params); err = hinic3_alloc_channel_resources(nic_dev, &new_qp_params, - trxq_params); + trxq_params, false); if (err) { nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc channel resources\n"); @@ -599,10 +667,19 @@ int hinic3_change_channel_settings(struct hinic3_nic_dev *nic_dev, hinic3_close_channel(nic_dev, &cur_qp_params); hinic3_free_channel_resources(nic_dev, &cur_qp_params, &nic_dev->q_params); + is_free_resources = true; } if (nic_dev->num_qp_irq > trxq_params->num_qps) hinic3_qp_irq_change(nic_dev, trxq_params->num_qps); + + if (is_free_resources) { + err = hinic3_alloc_irq_vram(nic_dev, trxq_params, false); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Change chl alloc irq failed\n"); + goto alloc_irq_err; + } + } nic_dev->q_params = *trxq_params; if (reopen_handler) @@ -623,7 +700,7 @@ int hinic3_change_channel_settings(struct hinic3_nic_dev *nic_dev, vport_up_err: hinic3_close_channel(nic_dev, &new_qp_params); - +alloc_irq_err: open_channel_err: hinic3_free_channel_resources(nic_dev, &new_qp_params, trxq_params); @@ -654,7 +731,7 @@ int hinic3_open(struct net_device *netdev) } err = hinic3_alloc_channel_resources(nic_dev, &qp_params, - &nic_dev->q_params); + &nic_dev->q_params, true); if (err) goto alloc_channel_res_err; @@ -693,12 +770,31 @@ int hinic3_open(struct net_device *netdev) return err; } +static void hinic3_delete_napi(struct hinic3_nic_dev *nic_dev) +{ + u16 q_id; + int is_in_kexec = vram_get_kexec_flag(); + struct hinic3_irq *irq_cfg = NULL; + + if (is_in_kexec == 0 || nic_dev->q_params.irq_cfg == NULL) + return; + + for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { + irq_cfg = &(nic_dev->q_params.irq_cfg[q_id]); + qp_del_napi(irq_cfg); + } + + hinic3_free_irq_vram(nic_dev, &nic_dev->q_params); +} + int hinic3_close(struct net_device *netdev) { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); struct hinic3_dyna_qp_params qp_params = {0}; if (!test_and_clear_bit(HINIC3_INTF_UP, &nic_dev->flags)) { + /* delete napi in os hotreplace rollback */ + hinic3_delete_napi(nic_dev); nicif_info(nic_dev, drv, netdev, "Netdev already close, do nothing\n"); return 0; } @@ -835,6 +931,7 @@ static u16 select_queue_by_hash_func(struct net_device *dev, struct sk_buff *skb #define GET_DSCP_PRI_OFFSET 2 static u8 hinic3_get_dscp_up(struct hinic3_nic_dev *nic_dev, struct sk_buff *skb) { + struct hinic3_dcb *dcb = nic_dev->dcb; int dscp_cp; if (skb->protocol == htons(ETH_P_IP)) @@ -842,8 +939,8 @@ static u8 hinic3_get_dscp_up(struct hinic3_nic_dev *nic_dev, struct sk_buff *skb else if (skb->protocol == htons(ETH_P_IPV6)) dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> GET_DSCP_PRI_OFFSET; else - return nic_dev->hw_dcb_cfg.default_cos; - return nic_dev->hw_dcb_cfg.dscp2cos[dscp_cp]; + return 
dcb->hw_dcb_cfg.default_cos; + return dcb->hw_dcb_cfg.dscp2cos[dscp_cp]; } #if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY) @@ -869,6 +966,7 @@ static u16 hinic3_select_queue(struct net_device *netdev, struct sk_buff *skb) #endif /* end of HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dcb *dcb = nic_dev->dcb; u16 txq; u8 cos, qp_num; @@ -889,18 +987,19 @@ static u16 hinic3_select_queue(struct net_device *netdev, struct sk_buff *skb) #endif if (test_bit(HINIC3_DCB_ENABLE, &nic_dev->flags)) { - if (nic_dev->hw_dcb_cfg.trust == DCB_PCP) { + if (dcb->hw_dcb_cfg.trust == HINIC3_DCB_PCP) { if (skb->vlan_tci) - cos = nic_dev->hw_dcb_cfg.pcp2cos[skb->vlan_tci >> VLAN_PRIO_SHIFT]; + cos = dcb->hw_dcb_cfg.pcp2cos[skb->vlan_tci >> + VLAN_PRIO_SHIFT]; else - cos = nic_dev->hw_dcb_cfg.default_cos; + cos = dcb->hw_dcb_cfg.default_cos; } else { cos = hinic3_get_dscp_up(nic_dev, skb); } - qp_num = nic_dev->hw_dcb_cfg.cos_qp_num[cos] ? - txq % nic_dev->hw_dcb_cfg.cos_qp_num[cos] : 0; - txq = nic_dev->hw_dcb_cfg.cos_qp_offset[cos] + qp_num; + qp_num = dcb->hw_dcb_cfg.cos_qp_num[cos] ? + txq % dcb->hw_dcb_cfg.cos_qp_num[cos] : 0; + txq = dcb->hw_dcb_cfg.cos_qp_offset[cos] + qp_num; } return txq; @@ -978,7 +1077,7 @@ static struct net_device_stats *hinic3_get_stats(struct net_device *netdev) stats->rx_packets = packets; stats->rx_bytes = bytes; stats->rx_errors = errors; - stats->rx_dropped = dropped; + stats->rx_dropped = dropped + nic_dev->vport_stats.rx_discard_vport; #ifndef HAVE_VOID_NDO_GET_STATS64 return stats; @@ -1025,10 +1124,17 @@ static int hinic3_change_mtu(struct net_device *netdev, int new_mtu) struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); u32 mtu = (u32)new_mtu; int err = 0; - + int is_in_kexec = vram_get_kexec_flag(); #ifdef HAVE_XDP_SUPPORT u32 xdp_max_mtu; +#endif + if (is_in_kexec != 0) { + nicif_info(nic_dev, drv, netdev, "Hotreplace skip change mtu\n"); + return err; + } + +#ifdef HAVE_XDP_SUPPORT if (hinic3_is_xdp_enable(nic_dev)) { xdp_max_mtu = hinic3_xdp_max_mtu(nic_dev); if (mtu > xdp_max_mtu) { @@ -1047,6 +1153,7 @@ static int hinic3_change_mtu(struct net_device *netdev, int new_mtu) nicif_info(nic_dev, drv, nic_dev->netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu); netdev->mtu = mtu; + nic_dev->nic_vram->vram_mtu = mtu; } return err; @@ -1080,6 +1187,71 @@ static int hinic3_set_mac_addr(struct net_device *netdev, void *addr) return 0; } +#if defined(HAVE_NDO_UDP_TUNNEL_ADD) || defined(HAVE_UDP_TUNNEL_NIC_INFO) +static int hinic3_udp_tunnel_port_config(struct net_device *netdev, + struct udp_tunnel_info *ti, + u8 action) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 func_id = hinic3_global_func_id(nic_dev->hwdev); + u16 dst_port; + int ret = 0; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + dst_port = ntohs(ti->port); + ret = hinic3_vlxan_port_config(nic_dev->hwdev, func_id, + dst_port, action); + if (ret != 0) { + nicif_warn(nic_dev, drv, netdev, + "Failed to set vxlan port %u to device(%d)\n", + dst_port, ret); + break; + } + nicif_info(nic_dev, link, netdev, "Vxlan dst port set to %u\n", + action == HINIC3_CMD_OP_ADD ? 
+ dst_port : ntohs(VXLAN_OFFLOAD_PORT_LE)); + break; + default: + nicif_err(nic_dev, drv, netdev, + "Failed to add port, only vxlan dst port is supported\n"); + ret = -EINVAL; + } + return ret; +} +#endif /* HAVE_NDO_UDP_TUNNEL_ADD || HAVE_UDP_TUNNEL_NIC_INFO */ +#ifdef HAVE_NDO_UDP_TUNNEL_ADD +static void hinic3_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) +{ + if (ti->sa_family != AF_INET && ti->sa_family != AF_INET6) + return; + + hinic3_udp_tunnel_port_config(netdev, ti, HINIC3_CMD_OP_ADD); +} + +static void hinic3_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti) +{ + if (ti->sa_family != AF_INET && ti->sa_family != AF_INET6) + return; + + hinic3_udp_tunnel_port_config(netdev, ti, HINIC3_CMD_OP_DEL); +} +#endif /* HAVE_NDO_UDP_TUNNEL_ADD */ + +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +int hinic3_udp_tunnel_set_port(struct net_device *netdev, __attribute__((unused)) unsigned int table, + __attribute__((unused))unsigned int entry, struct udp_tunnel_info *ti) +{ + return hinic3_udp_tunnel_port_config(netdev, ti, HINIC3_CMD_OP_ADD); +} + +int hinic3_udp_tunnel_unset_port(struct net_device *netdev, __attribute__((unused)) unsigned int table, + __attribute__((unused)) unsigned int entry, struct udp_tunnel_info *ti) +{ + return hinic3_udp_tunnel_port_config(netdev, ti, HINIC3_CMD_OP_DEL); +} +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ + static int hinic3_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, @@ -1126,7 +1298,7 @@ hinic3_vlan_rx_kill_vid(struct net_device *netdev, int err = 0; col = VID_COL(nic_dev, vid); - line = VID_LINE(nic_dev, vid); + line = (int)VID_LINE(nic_dev, vid); /* In the broadcast scenario, ucode finds the corresponding function * based on VLAN 0 of vlan table. If we delete VLAN 0, the VLAN function @@ -1165,14 +1337,12 @@ static int hinic3_vlan_restore(struct net_device *netdev) return -EFAULT; rcu_read_lock(); for (i = 0; i < VLAN_N_VID; i++) { -/* lint -e778 */ #ifdef HAVE_VLAN_FIND_DEV_DEEP_RCU vlandev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), i); #else vlandev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), i); #endif -/* lint +e778 */ col = VID_COL(nic_dev, i); line = VID_LINE(nic_dev, i); if (!vlandev && (vlan_bitmap[line] & (1UL << col)) != 0) { @@ -1330,12 +1500,17 @@ static int set_feature_vlan_filter(struct hinic3_nic_dev *nic_dev, return 0; #ifdef NEED_VLAN_RESTORE - if (en) + if (en) { err = hinic3_vlan_restore(nic_dev->netdev); + if (err) { + hinic3_err(nic_dev, drv, "vlan restore failed\n"); + *failed_features |= vlan_filter_feature; + return err; + } + } #endif - if (err == 0) - err = hinic3_set_vlan_fliter(nic_dev->hwdev, en); + err = hinic3_set_vlan_fliter(nic_dev->hwdev, en); if (err) { hinic3_err(nic_dev, drv, "%s rx vlan filter failed\n", SET_FEATURES_OP_STR(en)); @@ -1429,8 +1604,8 @@ static int hinic3_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) struct hinic3_nic_dev *adapter = netdev_priv(netdev); int err; - if (is_multicast_ether_addr(mac) || /*lint !e574*/ - vf >= pci_num_vf(adapter->pdev)) /*lint !e574*/ + if (is_multicast_ether_addr(mac) || + vf >= pci_num_vf(adapter->pdev)) return -EINVAL; err = hinic3_set_vf_mac(adapter->hwdev, OS_VF_ID_TO_HW(vf), mac); @@ -1448,7 +1623,6 @@ static int hinic3_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) return 0; } -/*lint -save -e574 -e734*/ #ifdef IFLA_VF_MAX static int set_hw_vf_vlan(void *hwdev, u16 cur_vlanprio, int vf, u16 vlan, u8 qos) @@ -1670,7 +1844,7 @@ static int hinic3_ndo_set_vf_bw(struct 
net_device *netdev, int vf, return -EIO; /* rate limit cannot be less than 0 and greater than link speed */ - if (max_tx_rate < 0 || max_tx_rate > speeds[port_info.speed]) { + if (max_tx_rate < 0 || max_tx_rate > (int)(speeds[port_info.speed])) { nicif_err(adapter, drv, netdev, "Set vf max tx rate must be in [0 - %u]\n", speeds[port_info.speed]); return -EINVAL; @@ -1719,7 +1893,7 @@ static int hinic3_xdp_setup(struct hinic3_nic_dev *nic_dev, int max_mtu = hinic3_xdp_max_mtu(nic_dev); int q_id; - if (nic_dev->netdev->mtu > max_mtu) { + if (nic_dev->netdev->mtu > (u32)max_mtu) { nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to setup xdp program, the current MTU %d is larger than max allowed MTU %d\n", nic_dev->netdev->mtu, max_mtu); @@ -1844,6 +2018,10 @@ static const struct net_device_ops hinic3_netdev_ops = { .ndo_xdp = hinic3_xdp, #endif #endif +#ifdef HAVE_NDO_UDP_TUNNEL_ADD + .ndo_udp_tunnel_add = hinic3_udp_tunnel_add, + .ndo_udp_tunnel_del = hinic3_udp_tunnel_del, +#endif /* HAVE_NDO_UDP_TUNNEL_ADD */ #ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT }; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h index cc00bdcbc71b9e9f99471a39849b6e37db79af50..1bc6a1427ff3021c10acfa9d9a53374f63a67fed 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h @@ -10,6 +10,8 @@ #include "hinic3_common.h" #include "hinic3_nic_io.h" #include "hinic3_nic_cfg.h" +#include "mag_mpu_cmd.h" +#include "mag_mpu_cmd_defs.h" /* ************************ array index define ********************* */ #define ARRAY_INDEX_0 0 @@ -21,6 +23,35 @@ #define ARRAY_INDEX_6 6 #define ARRAY_INDEX_7 7 +#define XSFP_TLV_PRE_INFO_LEN 4 + +enum hinic3_link_port_type { + LINK_PORT_UNKNOWN, + LINK_PORT_OPTICAL_MM, + LINK_PORT_OPTICAL_SM, + LINK_PORT_PAS_COPPER, + LINK_PORT_ACC, + LINK_PORT_BASET, + LINK_PORT_AOC = 0x40, + LINK_PORT_ELECTRIC, + LINK_PORT_BACKBOARD_INTERFACE, +}; + +enum hilink_fibre_subtype { + FIBRE_SUBTYPE_SR = 1, + FIBRE_SUBTYPE_LR, + FIBRE_SUBTYPE_MAX, +}; + +enum hilink_fec_type { + HILINK_FEC_NOT_SET, + HILINK_FEC_RSFEC, + HILINK_FEC_BASEFEC, + HILINK_FEC_NOFEC, + HILINK_FEC_LLRSFE, + HILINK_FEC_MAX_TYPE, +}; + struct hinic3_sq_attr { u8 dma_attr_off; u8 pending_limit; @@ -58,24 +89,32 @@ struct hinic3_port_routine_cmd { struct mag_cmd_get_xsfp_present abs; }; +struct hinic3_port_routine_cmd_extern { + bool mpu_send_xsfp_tlv_info; + + struct drv_mag_cmd_get_xsfp_tlv_rsp std_xsfp_tlv_info; +}; + struct hinic3_nic_cfg { - struct semaphore cfg_lock; + struct semaphore cfg_lock; /* Valid when pfc is disable */ - bool pause_set; - struct nic_pause_config nic_pause; + bool pause_set; + struct nic_pause_config nic_pause; - u8 pfc_en; - u8 pfc_bitmap; + u8 pfc_en; + u8 pfc_bitmap; - struct nic_port_info port_info; + struct nic_port_info port_info; /* percentage of pf link bandwidth */ - u32 pf_bw_limit; - u32 rsvd2; + u32 pf_bw_tx_limit; + u32 pf_bw_rx_limit; - struct hinic3_port_routine_cmd rt_cmd; - struct mutex sfp_mutex; /* mutex used for copy sfp info */ + struct hinic3_port_routine_cmd rt_cmd; + struct hinic3_port_routine_cmd_extern rt_cmd_ext; + /* mutex used for copy sfp info */ + struct mutex sfp_mutex; }; struct hinic3_nic_io { @@ -84,7 +123,7 @@ struct hinic3_nic_io { void *dev_hdl; u8 link_status; - u8 rsvd1; + u8 direct; u32 rsvd2; struct hinic3_io_queue *sq; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c index 
2c1b5658b458adf29741c9664dea07fc7c73371a..525a353e0b596bccffa1f692f30dbedd432176b9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -20,10 +20,126 @@
 #include "hinic3_nic_io.h"
 #include "hinic3_srv_nic.h"
 #include "hinic3_nic.h"
-#include "hinic3_nic_cmd.h"
+#include "nic_mpu_cmd.h"
+#include "nic_npu_cmd.h"
 #include "hinic3_common.h"
 #include "hinic3_nic_cfg.h"
+#include "vram_common.h"
+
+int hinic3_delete_bond(void *hwdev)
+{
+	struct hinic3_cmd_delete_bond cmd_delete_bond;
+	u16 out_size = sizeof(cmd_delete_bond);
+	struct hinic3_nic_io *nic_io = NULL;
+	int err = 0;
+
+	if (!hwdev) {
+		pr_err("hwdev is null.\n");
+		return -EINVAL;
+	}
+
+	nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io) {
+		pr_err("nic_io is null.\n");
+		return -EINVAL;
+	}
+
+	memset(&cmd_delete_bond, 0, sizeof(cmd_delete_bond));
+	cmd_delete_bond.bond_id = HINIC3_INVALID_BOND_ID;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_BOND_DEV_DELETE,
+				     &cmd_delete_bond, sizeof(cmd_delete_bond),
+				     &cmd_delete_bond, &out_size);
+	if (err || !out_size || cmd_delete_bond.head.status) {
+		nic_err(nic_io->dev_hdl, "Failed to delete bond, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, cmd_delete_bond.head.status, out_size);
+		return -EFAULT;
+	}
+
+	if (cmd_delete_bond.bond_id != HINIC3_INVALID_BOND_ID) {
+		nic_info(nic_io->dev_hdl, "Delete bond success\n");
+	}
+
+	return 0;
+}
+
+int hinic3_open_close_bond(void *hwdev, u32 bond_en)
+{
+	struct hinic3_cmd_open_close_bond cmd_open_close_bond;
+	u16 out_size = sizeof(cmd_open_close_bond);
+	struct hinic3_nic_io *nic_io = NULL;
+	int err = 0;
+
+	if (!hwdev) {
+		pr_err("hwdev is null.\n");
+		return -EINVAL;
+	}
+
+	nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io) {
+		pr_err("nic_io is null.\n");
+		return -EINVAL;
+	}
+
+	memset(&cmd_open_close_bond, 0, sizeof(cmd_open_close_bond));
+	cmd_open_close_bond.open_close_bond_info.bond_id = HINIC3_INVALID_BOND_ID;
+	cmd_open_close_bond.open_close_bond_info.open_close_flag = bond_en;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_BOND_DEV_OPEN_CLOSE,
+				     &cmd_open_close_bond, sizeof(cmd_open_close_bond),
+				     &cmd_open_close_bond, &out_size);
+	if (err || !out_size || cmd_open_close_bond.head.status) {
+		nic_err(nic_io->dev_hdl, "Failed to %s bond, err: %d, status: 0x%x, out_size: 0x%x\n",
+			bond_en == true ? "open" : "close", err, cmd_open_close_bond.head.status, out_size);
+		return -EFAULT;
+	}
+
+	if (cmd_open_close_bond.open_close_bond_info.bond_id != HINIC3_INVALID_BOND_ID) {
+		nic_info(nic_io->dev_hdl, "%s bond success\n", bond_en == true ?
"Open" : "Close"); + } + + return 0; +} + +int hinic3_create_bond(void *hwdev, u32 *bond_id) +{ + struct hinic3_cmd_create_bond cmd_create_bond; + u16 out_size = sizeof(cmd_create_bond); + struct hinic3_nic_io *nic_io = NULL; + int err = 0; + + if (!hwdev) { + pr_err("hwdev is null.\n"); + return -EINVAL; + } + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is null.\n"); + return -EINVAL; + } + + memset(&cmd_create_bond, 0, sizeof(cmd_create_bond)); + cmd_create_bond.create_bond_info.default_param_flag = true; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_BOND_DEV_CREATE, + &cmd_create_bond, sizeof(cmd_create_bond), + &cmd_create_bond, &out_size); + if (err || !out_size || cmd_create_bond.head.status) { + nic_err(nic_io->dev_hdl, "Failed to create default bond, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cmd_create_bond.head.status, out_size); + return -EFAULT; + } + + if (cmd_create_bond.create_bond_info.bond_id != HINIC3_INVALID_BOND_ID) { + *bond_id = cmd_create_bond.create_bond_info.bond_id; + nic_info(nic_io->dev_hdl, "Create bond success\n"); + } + + return 0; +} + int hinic3_set_ci_table(void *hwdev, struct hinic3_sq_attr *attr) { struct hinic3_cmd_cons_idx_attr cons_idx_attr; @@ -37,6 +153,8 @@ int hinic3_set_ci_table(void *hwdev, struct hinic3_sq_attr *attr) memset(&cons_idx_attr, 0, sizeof(cons_idx_attr)); nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; cons_idx_attr.func_idx = hinic3_global_func_id(hwdev); @@ -152,6 +270,8 @@ int hinic3_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, memset(&mac_info, 0, sizeof(mac_info)); nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { nic_err(nic_io->dev_hdl, "Invalid VLAN number: %d\n", @@ -183,7 +303,7 @@ int hinic3_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, } EXPORT_SYMBOL(hinic3_del_mac); -int hinic3_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, +int hinic3_update_mac(void *hwdev, const u8 *old_mac, u8 *new_mac, u16 vlan_id, u16 func_id) { struct hinic3_port_mac_update mac_info; @@ -197,6 +317,8 @@ int hinic3_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, memset(&mac_info, 0, sizeof(mac_info)); nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { nic_err(nic_io->dev_hdl, "Invalid VLAN number: %d\n", @@ -247,6 +369,8 @@ int hinic3_get_default_mac(void *hwdev, u8 *mac_addr) memset(&mac_info, 0, sizeof(mac_info)); nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; mac_info.func_id = hinic3_global_func_id(hwdev); @@ -292,6 +416,45 @@ static int hinic3_config_vlan(struct hinic3_nic_io *nic_io, u8 opcode, return 0; } +#if defined(HAVE_NDO_UDP_TUNNEL_ADD) || defined(HAVE_UDP_TUNNEL_NIC_INFO) +int hinic3_vlxan_port_config(void *hwdev, u16 func_id, u16 port, u8 action) +{ + struct hinic3_cmd_vxlan_port_info vxlan_port_info; + u16 out_size = sizeof(vxlan_port_info); + struct hinic3_nic_io *nic_io = NULL; + int err; + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&vxlan_port_info, 0, sizeof(vxlan_port_info)); + vxlan_port_info.opcode = action; + vxlan_port_info.cfg_mode = 0; // other ethtool set + vxlan_port_info.func_id = func_id; + vxlan_port_info.vxlan_port = port; + + err = 
l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_CFG_VXLAN_PORT, + &vxlan_port_info, sizeof(vxlan_port_info), + &vxlan_port_info, &out_size); + if (err || !out_size || vxlan_port_info.msg_head.status) { + if (vxlan_port_info.msg_head.status == 0x2) { + nic_warn(nic_io->dev_hdl, + "Failed to %s vxlan dst port because it has already been set by hinicadm\n", + action == HINIC3_CMD_OP_ADD ? "add" : "delete"); + } else { + nic_err(nic_io->dev_hdl, + "Failed to %s vxlan dst port, err: %d, status: 0x%x, out size: 0x%x\n", + action == HINIC3_CMD_OP_ADD ? "add" : "delete", + err, vxlan_port_info.msg_head.status, out_size); + } + return -EINVAL; + } + + return 0; +} +#endif /* HAVE_NDO_UDP_TUNNEL_ADD || HAVE_UDP_TUNNEL_NIC_INFO */ + int hinic3_add_vlan(void *hwdev, u16 vlan_id, u16 func_id) { struct hinic3_nic_io *nic_io = NULL; @@ -300,6 +463,9 @@ int hinic3_add_vlan(void *hwdev, u16 vlan_id, u16 func_id) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + return hinic3_config_vlan(nic_io, HINIC3_CMD_OP_ADD, vlan_id, func_id); } @@ -311,6 +477,9 @@ int hinic3_del_vlan(void *hwdev, u16 vlan_id, u16 func_id) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + return hinic3_config_vlan(nic_io, HINIC3_CMD_OP_DEL, vlan_id, func_id); } @@ -354,6 +523,9 @@ int hinic3_set_dcb_state(void *hwdev, struct hinic3_dcb_state *dcb_state) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + if (!memcmp(&nic_io->dcb_state, dcb_state, sizeof(nic_io->dcb_state))) return 0; @@ -515,6 +687,8 @@ int hinic3_set_pause_info(void *hwdev, struct nic_pause_config nic_pause) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; nic_cfg = &nic_io->nic_cfg; @@ -540,7 +714,6 @@ int hinic3_set_pause_info(void *hwdev, struct nic_pause_config nic_pause) int hinic3_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause) { - struct hinic3_nic_cfg *nic_cfg = NULL; struct hinic3_nic_io *nic_io = NULL; int err = 0; @@ -548,17 +721,13 @@ int hinic3_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); - nic_cfg = &nic_io->nic_cfg; + if (!nic_io) + return -EINVAL; err = hinic3_cfg_hw_pause(nic_io, HINIC3_CMD_OP_GET, nic_pause); if (err) return err; - if (nic_cfg->pause_set || !nic_pause->auto_neg) { - nic_pause->rx_pause = nic_cfg->nic_pause.rx_pause; - nic_pause->tx_pause = nic_cfg->nic_pause.tx_pause; - } - return 0; } @@ -573,6 +742,8 @@ int hinic3_sync_dcb_state(void *hwdev, u8 op_code, u8 state) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; memset(&dcb_state, 0, sizeof(dcb_state)); @@ -659,44 +830,6 @@ int hinic3_cache_out_qps_res(void *hwdev) return 0; } -int hinic3_get_fpga_phy_port_stats(void *hwdev, struct hinic3_phy_fpga_port_stats *stats) -{ - struct hinic3_port_stats *port_stats = NULL; - struct hinic3_port_stats_info stats_info; - u16 out_size = sizeof(*port_stats); - struct hinic3_nic_io *nic_io = NULL; - int err; - - port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); - if (!port_stats) - return -ENOMEM; - - nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_io) - return -EINVAL; - - memset(&stats_info, 0, sizeof(stats_info)); - - err = l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_GET_PORT_STAT, - &stats_info, sizeof(stats_info), - 
port_stats, &out_size); - if (err || !out_size || port_stats->msg_head.status) { - nic_err(nic_io->dev_hdl, - "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n", - err, port_stats->msg_head.status, out_size); - err = -EIO; - goto out; - } - - memcpy(stats, &port_stats->stats, sizeof(*stats)); - -out: - kfree(port_stats); - - return err; -} -EXPORT_SYMBOL(hinic3_get_fpga_phy_port_stats); - int hinic3_get_vport_stats(void *hwdev, u16 func_id, struct hinic3_vport_stats *stats) { struct hinic3_port_stats_info stats_info; @@ -712,6 +845,8 @@ int hinic3_get_vport_stats(void *hwdev, u16 func_id, struct hinic3_vport_stats * memset(&vport_stats, 0, sizeof(vport_stats)); nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; stats_info.func_id = func_id; @@ -778,6 +913,8 @@ int hinic3_set_port_mtu(void *hwdev, u16 new_mtu) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; if (new_mtu < HINIC3_MIN_MTU_SIZE) { nic_err(nic_io->dev_hdl, @@ -808,6 +945,9 @@ static int nic_feature_nego(void *hwdev, u8 opcode, u64 *s_feature, u16 size) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + memset(&feature_nego, 0, sizeof(feature_nego)); feature_nego.func_id = hinic3_global_func_id(hwdev); feature_nego.opcode = opcode; @@ -829,29 +969,21 @@ static int nic_feature_nego(void *hwdev, u8 opcode, u64 *s_feature, u16 size) return 0; } -static int hinic3_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit) +static int hinic3_get_bios_pf_bw_tx_limit(void *hwdev, struct hinic3_nic_io *nic_io, u16 func_id, u32 *pf_rate) { - struct hinic3_nic_io *nic_io = NULL; + int err = 0; // default success struct nic_cmd_bios_cfg cfg = {{0}}; u16 out_size = sizeof(cfg); - int err; - if (!hwdev || !pf_bw_limit) - return -EINVAL; - - if (hinic3_func_type(hwdev) == TYPE_VF || !HINIC3_SUPPORT_RATE_LIMIT(hwdev)) - return 0; - - nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); - cfg.bios_cfg.func_id = (u8)hinic3_global_func_id(hwdev); + cfg.bios_cfg.func_id = (u8)func_id; cfg.bios_cfg.func_valid = 1; - cfg.op_code = 0 | NIC_NVM_DATA_PF_SPEED_LIMIT; + cfg.op_code = 0 | NIC_NVM_DATA_PF_TX_SPEED_LIMIT; err = l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_BIOS_CFG, &cfg, sizeof(cfg), &cfg, &out_size); if (err || !out_size || cfg.head.status) { nic_err(nic_io->dev_hdl, - "Failed to get bios pf bandwidth limit, err: %d, status: 0x%x, out size: 0x%x\n", + "Failed to get bios pf bandwidth tx limit, err: %d, status: 0x%x, out size: 0x%x\n", err, cfg.head.status, out_size); return -EIO; } @@ -861,13 +993,77 @@ static int hinic3_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit) nic_warn(nic_io->dev_hdl, "Invalid bios configuration data, signature: 0x%x\n", cfg.bios_cfg.signature); - if (cfg.bios_cfg.pf_bw > MAX_LIMIT_BW) { + if (cfg.bios_cfg.pf_tx_bw > MAX_LIMIT_BW) { + nic_err(nic_io->dev_hdl, "Invalid bios cfg pf bandwidth limit: %u\n", + cfg.bios_cfg.pf_tx_bw); + return -EINVAL; + } + + (*pf_rate) = cfg.bios_cfg.pf_tx_bw; + return err; +} + +static int hinic3_get_bios_pf_bw_rx_limit(void *hwdev, struct hinic3_nic_io *nic_io, u16 func_id, u32 *pf_rate) +{ + int err = 0; // default success + struct nic_rx_rate_bios_cfg rx_bios_conf = {{0}}; + u16 out_size = sizeof(rx_bios_conf); + + rx_bios_conf.func_id = (u8)func_id; + rx_bios_conf.op_code = 0; /* 1-save, 0-read */ + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_RX_RATE_CFG, &rx_bios_conf, sizeof(rx_bios_conf), 
+ &rx_bios_conf, &out_size); + if (rx_bios_conf.msg_head.status == HINIC3_MGMT_CMD_UNSUPPORTED && err == 0) { // Compatible older firmware + nic_warn(nic_io->dev_hdl, "Not support get bios pf bandwidth rx limit\n"); + return 0; + } else if (err || !out_size || rx_bios_conf.msg_head.status) { + nic_err(nic_io->dev_hdl, + "Failed to get bios pf bandwidth rx limit, err: %d, status: 0x%x, out size: 0x%x\n", + err, rx_bios_conf.msg_head.status, out_size); + return -EIO; + } + if (rx_bios_conf.rx_rate_limit > MAX_LIMIT_BW) { nic_err(nic_io->dev_hdl, "Invalid bios cfg pf bandwidth limit: %u\n", - cfg.bios_cfg.pf_bw); + rx_bios_conf.rx_rate_limit); return -EINVAL; } - *pf_bw_limit = cfg.bios_cfg.pf_bw; + (*pf_rate) = rx_bios_conf.rx_rate_limit; + return err; +} + +static int hinic3_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit, u8 direct) +{ + struct hinic3_nic_io *nic_io = NULL; + u32 pf_rate = 0; + int err = 0; + u16 func_id; + func_id = hinic3_global_func_id(hwdev); + + if (!hwdev || !pf_bw_limit) + return -EINVAL; + + if (hinic3_func_type(hwdev) == TYPE_VF || !HINIC3_SUPPORT_RATE_LIMIT(hwdev)) + return 0; + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (direct == HINIC3_NIC_TX) { + err = hinic3_get_bios_pf_bw_tx_limit(hwdev, nic_io, func_id, &pf_rate); + } else if (direct == HINIC3_NIC_RX) { + err = hinic3_get_bios_pf_bw_rx_limit(hwdev, nic_io, func_id, &pf_rate); + } + + if (err != 0) + return err; + + if (pf_rate > MAX_LIMIT_BW) { + nic_err(nic_io->dev_hdl, "Invalid bios cfg pf bandwidth limit: %u\n", pf_rate); + return -EINVAL; + } + *pf_bw_limit = pf_rate; return 0; } @@ -876,8 +1072,9 @@ int hinic3_set_pf_rate(void *hwdev, u8 speed_level) { struct hinic3_cmd_tx_rate_cfg rate_cfg = {{0}}; struct hinic3_nic_io *nic_io = NULL; + u32 rate_limit; u16 out_size = sizeof(rate_cfg); - u32 pf_rate; + u32 pf_rate = 0; int err; u32 speed_convert[PORT_SPEED_UNKNOWN] = { 0, 10, 100, 1000, 10000, 25000, 40000, 50000, 100000, 200000 @@ -892,19 +1089,21 @@ int hinic3_set_pf_rate(void *hwdev, u8 speed_level) return -EINVAL; } - if (nic_io->nic_cfg.pf_bw_limit == MAX_LIMIT_BW) { - pf_rate = 0; - } else { + rate_limit = (nic_io->direct == HINIC3_NIC_TX) ? 
+ nic_io->nic_cfg.pf_bw_tx_limit : nic_io->nic_cfg.pf_bw_rx_limit; + + if (rate_limit != MAX_LIMIT_BW) { /* divided by 100 to convert to percentage */ - pf_rate = (speed_convert[speed_level] / 100) * nic_io->nic_cfg.pf_bw_limit; + pf_rate = (speed_convert[speed_level] / 100) * rate_limit; /* bandwidth limit is very small but not unlimit in this case */ - if (pf_rate == 0 && speed_level != PORT_SPEED_NOT_SET) + if ((pf_rate == 0) && (speed_level != PORT_SPEED_NOT_SET)) pf_rate = 1; } rate_cfg.func_id = hinic3_global_func_id(hwdev); rate_cfg.min_rate = 0; rate_cfg.max_rate = pf_rate; + rate_cfg.direct = nic_io->direct; err = l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_SET_MAX_MIN_RATE, &rate_cfg, sizeof(rate_cfg), &rate_cfg, &out_size); @@ -927,6 +1126,8 @@ int hinic3_set_nic_feature_to_hw(void *hwdev) struct hinic3_nic_io *nic_io = NULL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; return nic_feature_nego(hwdev, HINIC3_CMD_OP_SET, &nic_io->feature_cap, 1); } @@ -936,6 +1137,8 @@ u64 hinic3_get_feature_cap(void *hwdev) struct hinic3_nic_io *nic_io = NULL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return 0; return nic_io->feature_cap; } @@ -945,6 +1148,9 @@ void hinic3_update_nic_feature(void *hwdev, u64 s_feature) struct hinic3_nic_io *nic_io = NULL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; + nic_io->feature_cap = s_feature; nic_info(nic_io->dev_hdl, "Update nic feature to 0x%llx\n", nic_io->feature_cap); @@ -978,6 +1184,7 @@ static int hinic3_init_nic_io(void *hwdev, void *pcidev_hdl, void *dev_hdl, (*nic_io)->nic_cfg.rt_cmd.mpu_send_sfp_abs = false; (*nic_io)->nic_cfg.rt_cmd.mpu_send_sfp_info = false; + (*nic_io)->nic_cfg.rt_cmd_ext.mpu_send_xsfp_tlv_info = false; return 0; } @@ -994,11 +1201,14 @@ int hinic3_init_nic_hwdev(void *hwdev, void *pcidev_hdl, void *dev_hdl, { struct hinic3_nic_io *nic_io = NULL; int err; + int is_in_kexec = vram_get_kexec_flag(); err = hinic3_init_nic_io(hwdev, pcidev_hdl, dev_hdl, &nic_io); if (err) return err; + nic_io->rx_buff_len = rx_buff_len; + err = hinic3_register_service_adapter(hwdev, nic_io, SERVICE_T_NIC); if (err) { nic_err(nic_io->dev_hdl, "Failed to register service adapter\n"); @@ -1011,10 +1221,12 @@ int hinic3_init_nic_hwdev(void *hwdev, void *pcidev_hdl, void *dev_hdl, goto set_used_state_err; } - err = hinic3_init_function_table(nic_io); - if (err) { - nic_err(nic_io->dev_hdl, "Failed to init function table\n"); - goto err_out; + if (is_in_kexec == 0) { + err = hinic3_init_function_table(nic_io); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to init function table\n"); + goto err_out; + } } err = hinic3_get_nic_feature_from_hw(hwdev, &nic_io->feature_cap, 1); @@ -1025,9 +1237,19 @@ int hinic3_init_nic_hwdev(void *hwdev, void *pcidev_hdl, void *dev_hdl, sdk_info(dev_hdl, "nic features: 0x%llx\n", nic_io->feature_cap); - err = hinic3_get_bios_pf_bw_limit(hwdev, &nic_io->nic_cfg.pf_bw_limit); - if (err) { - nic_err(nic_io->dev_hdl, "Failed to get pf bandwidth limit\n"); + err = hinic3_get_bios_pf_bw_limit(hwdev, + &nic_io->nic_cfg.pf_bw_tx_limit, + HINIC3_NIC_TX); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to get pf tx bandwidth limit\n"); + goto err_out; + } + + err = hinic3_get_bios_pf_bw_limit(hwdev, + &nic_io->nic_cfg.pf_bw_rx_limit, + HINIC3_NIC_RX); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to get pf rx bandwidth limit\n"); goto err_out; } @@ -1037,12 +1259,13 @@ int 
hinic3_init_nic_hwdev(void *hwdev, void *pcidev_hdl, void *dev_hdl, goto err_out; } - nic_io->rx_buff_len = rx_buff_len; - return 0; err_out: - hinic3_set_func_svc_used_state(hwdev, SVC_T_NIC, 0, HINIC3_CHANNEL_NIC); + if (hinic3_set_func_svc_used_state(hwdev, SVC_T_NIC, 0, + HINIC3_CHANNEL_NIC) != 0) { + nic_err(nic_io->dev_hdl, "Failed to set function svc used state\n"); + } set_used_state_err: hinic3_unregister_service_adapter(hwdev, SERVICE_T_NIC); @@ -1092,6 +1315,8 @@ int hinic3_force_drop_tx_pkt(void *hwdev) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; memset(&pkt_drop, 0, sizeof(pkt_drop)); pkt_drop.port = hinic3_physical_port_id(hwdev); @@ -1120,6 +1345,8 @@ int hinic3_set_rx_mode(void *hwdev, u32 enable) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; memset(&rx_mode_cfg, 0, sizeof(rx_mode_cfg)); rx_mode_cfg.func_id = hinic3_global_func_id(hwdev); @@ -1148,6 +1375,8 @@ int hinic3_set_rx_vlan_offload(void *hwdev, u8 en) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; memset(&vlan_cfg, 0, sizeof(vlan_cfg)); vlan_cfg.func_id = hinic3_global_func_id(hwdev); @@ -1176,6 +1405,9 @@ int hinic3_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); if (!nic_io->vf_infos || is_zero_ether_addr(vf_info->drv_mac_addr)) return 0; @@ -1215,6 +1447,8 @@ static int hinic3_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; memset(&lro_cfg, 0, sizeof(lro_cfg)); lro_cfg.func_id = hinic3_global_func_id(hwdev); @@ -1246,6 +1480,8 @@ static int hinic3_set_rx_lro_timer(void *hwdev, u32 timer_value) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; memset(&lro_timer, 0, sizeof(lro_timer)); lro_timer.opcode = HINIC3_CMD_OP_SET; @@ -1278,6 +1514,8 @@ int hinic3_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, ipv6_en = lro_en ? 
1 : 0;
 
 	nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io)
+		return -EINVAL;
 
 	nic_info(nic_io->dev_hdl, "Set LRO max coalesce packet size to %uK\n",
 		 lro_max_pkt_len);
@@ -1306,6 +1544,8 @@ int hinic3_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl)
 		return -EINVAL;
 
 	nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io)
+		return -EINVAL;
 
 	memset(&vlan_filter, 0, sizeof(vlan_filter));
 	vlan_filter.func_id = hinic3_global_func_id(hwdev);
@@ -1325,7 +1565,6 @@ int hinic3_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl)
 
 int hinic3_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en)
 {
-	// struct hinic_hwdev *dev = hwdev;
 	struct nic_cmd_capture_info cap_info = {{0}};
 	u16 out_size = sizeof(cap_info);
 	int err;
@@ -1334,7 +1573,6 @@ int hinic3_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en)
 		return -EINVAL;
 
 	/* 2 function capture types */
-	// cap_info.op_type = UP_UCAPTURE_OP_TYPE_FUNC;
 	cap_info.is_en_trx = cap_en;
 	cap_info.func_port = func_id;
@@ -1359,6 +1597,8 @@ int hinic3_add_tcam_rule(void *hwdev, struct nic_tcam_cfg_rule *tcam_rule)
 		return -EINVAL;
 
 	nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io)
+		return -EINVAL;
 	if (tcam_rule->index >= HINIC3_MAX_TCAM_RULES_NUM) {
 		nic_err(nic_io->dev_hdl, "Tcam rules num to add is invalid\n");
 		return -EINVAL;
@@ -1394,6 +1634,8 @@ int hinic3_del_tcam_rule(void *hwdev, u32 index)
 		return -EINVAL;
 
 	nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io)
+		return -EINVAL;
 	if (index >= HINIC3_MAX_TCAM_RULES_NUM) {
 		nic_err(nic_io->dev_hdl, "Tcam rules num to del is invalid\n");
 		return -EINVAL;
@@ -1443,6 +1685,8 @@ static int hinic3_mgmt_tcam_block(void *hwdev, u8 alloc_en, u16 *index)
 		return -EINVAL;
 
 	nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io)
+		return -EINVAL;
 
 	memset(&tcam_block_info, 0, sizeof(struct nic_cmd_ctrl_tcam_block_out));
@@ -1488,6 +1732,8 @@ int hinic3_set_fdir_tcam_rule_filter(void *hwdev, bool enable)
 		return -EINVAL;
 
 	nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io)
+		return -EINVAL;
 	memset(&port_tcam_cmd, 0, sizeof(port_tcam_cmd));
 	port_tcam_cmd.func_id = hinic3_global_func_id(hwdev);
 	port_tcam_cmd.tcam_enable = (u8)enable;
@@ -1516,6 +1762,9 @@ int hinic3_flush_tcam_rule(void *hwdev)
 		return -EINVAL;
 
 	nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io)
+		return -EINVAL;
+
 	memset(&tcam_flush, 0, sizeof(struct nic_cmd_flush_tcam_rules));
 	tcam_flush.func_id = hinic3_global_func_id(hwdev);
@@ -1546,6 +1795,9 @@ int hinic3_get_rxq_hw_info(void *hwdev, struct rxq_check_info *rxq_info, u16 num
 		return -EINVAL;
 
 	nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io)
+		return -EINVAL;
+
 	cmd_buf = hinic3_alloc_cmd_buf(hwdev);
 	if (!cmd_buf) {
 		nic_err(nic_io->dev_hdl, "Failed to allocate cmd_buf.\n");
 		return -ENOMEM;
@@ -1588,8 +1840,10 @@ int hinic3_pf_set_vf_link_state(void *hwdev, bool vf_link_forced, bool link_stat
 		return -EINVAL;
 	}
 
-	if (hinic3_func_type(hwdev) == TYPE_VF)
+	if (hinic3_func_type(hwdev) == TYPE_VF) {
+		pr_err("VFs are not allowed to set link state.\n");
 		return -EINVAL;
+	}
 
 	nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
 	if (!nic_io) {
@@ -1606,3 +1860,35 @@ int hinic3_pf_set_vf_link_state(void *hwdev, bool vf_link_forced, bool link_stat
 	return 0;
 }
 EXPORT_SYMBOL(hinic3_pf_set_vf_link_state);
+
+int hinic3_get_outband_vlan_cfg(void *hwdev, u16 *outband_default_vid)
+{
+	struct hinic3_outband_cfg_info outband_cfg_info;
+	u16 out_size =
sizeof(outband_cfg_info); + struct hinic3_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !outband_default_vid) + return -EINVAL; + + memset(&outband_cfg_info, 0, sizeof(outband_cfg_info)); + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_GET_OUTBAND_CFG, + &outband_cfg_info, + sizeof(outband_cfg_info), + &outband_cfg_info, &out_size); + if (err || !out_size || outband_cfg_info.msg_head.status) { + nic_err(nic_io->dev_hdl, + "Failed to get outband cfg, err: %d, status: 0x%x, out size: 0x%x\n", + err, outband_cfg_info.msg_head.status, out_size); + return -EINVAL; + } + + *outband_default_vid = outband_cfg_info.outband_default_vid; + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h index ee0587cc3faaf23fb7198f4d4ced8f320903bb12..0fe7b9f337c65469832848f191a7d2dd24d29bd8 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h @@ -7,9 +7,9 @@ #include #include -#include "hinic3_mgmt_interface.h" +#include "nic_mpu_cmd_defs.h" #include "mag_mpu_cmd.h" -#include "mag_cmd.h" +#include "mag_mpu_cmd_defs.h" #define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1) #define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1) @@ -54,6 +54,8 @@ #define MAX_LIMIT_BW 100 +#define HINIC3_INVALID_BOND_ID 0xffffffff + enum hinic3_valid_link_settings { HILINK_LINK_SET_SPEED = 0x1, HILINK_LINK_SET_AUTONEG = 0x2, @@ -67,6 +69,11 @@ enum hinic3_link_follow_status { HINIC3_LINK_FOLLOW_STATUS_MAX, }; +enum hinic3_nic_pf_direct { + HINIC3_NIC_RX = 0, + HINIC3_NIC_TX, +}; + struct hinic3_link_ksettings { u32 valid_bitmap; u8 speed; /* enum nic_speed_level */ @@ -150,8 +157,12 @@ struct nic_port_info { u8 duplex; u8 speed; u8 fec; + u8 lanes; + u8 rsvd; u32 supported_mode; u32 advertised_mode; + u32 supported_fec_mode; + u32 bond_speed; }; struct nic_pause_config { @@ -176,11 +187,17 @@ struct hinic3_rxq_hw { #define MODULE_TYPE_QSFP28 0x11 #define MODULE_TYPE_QSFP 0x0C #define MODULE_TYPE_QSFP_PLUS 0x0D +#define MODULE_TYPE_DSFP 0x1B +#define MODULE_TYPE_QSFP_CMIS 0x1E #define TCAM_IP_TYPE_MASK 0x1 #define TCAM_TUNNEL_TYPE_MASK 0xF #define TCAM_FUNC_ID_MASK 0x7FFF +int hinic3_delete_bond(void *hwdev); +int hinic3_open_close_bond(void *hwdev, u32 bond_en); +int hinic3_create_bond(void *hwdev, u32 *bond_id); + int hinic3_add_tcam_rule(void *hwdev, struct nic_tcam_cfg_rule *tcam_rule); int hinic3_del_tcam_rule(void *hwdev, u32 index); @@ -201,7 +218,7 @@ int hinic3_flush_tcam_rule(void *hwdev); * @retval zero: success * @retval non-zero: failure */ -int hinic3_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, +int hinic3_update_mac(void *hwdev, const u8 *old_mac, u8 *new_mac, u16 vlan_id, u16 func_id); /* * @@ -333,7 +350,7 @@ int hinic3_kill_vf_vlan(void *hwdev, int vf_id); * @retval zero: success * @retval non-zero: failure */ -int hinic3_set_vf_mac(void *hwdev, int vf_id, unsigned char *mac_addr); +int hinic3_set_vf_mac(void *hwdev, int vf_id, const unsigned char *mac_addr); /* * * @brief hinic3_vf_info_vlanprio - get vf vlan priority @@ -508,7 +525,14 @@ int hinic3_rss_get_indir_tbl(void *hwdev, u32 *indir_table); */ int hinic3_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats); -int hinic3_get_fpga_phy_port_stats(void *hwdev, struct hinic3_phy_fpga_port_stats *stats); +/* * + * @brief hinic3_get_phy_rsfec_stats - get rsfec stats + * @param hwdev: 
device pointer to hwdev + * @param stats: rsfec(Reed-Solomon Forward Error Correction) stats + * @retval zero: success + * @retval non-zero: failure + */ +int hinic3_get_phy_rsfec_stats(void *hwdev, struct mag_cmd_rsfec_stats *stats); int hinic3_set_port_funcs_state(void *hwdev, bool enable); @@ -544,6 +568,8 @@ int hinic3_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl); void hinic3_clear_vfs_info(void *hwdev); +int hinic3_notify_vf_outband_cfg(void *hwdev, u16 func_id, u16 vlan_id); + int hinic3_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id); int hinic3_set_led_status(void *hwdev, enum mag_led_type type, @@ -563,10 +589,13 @@ int hinic3_set_autoneg(void *hwdev, bool enable); int hinic3_get_sfp_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext); int hinic3_get_sfp_eeprom(void *hwdev, u8 *data, u32 len); +int hinic3_get_tlv_xsfp_eeprom(void *hwdev, u8 *data, u32 len); bool hinic3_if_sfp_absent(void *hwdev); int hinic3_get_sfp_info(void *hwdev, struct mag_cmd_get_xsfp_info *sfp_info); - +int hinic3_get_sfp_tlv_info(void *hwdev, + struct drv_mag_cmd_get_xsfp_tlv_rsp *sfp_tlv_info, + const struct mag_cmd_get_xsfp_tlv_req *sfp_tlv_info_req); /* * * @brief hinic3_set_nic_feature_to_hw - sync nic feature to hardware * @param hwdev: device pointer to hwdev @@ -618,4 +647,18 @@ int hinic3_set_pf_rate(void *hwdev, u8 speed_level); int hinic3_get_rxq_hw_info(void *hwdev, struct rxq_check_info *rxq_info, u16 num_qps, u16 wqe_type); +#if defined(HAVE_NDO_UDP_TUNNEL_ADD) || defined(HAVE_UDP_TUNNEL_NIC_INFO) +/* * + * @brief hinic3_vlxan_port_config - add/del vxlan dst port + * @param hwdev: device pointer to hwdev + * @param func_id: function id + * @param port: vxlan dst port + * @param action: add or del, del will set to default value (0x12B5) + * @retval zero: success + * @retval non-zero: failure + */ +int hinic3_vlxan_port_config(void *hwdev, u16 func_id, u16 port, u8 action); +#endif /* HAVE_NDO_UDP_TUNNEL_ADD || HAVE_UDP_TUNNEL_NIC_INFO */ + +int hinic3_get_outband_vlan_cfg(void *hwdev, u16 *outband_default_vid); #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg_vf.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg_vf.c index b46cf78ce9e34685339ce4e27c94cc1434b7f80d..654673f527ecca9cc90dc9fa2b05284114dfdf6c 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg_vf.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg_vf.c @@ -21,7 +21,8 @@ #include "hinic3_nic_cfg.h" #include "hinic3_srv_nic.h" #include "hinic3_nic.h" -#include "hinic3_nic_cmd.h" +#include "nic_mpu_cmd.h" +#include "nic_npu_cmd.h" /*lint -e806*/ static unsigned char set_vf_link_state; @@ -116,7 +117,7 @@ int hinic3_cfg_vf_vlan(struct hinic3_nic_io *nic_io, u8 opcode, u16 vid, err = hinic3_set_vlan_ctx(nic_io, glb_func_id, vlan_tag, NIC_CONFIG_ALL_QUEUE_VLAN_CTX, opcode == HINIC3_CMD_OP_ADD); - if (err) { + if (err != 0) { nic_err(nic_io->dev_hdl, "Failed to set VF %d vlan ctx, err: %d\n", HW_VF_ID_TO_OS(vf_id), err); @@ -137,22 +138,21 @@ int hinic3_cfg_vf_vlan(struct hinic3_nic_io *nic_io, u8 opcode, u16 vid, /* this function just be called by hinic3_ndo_set_vf_mac, * others are not permitted. 
*/ -int hinic3_set_vf_mac(void *hwdev, int vf_id, unsigned char *mac_addr) +int hinic3_set_vf_mac(void *hwdev, int vf_id, const unsigned char *mac_addr) { - struct vf_data_storage *vf_info; - struct hinic3_nic_io *nic_io; + struct vf_data_storage *vf_info = NULL; + struct hinic3_nic_io *nic_io = NULL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); -#ifndef __VMWARE__ + /* duplicate request, so just return success */ if (ether_addr_equal(vf_info->user_mac_addr, mac_addr)) return 0; -#else - if (ether_addr_equal(vf_info->user_mac_addr, mac_addr)) - return 0; -#endif ether_addr_copy(vf_info->user_mac_addr, mac_addr); return 0; @@ -160,13 +160,15 @@ int hinic3_set_vf_mac(void *hwdev, int vf_id, unsigned char *mac_addr) int hinic3_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos) { - struct hinic3_nic_io *nic_io; + struct hinic3_nic_io *nic_io = NULL; int err; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; err = hinic3_cfg_vf_vlan(nic_io, HINIC3_CMD_OP_ADD, vlan, qos, vf_id); - if (err) + if (err != 0) return err; nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan; @@ -180,17 +182,20 @@ int hinic3_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos) int hinic3_kill_vf_vlan(void *hwdev, int vf_id) { - struct vf_data_storage *vf_infos; - struct hinic3_nic_io *nic_io; + struct vf_data_storage *vf_infos = NULL; + struct hinic3_nic_io *nic_io = NULL; int err; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + vf_infos = nic_io->vf_infos; err = hinic3_cfg_vf_vlan(nic_io, HINIC3_CMD_OP_DEL, vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos, vf_id); - if (err) + if (err != 0) return err; nic_info(nic_io->dev_hdl, "Remove VLAN %u on VF %d\n", @@ -205,11 +210,13 @@ int hinic3_kill_vf_vlan(void *hwdev, int vf_id) u16 hinic3_vf_info_vlanprio(void *hwdev, int vf_id) { - struct hinic3_nic_io *nic_io; + struct hinic3_nic_io *nic_io = NULL; u16 pf_vlan, vlanprio; u8 pf_qos; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return 0; pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan; pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos; @@ -222,9 +229,14 @@ int hinic3_set_vf_link_state(void *hwdev, u16 vf_id, int link) { struct hinic3_nic_io *nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); - struct vf_data_storage *vf_infos = nic_io->vf_infos; + struct vf_data_storage *vf_infos = NULL; u8 link_status = 0; + if (!nic_io) + return -EINVAL; + + vf_infos = nic_io->vf_infos; + switch (link) { case HINIC3_IFLA_VF_LINK_STATE_AUTO: vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false; @@ -264,6 +276,9 @@ int hinic3_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + vf_infos = nic_io->vf_infos; memset(&spoofchk_cfg, 0, sizeof(spoofchk_cfg)); @@ -288,9 +303,11 @@ int hinic3_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk) bool hinic3_vf_info_spoofchk(void *hwdev, int vf_id) { - struct hinic3_nic_io *nic_io; + struct hinic3_nic_io *nic_io = NULL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return false; return nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk; } @@ -304,7 +321,7 @@ int hinic3_set_vf_trust(void *hwdev, u16 vf_id, bool trust) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, 
SERVICE_T_NIC); - if (vf_id > nic_io->max_vfs) + if (!nic_io || vf_id > nic_io->max_vfs) return -EINVAL; nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust = trust; @@ -317,11 +334,11 @@ bool hinic3_get_vf_trust(void *hwdev, int vf_id) struct hinic3_nic_io *nic_io = NULL; if (!hwdev) - return -EINVAL; + return false; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); - if (vf_id > nic_io->max_vfs) - return -EINVAL; + if (!nic_io || vf_id > nic_io->max_vfs) + return false; return nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust; } @@ -339,6 +356,7 @@ static int hinic3_set_vf_tx_rate_max_min(struct hinic3_nic_io *nic_io, rate_cfg.func_id = hinic3_glb_pf_vf_offset(nic_io->hwdev) + vf_id; rate_cfg.max_rate = max_rate; rate_cfg.min_rate = min_rate; + rate_cfg.direct = HINIC3_NIC_TX; err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC3_NIC_CMD_SET_MAX_MIN_RATE, &rate_cfg, sizeof(rate_cfg), &rate_cfg, @@ -359,13 +377,16 @@ int hinic3_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate) int err; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + if (!HINIC3_SUPPORT_RATE_LIMIT(hwdev)) { nic_err(nic_io->dev_hdl, "Current function doesn't support to set vf rate limit\n"); return -EOPNOTSUPP; } err = hinic3_set_vf_tx_rate_max_min(nic_io, vf_id, max_rate, min_rate); - if (err) + if (err != 0) return err; nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate; @@ -376,12 +397,16 @@ int hinic3_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate) void hinic3_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi) { - struct vf_data_storage *vfinfo; - struct hinic3_nic_io *nic_io; + struct vf_data_storage *vfinfo = NULL; + struct hinic3_nic_io *nic_io = NULL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; vfinfo = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); + if (!vfinfo) + return; ivi->vf = HW_VF_ID_TO_OS(vf_id); ether_addr_copy(ivi->mac, vfinfo->user_mac_addr); @@ -455,12 +480,12 @@ static int vf_func_register(struct hinic3_nic_io *nic_io) err = hinic3_register_vf_mbox_cb(nic_io->hwdev, HINIC3_MOD_L2NIC, nic_io->hwdev, hinic3_vf_event_handler); - if (err) + if (err != 0) return err; err = hinic3_register_vf_mbox_cb(nic_io->hwdev, HINIC3_MOD_HILINK, nic_io->hwdev, hinic3_vf_mag_event_handler); - if (err) + if (err != 0) goto reg_hilink_err; memset(®ister_info, 0, sizeof(register_info)); @@ -472,6 +497,12 @@ static int vf_func_register(struct hinic3_nic_io *nic_io) ®ister_info, &out_size, 0, HINIC3_CHANNEL_NIC); if (err || !out_size || register_info.msg_head.status) { + if (hinic3_is_slave_host(nic_io->hwdev)) { + nic_warn(nic_io->dev_hdl, + "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n", + err, register_info.msg_head.status, out_size); + return 0; + } nic_err(nic_io->dev_hdl, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n", err, register_info.msg_head.status, out_size); err = -EIO; @@ -506,18 +537,18 @@ static int pf_init_vf_infos(struct hinic3_nic_io *nic_io) for (i = 0; i < nic_io->max_vfs; i++) { err = hinic3_init_vf_infos(nic_io, i); - if (err) + if (err != 0) goto init_vf_infos_err; } err = hinic3_register_pf_mbox_cb(nic_io->hwdev, HINIC3_MOD_L2NIC, nic_io->hwdev, hinic3_pf_mbox_handler); - if (err) + if (err != 0) goto register_pf_mbox_cb_err; err = hinic3_register_pf_mbox_cb(nic_io->hwdev, HINIC3_MOD_HILINK, nic_io->hwdev, hinic3_pf_mag_mbox_handler); - if (err) + if (err != 0) goto register_pf_mag_mbox_cb_err; return 0; @@ -540,16 +571,16 @@ int 
hinic3_vf_func_init(struct hinic3_nic_io *nic_io) err = hinic3_register_mgmt_msg_cb(nic_io->hwdev, HINIC3_MOD_L2NIC, nic_io->hwdev, hinic3_pf_event_handler); - if (err) + if (err != 0) return err; err = hinic3_register_mgmt_msg_cb(nic_io->hwdev, HINIC3_MOD_HILINK, nic_io->hwdev, hinic3_pf_mag_event_handler); - if (err) + if (err != 0) goto register_mgmt_msg_cb_err; err = pf_init_vf_infos(nic_io); - if (err) + if (err != 0) goto pf_init_vf_infos_err; return 0; @@ -576,9 +607,15 @@ void hinic3_vf_func_free(struct hinic3_nic_io *nic_io) &unregister, sizeof(unregister), &unregister, &out_size, 0, HINIC3_CHANNEL_NIC); - if (err || !out_size || unregister.msg_head.status) - nic_err(nic_io->dev_hdl, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n", - err, unregister.msg_head.status, out_size); + if (err || !out_size || unregister.msg_head.status) { + if (hinic3_is_slave_host(nic_io->hwdev)) + nic_info(nic_io->dev_hdl, + "vRoCE VF notify PF unsuccessful is allowed"); + else + nic_err(nic_io->dev_hdl, + "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n", + err, unregister.msg_head.status, out_size); + } hinic3_unregister_vf_mbox_cb(nic_io->hwdev, HINIC3_MOD_L2NIC); } else { @@ -587,6 +624,7 @@ void hinic3_vf_func_free(struct hinic3_nic_io *nic_io) hinic3_unregister_pf_mbox_cb(nic_io->hwdev, HINIC3_MOD_L2NIC); hinic3_clear_vfs_info(nic_io->hwdev); kfree(nic_io->vf_infos); + nic_io->vf_infos = NULL; } hinic3_unregister_mgmt_msg_cb(nic_io->hwdev, HINIC3_MOD_HILINK); hinic3_unregister_mgmt_msg_cb(nic_io->hwdev, HINIC3_MOD_L2NIC); @@ -595,11 +633,15 @@ void hinic3_vf_func_free(struct hinic3_nic_io *nic_io) static void clear_vf_infos(void *hwdev, u16 vf_id) { - struct vf_data_storage *vf_infos; - struct hinic3_nic_io *nic_io; + struct vf_data_storage *vf_infos = NULL; + struct hinic3_nic_io *nic_io = NULL; u16 func_id; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is NULL\n"); + return; + } func_id = hinic3_glb_pf_vf_offset(hwdev) + vf_id; vf_infos = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); @@ -628,10 +670,57 @@ static void clear_vf_infos(void *hwdev, u16 vf_id) void hinic3_clear_vfs_info(void *hwdev) { + u16 i; struct hinic3_nic_io *nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); - u16 i; + if (!nic_io) { + pr_err("nic_io is NULL\n"); + return; + } for (i = 0; i < nic_io->max_vfs; i++) clear_vf_infos(hwdev, OS_VF_ID_TO_HW(i)); } + +int hinic3_notify_vf_outband_cfg(void *hwdev, u16 func_id, u16 vlan_id) +{ + int err = 0; + struct hinic3_outband_cfg_info outband_cfg_info; + struct vf_data_storage *vf_infos = NULL; + u16 out_size = sizeof(outband_cfg_info); + u16 vf_id; + struct hinic3_nic_io *nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is NULL\n"); + return 0; + } + + vf_id = func_id - hinic3_glb_pf_vf_offset(nic_io->hwdev); + vf_infos = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); + + memset(&outband_cfg_info, 0, sizeof(outband_cfg_info)); + if (vf_infos->registered) { + outband_cfg_info.func_id = func_id; + outband_cfg_info.outband_default_vid = vlan_id; + err = hinic3_mbox_to_vf_no_ack(nic_io->hwdev, vf_id, + HINIC3_MOD_L2NIC, + HINIC3_NIC_CMD_OUTBAND_CFG_NOTICE, + &outband_cfg_info, + sizeof(outband_cfg_info), + &outband_cfg_info, &out_size, + HINIC3_CHANNEL_NIC); + if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) { + nic_warn(nic_io->dev_hdl, "VF%d not initialized, disconnect it\n", + HW_VF_ID_TO_OS(vf_id)); + hinic3_unregister_vf(nic_io, vf_id); + return 0; + } + if 
(err || !out_size || outband_cfg_info.msg_head.status) + nic_err(nic_io->dev_hdl, + "outband cfg event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, + outband_cfg_info.msg_head.status, out_size); + } + + return err; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c index 17d48c4d6e51c4d1483f9821f56533367a2bd716..2878f66eed6e5ca7f5617c43f3ffbbded71645b2 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c @@ -30,6 +30,9 @@ int hinic3_dbg_get_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, } nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + if (q_id >= nic_io->num_qps) { pr_err("q_id[%u] > num_qps_cfg[%u].\n", q_id, nic_io->num_qps); return -EINVAL; @@ -69,6 +72,9 @@ int hinic3_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, } nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + if (q_id >= nic_io->num_qps) { nic_err(nic_io->dev_hdl, "Input queue id(%u) is larger than the actual queue number\n", q_id); @@ -82,6 +88,8 @@ int hinic3_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, } sq = &nic_io->sq[q_id]; + if (!sq) + return -EINVAL; sq_info->q_id = q_id; sq_info->pi = hinic3_get_sq_local_pi(sq); @@ -112,6 +120,9 @@ int hinic3_dbg_get_rq_info(void *hwdev, u16 q_id, struct nic_rq_info *rq_info, } nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + if (q_id >= nic_io->num_qps) { nic_err(nic_io->dev_hdl, "Input queue id(%u) is larger than the actual queue number\n", q_id); @@ -125,6 +136,8 @@ int hinic3_dbg_get_rq_info(void *hwdev, u16 q_id, struct nic_rq_info *rq_info, } rq = &nic_io->rq[q_id]; + if (!rq) + return -EINVAL; rq_info->q_id = q_id; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h index 800fa73613c86b60adf3a46eacb7412cb3c58340..137098bafd5c7b59bc69c09a02b0c10c5c3d5d36 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h @@ -15,9 +15,10 @@ #include "hinic3_tx.h" #include "hinic3_rx.h" #include "hinic3_dcb.h" +#include "vram_common.h" #define HINIC3_NIC_DRV_NAME "hinic3" -#define HINIC3_NIC_DRV_VERSION "15.17.1.1" +#define HINIC3_NIC_DRV_VERSION "17.7.8.1" #define HINIC3_FUNC_IS_VF(hwdev) (hinic3_func_type(hwdev) == TYPE_VF) @@ -25,6 +26,10 @@ #define HINIC3_MODERATONE_DELAY HZ #define LP_PKT_CNT 64 +#define LP_PKT_LEN 60 + +#define NAPI_IS_REGIN 1 +#define NAPI_NOT_REGIN 0 enum hinic3_flags { HINIC3_INTF_UP, @@ -91,6 +96,8 @@ struct hinic3_irq { u16 rsvd1; u32 irq_id; /* The IRQ number from OS */ + u32 napi_reign; + char irq_name[IFNAMSIZ + 16]; struct napi_struct napi; cpumask_t affinity_mask; @@ -123,6 +130,41 @@ struct hinic3_dyna_txrxq_params { struct hinic3_dyna_txq_res *txqs_res; struct hinic3_dyna_rxq_res *rxqs_res; struct hinic3_irq *irq_cfg; + char irq_cfg_vram_name[VRAM_NAME_MAX_LEN]; +}; + +struct hinic3_flush_rq { + union { + struct { +#if defined(BYTE_ORDER) && (BYTE_ORDER == BIG_ENDIAN) + u32 lb_proc : 1; + u32 rsvd : 10; + u32 rq_id : 8; + u32 func_id : 13; +#else + u32 func_id : 13; + u32 rq_id : 8; + u32 rsvd : 10; + u32 lb_proc : 1; +#endif + } bs; + u32 value; + } dw; + + union { + struct { +#if defined(BYTE_ORDER) && (BYTE_ORDER == BIG_ENDIAN) + u32 rsvd2 : 2; + u32 src_chnl 
: 12; + u32 pkt_len : 18; +#else + u32 pkt_len : 18; + u32 src_chnl : 12; + u32 rsvd2 : 2; +#endif + } bs; + u32 value; + } lb_info0; /* loop back information, used by uCode */ }; #define HINIC3_NIC_STATS_INC(nic_dev, field) \ @@ -147,6 +189,10 @@ struct hinic3_nic_stats { #endif }; +struct hinic3_nic_vport_stats { + u64 rx_discard_vport; +}; + #define HINIC3_TCAM_DYNAMIC_BLOCK_SIZE 16 #define HINIC3_MAX_TCAM_FILTERS 512 @@ -185,104 +231,123 @@ struct hinic3_tcam_info { struct hinic3_tcam_dynamic_block_info tcam_dynamic_info; }; +struct hinic3_dcb { + u8 cos_config_num_max; + u8 func_dft_cos_bitmap; + /* used to tool validity check */ + u16 port_dft_cos_bitmap; + + struct hinic3_dcb_config hw_dcb_cfg; + struct hinic3_dcb_config wanted_dcb_cfg; + unsigned long dcb_flags; +}; + +struct hinic3_vram { + u32 vram_mtu; + u16 vram_num_qps; + unsigned long flags; +}; + +struct hinic3_outband_cfg { + u16 outband_default_vid; + u16 rsvd; +}; + struct hinic3_nic_dev { - struct pci_dev *pdev; - struct net_device *netdev; - struct hinic3_lld_dev *lld_dev; - void *hwdev; + struct pci_dev *pdev; + struct net_device *netdev; + struct hinic3_lld_dev *lld_dev; + void *hwdev; - int poll_weight; - u32 rsvd1; - unsigned long *vlan_bitmap; + int poll_weight; + u32 rsvd1; + unsigned long *vlan_bitmap; - u16 max_qps; + u16 max_qps; - u32 msg_enable; - unsigned long flags; + u32 msg_enable; + unsigned long flags; - u32 lro_replenish_thld; - u32 dma_rx_buff_size; - u16 rx_buff_len; - u32 page_order; + u32 lro_replenish_thld; + u32 dma_rx_buff_size; + u16 rx_buff_len; + u32 page_order; + bool page_pool_enabled; /* Rss related varibles */ - u8 rss_hash_engine; - struct nic_rss_type rss_type; - u8 *rss_hkey; + u8 rss_hash_engine; + struct nic_rss_type rss_type; + u8 *rss_hkey; /* hkey in big endian */ - u32 *rss_hkey_be; - u32 *rss_indir; + u32 *rss_hkey_be; + u32 *rss_indir; - u8 cos_config_num_max; - u8 func_dft_cos_bitmap; - u16 port_dft_cos_bitmap; /* used to tool validity check */ + struct hinic3_dcb *dcb; + char dcb_name[VRAM_NAME_MAX_LEN]; - struct hinic3_dcb_config hw_dcb_cfg; - struct hinic3_dcb_config wanted_dcb_cfg; - struct hinic3_dcb_config dcb_cfg; - unsigned long dcb_flags; - int disable_port_cnt; - /* lock for disable or enable traffic flow */ - struct semaphore dcb_sem; + struct hinic3_vram *nic_vram; + char nic_vram_name[VRAM_NAME_MAX_LEN]; - struct hinic3_intr_coal_info *intr_coalesce; - unsigned long last_moder_jiffies; - u32 adaptive_rx_coal; - u8 intr_coal_set_flag; + int disable_port_cnt; + + struct hinic3_intr_coal_info *intr_coalesce; + unsigned long last_moder_jiffies; + u32 adaptive_rx_coal; + u8 intr_coal_set_flag; #ifndef HAVE_NETDEV_STATS_IN_NETDEV - struct net_device_stats net_stats; + struct net_device_stats net_stats; #endif - struct hinic3_nic_stats stats; + struct hinic3_nic_stats stats; + struct hinic3_nic_vport_stats vport_stats; /* lock for nic resource */ - struct mutex nic_mutex; - bool force_port_disable; - struct semaphore port_state_sem; - u8 link_status; + struct mutex nic_mutex; + u8 link_status; - struct nic_service_cap nic_cap; + struct nic_service_cap nic_cap; - struct hinic3_txq *txqs; - struct hinic3_rxq *rxqs; + struct hinic3_txq *txqs; + struct hinic3_rxq *rxqs; struct hinic3_dyna_txrxq_params q_params; - u16 num_qp_irq; - struct irq_info *qps_irq_info; + u16 num_qp_irq; + struct irq_info *qps_irq_info; - struct workqueue_struct *workq; + struct workqueue_struct *workq; - struct work_struct rx_mode_work; - struct delayed_work moderation_task; + struct work_struct 
rx_mode_work; + struct delayed_work moderation_task; - struct list_head uc_filter_list; - struct list_head mc_filter_list; - unsigned long rx_mod_state; - int netdev_uc_cnt; - int netdev_mc_cnt; + struct list_head uc_filter_list; + struct list_head mc_filter_list; + unsigned long rx_mod_state; + int netdev_uc_cnt; + int netdev_mc_cnt; - int lb_test_rx_idx; - int lb_pkt_len; - u8 *lb_test_rx_buf; + int lb_test_rx_idx; + int lb_pkt_len; + u8 *lb_test_rx_buf; - struct hinic3_tcam_info tcam; - struct hinic3_rx_flow_rule rx_flow_rule; + struct hinic3_tcam_info tcam; + struct hinic3_rx_flow_rule rx_flow_rule; #ifdef HAVE_XDP_SUPPORT - struct bpf_prog *xdp_prog; + struct bpf_prog *xdp_prog; #endif - struct delayed_work periodic_work; + struct delayed_work periodic_work; /* reference to enum hinic3_event_work_flags */ - unsigned long event_flag; - - struct hinic3_nic_prof_attr *prof_attr; - struct hinic3_prof_adapter *prof_adap; - u64 rsvd8[7]; - u32 rsvd9; - u32 rxq_get_err_times; - struct delayed_work rxq_check_work; + unsigned long event_flag; + + struct hinic3_nic_prof_attr *prof_attr; + struct hinic3_prof_adapter *prof_adap; + u64 rsvd8[7]; + struct hinic3_outband_cfg outband_cfg; + u32 rxq_get_err_times; + struct delayed_work rxq_check_work; + struct delayed_work vport_stats_work; }; #define hinic_msg(level, nic_dev, msglvl, format, arg...) \ @@ -309,7 +374,7 @@ struct hinic3_uld_info *get_nic_uld_info(void); u32 hinic3_get_io_stats_size(const struct hinic3_nic_dev *nic_dev); -void hinic3_get_io_stats(const struct hinic3_nic_dev *nic_dev, void *stats); +int hinic3_get_io_stats(const struct hinic3_nic_dev *nic_dev, void *stats); int hinic3_open(struct net_device *netdev); @@ -328,6 +393,8 @@ int hinic3_qps_irq_init(struct hinic3_nic_dev *nic_dev); void hinic3_qps_irq_deinit(struct hinic3_nic_dev *nic_dev); +void qp_del_napi(struct hinic3_irq *irq_cfg); + void hinic3_set_netdev_ops(struct hinic3_nic_dev *nic_dev); bool hinic3_is_netdev_ops_match(const struct net_device *netdev); @@ -345,10 +412,6 @@ void hinic3_get_ethtool_stats(struct net_device *netdev, int hinic3_get_sset_count(struct net_device *netdev, int sset); -int hinic3_force_port_disable(struct hinic3_nic_dev *nic_dev); - -int hinic3_force_set_port_state(struct hinic3_nic_dev *nic_dev, bool enable); - int hinic3_maybe_set_port_state(struct hinic3_nic_dev *nic_dev, bool enable); #ifdef ETHTOOL_GLINKSETTINGS @@ -383,5 +446,17 @@ bool hinic3_is_xdp_enable(struct hinic3_nic_dev *nic_dev); int hinic3_xdp_max_mtu(struct hinic3_nic_dev *nic_dev); #endif +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +int hinic3_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti); +int hinic3_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti); +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ + +#if defined(ETHTOOL_GFECPARAM) && defined(ETHTOOL_SFECPARAM) +int set_fecparam(void *hwdev, u8 fecparam); +int get_fecparam(void *hwdev, u8 *advertised_fec, u8 *supported_fec); +#endif + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_event.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_event.c index 57cf07cee554c88f9e140ec8dab9ac37cd374e2a..6cc294e4b468642456d67a5f2492ab9c98f1d506 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_event.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_event.c @@ -21,11 +21,12 @@ #include "hinic3_nic_cfg.h" #include "hinic3_srv_nic.h" #include "hinic3_nic.h" -#include "hinic3_nic_cmd.h" 
+#include "nic_mpu_cmd.h" +#include "nic_npu_cmd.h" static int hinic3_init_vf_config(struct hinic3_nic_io *nic_io, u16 vf_id) { - struct vf_data_storage *vf_info; + struct vf_data_storage *vf_info = NULL; u16 func_id; int err = 0; @@ -38,7 +39,7 @@ static int hinic3_init_vf_config(struct hinic3_nic_io *nic_io, u16 vf_id) err = hinic3_set_mac(nic_io->hwdev, vf_info->drv_mac_addr, vf_info->pf_vlan, func_id, HINIC3_CHANNEL_NIC); - if (err) { + if (err != 0) { nic_err(nic_io->dev_hdl, "Failed to set VF %d MAC\n", HW_VF_ID_TO_OS(vf_id)); return err; @@ -51,7 +52,7 @@ static int hinic3_init_vf_config(struct hinic3_nic_io *nic_io, u16 vf_id) err = hinic3_cfg_vf_vlan(nic_io, HINIC3_CMD_OP_ADD, vf_info->pf_vlan, vf_info->pf_qos, vf_id); - if (err) { + if (err != 0) { nic_err(nic_io->dev_hdl, "Failed to add VF %d VLAN_QOS\n", HW_VF_ID_TO_OS(vf_id)); return err; @@ -62,7 +63,7 @@ static int hinic3_init_vf_config(struct hinic3_nic_io *nic_io, u16 vf_id) err = hinic3_set_vf_tx_rate(nic_io->hwdev, vf_id, vf_info->max_rate, vf_info->min_rate); - if (err) { + if (err != 0) { nic_err(nic_io->dev_hdl, "Failed to set VF %d max rate %u, min rate %u\n", HW_VF_ID_TO_OS(vf_id), vf_info->max_rate, vf_info->min_rate); @@ -84,7 +85,7 @@ static int register_vf_msg_handler(struct hinic3_nic_io *nic_io, u16 vf_id) } err = hinic3_init_vf_config(nic_io, vf_id); - if (err) + if (err != 0) return err; nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true; @@ -137,6 +138,9 @@ static int hinic3_register_vf_msg_handler(struct hinic3_nic_io *nic_io, struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); int err; + if (!vf_info) + return -EINVAL; + if (register_vf->op_register) { vf_info->support_extra_feature = register_vf->support_extra_feature; err = register_vf_msg_handler(nic_io, vf_id); @@ -145,7 +149,7 @@ static int hinic3_register_vf_msg_handler(struct hinic3_nic_io *nic_io, vf_info->support_extra_feature = 0; } - if (err) + if (err != 0) register_info->msg_head.status = EFAULT; *out_size = sizeof(*register_info); @@ -157,6 +161,9 @@ void hinic3_unregister_vf(struct hinic3_nic_io *nic_io, u16 vf_id) { struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); + if (!vf_info) + return; + unregister_vf_msg_handler(nic_io, vf_id); vf_info->support_extra_feature = 0; } @@ -181,14 +188,21 @@ static int hinic3_get_vf_mac_msg_handler(struct hinic3_nic_io *nic_io, u16 vf, void *buf_out, u16 *out_size) { struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf); + struct hinic3_port_mac_set *mac_in = + (struct hinic3_port_mac_set *)buf_in; struct hinic3_port_mac_set *mac_info = buf_out; int err; - if (HINIC3_SUPPORT_VF_MAC(nic_io->hwdev)) { + if (!mac_info || !vf_info) + return -EINVAL; + + mac_in->func_id = hinic3_glb_pf_vf_offset(nic_io->hwdev) + vf; + + if (HINIC3_SUPPORT_VF_MAC(nic_io->hwdev) != 0) { err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC3_NIC_CMD_GET_MAC, buf_in, in_size, buf_out, out_size); - if (!err) { + if (err == 0) { if (is_zero_ether_addr(mac_info->mac)) ether_addr_copy(mac_info->mac, vf_info->drv_mac_addr); } @@ -211,6 +225,11 @@ static int hinic3_set_vf_mac_msg_handler(struct hinic3_nic_io *nic_io, u16 vf, struct hinic3_port_mac_set *mac_out = buf_out; int err; + if (!vf_info) + return -EINVAL; + + mac_in->func_id = hinic3_glb_pf_vf_offset(nic_io->hwdev) + vf; + if (vf_info->use_specified_mac && !vf_info->trust && is_valid_ether_addr(mac_in->mac)) { nic_warn(nic_io->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", @@ -247,6 
+266,11 @@ static int hinic3_del_vf_mac_msg_handler(struct hinic3_nic_io *nic_io, u16 vf, struct hinic3_port_mac_set *mac_out = buf_out; int err; + if (!vf_info) + return -EINVAL; + + mac_in->func_id = hinic3_glb_pf_vf_offset(nic_io->hwdev) + vf; + if (vf_info->use_specified_mac && !vf_info->trust && is_valid_ether_addr(mac_in->mac)) { nic_warn(nic_io->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", @@ -283,12 +307,15 @@ static int hinic3_update_vf_mac_msg_handler(struct hinic3_nic_io *nic_io, struct hinic3_port_mac_update *mac_out = buf_out; int err; + if (!vf_info) + return -EINVAL; + if (!is_valid_ether_addr(mac_in->new_mac)) { nic_err(nic_io->dev_hdl, "Update VF MAC is invalid.\n"); return -EINVAL; } + mac_in->func_id = hinic3_glb_pf_vf_offset(nic_io->hwdev) + vf; -#ifndef __VMWARE__ if (vf_info->use_specified_mac && !vf_info->trust) { nic_warn(nic_io->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", HW_VF_ID_TO_OS(vf)); @@ -296,17 +323,7 @@ static int hinic3_update_vf_mac_msg_handler(struct hinic3_nic_io *nic_io, *out_size = sizeof(*mac_out); return 0; } -#else - err = hinic_config_vf_request(nic_io->hwdev->pcidev_hdl, - HW_VF_ID_TO_OS(vf), - HINIC_CFG_VF_MAC_CHANGED, - (void *)mac_in->new_mac); - if (err) { - nic_err(nic_io->dev_hdl, "Failed to config VF %d MAC request, err: %d\n", - HW_VF_ID_TO_OS(vf), err); - return err; - } -#endif + mac_in->vlan_id = vf_info->pf_vlan; err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC3_NIC_CMD_UPDATE_MAC, buf_in, in_size, buf_out, out_size); @@ -362,7 +379,8 @@ static int _l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u32 i, cmd_cnt = ARRAY_LEN(vf_cmd_handler); bool cmd_to_pf = false; - if (hinic3_func_type(hwdev) == TYPE_VF) { + if (hinic3_func_type(hwdev) == TYPE_VF && + !hinic3_is_slave_host(hwdev)) { for (i = 0; i < cmd_cnt; i++) { if (cmd == vf_cmd_handler[i].cmd) cmd_to_pf = true; @@ -404,6 +422,8 @@ int hinic3_pf_mbox_handler(void *hwdev, return -EFAULT; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; for (index = 0; index < cmd_size; index++) { if (cmd == vf_cmd_handler[index].cmd) @@ -460,10 +480,14 @@ void hinic3_notify_dcb_state_event(struct hinic3_nic_io *nic_io, static void dcb_state_event(void *hwdev, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { - struct hinic3_cmd_vf_dcb_state *vf_dcb; - struct hinic3_nic_io *nic_io; + struct hinic3_cmd_vf_dcb_state *vf_dcb = NULL; + struct hinic3_nic_io *nic_io = NULL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is NULL\n"); + return; + } vf_dcb = buf_in; if (!vf_dcb) @@ -479,9 +503,13 @@ static void tx_pause_excp_event_handler(void *hwdev, void *buf_in, u16 in_size, struct hinic3_nic_io *nic_io = NULL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is NULL\n"); + return; + } if (in_size != sizeof(*excp_info)) { - nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %ld\n", + nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %lu\n", in_size, sizeof(*excp_info)); return; } @@ -501,6 +529,10 @@ static void bond_active_event_handler(void *hwdev, void *buf_in, u16 in_size, struct hinic3_event_info event_info = {0}; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is NULL\n"); + return; + } if (in_size != sizeof(*active_info)) { nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %ld\n", @@ -515,6 +547,36 @@ static void 
bond_active_event_handler(void *hwdev, void *buf_in, u16 in_size, hinic3_event_callback(nic_io->hwdev, &event_info); } +static void outband_vlan_cfg_event_handler(void *hwdev, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic3_outband_cfg_info *outband_cfg_info = buf_in; + struct hinic3_nic_io *nic_io = NULL; + struct hinic3_event_info event_info = {0}; + + nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is NULL\n"); + return; + } + + nic_info(nic_io->dev_hdl, "outband vlan cfg event received\n"); + + if (in_size != sizeof(*outband_cfg_info)) { + nic_err(nic_io->dev_hdl, "outband cfg info invalid in_size: %u, should be %lu\n", + in_size, sizeof(*outband_cfg_info)); + return; + } + + event_info.service = EVENT_SRV_NIC; + event_info.type = EVENT_NIC_OUTBAND_CFG; + memcpy((void *)event_info.event_data, + outband_cfg_info, sizeof(*outband_cfg_info)); + + hinic3_event_callback(nic_io->hwdev, &event_info); +} + static const struct nic_event_handler nic_cmd_handler[] = { { .cmd = HINIC3_NIC_CMD_VF_COS, @@ -529,6 +591,11 @@ static const struct nic_event_handler nic_cmd_handler[] = { .cmd = HINIC3_NIC_CMD_BOND_ACTIVE_NOTICE, .handler = bond_active_event_handler, }, + + { + .cmd = HINIC3_NIC_CMD_OUTBAND_CFG_NOTICE, + .handler = outband_vlan_cfg_event_handler, + }, }; static int _event_handler(void *hwdev, u16 cmd, void *buf_in, u16 in_size, @@ -543,6 +610,8 @@ static int _event_handler(void *hwdev, u16 cmd, void *buf_in, u16 in_size, *out_size = 0; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; for (i = 0; i < size; i++) { if (cmd == nic_cmd_handler[i].cmd) { diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c index 22670ffe7ebfbe5c4326dce16cc3f1537c9931f7..a9768b7253925be4cdfe3e8c7f5ee4065c63429e 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c @@ -16,7 +16,8 @@ #include "hinic3_nic_cfg.h" #include "hinic3_srv_nic.h" #include "hinic3_nic.h" -#include "hinic3_nic_cmd.h" +#include "nic_mpu_cmd.h" +#include "nic_npu_cmd.h" #include "hinic3_nic_io.h" #define HINIC3_DEAULT_TX_CI_PENDING_LIMIT 1 @@ -34,7 +35,7 @@ MODULE_PARM_DESC(tx_coalescing_time, "TX CI coalescing parameter coalescing_time static unsigned char rq_wqe_type = HINIC3_NORMAL_RQ_WQE; module_param(rq_wqe_type, byte, 0444); -MODULE_PARM_DESC(rq_wqe_type, "RQ WQE type 0-8Bytes, 1-16Bytes, 2-32Bytes (default=2)"); +MODULE_PARM_DESC(rq_wqe_type, "RQ WQE type 1-16Bytes, 2-32Bytes (default=2)"); /*lint +e806*/ static u32 tx_drop_thd_on = HINIC3_DEAULT_DROP_THD_ON; @@ -45,7 +46,7 @@ static u32 tx_drop_thd_off = HINIC3_DEAULT_DROP_THD_OFF; module_param(tx_drop_thd_off, uint, 0644); MODULE_PARM_DESC(tx_drop_thd_off, "TX parameter drop_thd_off (default=0)"); /* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ -#define HINIC3_CI_Q_ADDR_SIZE (64) +#define HINIC3_CI_Q_ADDR_SIZE (64U) #define CI_TABLE_SIZE(num_qps, pg_sz) \ (ALIGN((num_qps) * HINIC3_CI_Q_ADDR_SIZE, pg_sz)) @@ -350,6 +351,13 @@ static int hinic3_create_rq(struct hinic3_nic_io *nic_io, struct hinic3_io_queue { int err; + /* rq_wqe_type Only support type 1-16Bytes, 2-32Bytes */ + if (rq_wqe_type != HINIC3_NORMAL_RQ_WQE && rq_wqe_type != HINIC3_EXTEND_RQ_WQE) { + sdk_warn(nic_io->dev_hdl, "Module Parameter rq_wqe_type value %d is out of range: [%d, %d].", + rq_wqe_type, HINIC3_NORMAL_RQ_WQE, HINIC3_EXTEND_RQ_WQE); + rq_wqe_type = 
HINIC3_NORMAL_RQ_WQE; + } + rq->wqe_type = rq_wqe_type; rq->q_id = q_id; rq->msix_entry_idx = rq_msix_idx; @@ -1044,8 +1052,8 @@ static int clean_queue_offload_ctxt(struct hinic3_nic_io *nic_io, static int clean_qp_offload_ctxt(struct hinic3_nic_io *nic_io) { /* clean LRO/TSO context space */ - return (clean_queue_offload_ctxt(nic_io, HINIC3_QP_CTXT_TYPE_SQ) || - clean_queue_offload_ctxt(nic_io, HINIC3_QP_CTXT_TYPE_RQ)); + return ((clean_queue_offload_ctxt(nic_io, HINIC3_QP_CTXT_TYPE_SQ) != 0) || + (clean_queue_offload_ctxt(nic_io, HINIC3_QP_CTXT_TYPE_RQ) != 0)); } /* init qps ctxt and set sq ci attr and arm all sq */ diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h index 5c5585a7fd740ef14197da48df76679623c602b7..943a7364947c2b6730de2ebe1a00f32c005577b8 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h @@ -303,7 +303,7 @@ static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos, wmb(); /* Write all before the doorbell */ - writeq(*((u64 *)&db), DB_ADDR(queue, pi)); + writeq(*((u64 *)(u8 *)&db), DB_ADDR(queue, pi)); } struct hinic3_dyna_qp_params { diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_prof.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_prof.c index 78d943d2dab5cec7fb73adb466762befe2232184..9ea93a0f8dbbee7cfcd121bfb683ce043738e75b 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_prof.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_prof.c @@ -32,7 +32,7 @@ struct hinic3_prof_adapter nic_prof_adap_objs[] = { void hinic3_init_nic_prof_adapter(struct hinic3_nic_dev *nic_dev) { - u16 num_adap = ARRAY_SIZE(nic_prof_adap_objs); + int num_adap = ARRAY_LEN(nic_prof_adap_objs); nic_dev->prof_adap = hinic3_prof_init(nic_dev, nic_prof_adap_objs, num_adap, (void *)&nic_dev->prof_attr); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_ntuple.c b/drivers/net/ethernet/huawei/hinic3/hinic3_ntuple.c index 283c54444e8af7265645d22dfc7f89da02d610cd..6d9b0c1c37539f4c73a9711eb11f2960d73492ec 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_ntuple.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_ntuple.c @@ -394,7 +394,8 @@ hinic3_alloc_dynamic_block_resource(struct hinic3_nic_dev *nic_dev, dynamic_block_ptr = kzalloc(sizeof(*dynamic_block_ptr), GFP_KERNEL); if (!dynamic_block_ptr) { - nicif_err(nic_dev, drv, nic_dev->netdev, "fdir filter dynamic alloc block index %d memory failed\n", + nicif_err(nic_dev, drv, nic_dev->netdev, + "fdir filter dynamic alloc block index %u memory failed\n", dynamic_block_id); return NULL; } @@ -433,7 +434,8 @@ hinic3_dynamic_lookup_tcam_filter(struct hinic3_nic_dev *nic_dev, list_for_each_entry(tmp, &tcam_info->tcam_dynamic_info.tcam_dynamic_list, block_list) - if (tmp->dynamic_index_cnt < HINIC3_TCAM_DYNAMIC_BLOCK_SIZE) + if (!tmp || + tmp->dynamic_index_cnt < HINIC3_TCAM_DYNAMIC_BLOCK_SIZE) break; if (!tmp || tmp->dynamic_index_cnt >= HINIC3_TCAM_DYNAMIC_BLOCK_SIZE) { @@ -515,7 +517,7 @@ static int hinic3_add_tcam_filter(struct hinic3_nic_dev *nic_dev, } nicif_info(nic_dev, drv, nic_dev->netdev, - "Add fdir tcam rule, function_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, queue: %d, tcam_rule_nums: %d succeed\n", + "Add fdir tcam rule, function_id: 0x%x, tcam_block_id: %u, local_index: %u, global_index: %u, queue: %u, tcam_rule_nums: %u succeed\n", hinic3_global_func_id(nic_dev->hwdev), tcam_filter->dynamic_block_id, index, fdir_tcam_rule->index, 
fdir_tcam_rule->data.qid, tcam_info->tcam_rule_nums + 1); @@ -582,7 +584,7 @@ static int hinic3_del_tcam_filter(struct hinic3_nic_dev *nic_dev, } nicif_info(nic_dev, drv, nic_dev->netdev, - "Del fdir_tcam_dynamic_rule function_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, local_rules_nums: %d, global_rule_nums: %d succeed\n", + "Del fdir_tcam_dynamic_rule function_id: 0x%x, tcam_block_id: %u, local_index: %u, global_index: %u, local_rules_nums: %u, global_rule_nums: %u succeed\n", hinic3_global_func_id(nic_dev->hwdev), dynamic_block_id, tcam_filter->index, index, tmp->dynamic_index_cnt - 1, tcam_info->tcam_rule_nums - 1); @@ -608,7 +610,7 @@ static inline struct hinic3_tcam_filter * hinic3_tcam_filter_lookup(const struct list_head *filter_list, struct tag_tcam_key *key) { - struct hinic3_tcam_filter *iter; + struct hinic3_tcam_filter *iter = NULL; list_for_each_entry(iter, filter_list, tcam_filter_list) { if (memcmp(key, &iter->tcam_key, @@ -633,7 +635,7 @@ static int hinic3_remove_one_rule(struct hinic3_nic_dev *nic_dev, struct hinic3_ethtool_rx_flow_rule *eth_rule) { struct hinic3_tcam_info *tcam_info = &nic_dev->tcam; - struct hinic3_tcam_filter *tcam_filter; + struct hinic3_tcam_filter *tcam_filter = NULL; struct nic_tcam_cfg_rule fdir_tcam_rule; struct tag_tcam_key tcam_key; int err; @@ -754,7 +756,7 @@ static int validate_flow(struct hinic3_nic_dev *nic_dev, { if (fs->location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) { nicif_err(nic_dev, drv, nic_dev->netdev, "loc exceed limit[0,%lu]\n", - MAX_NUM_OF_ETHTOOL_NTUPLE_RULES); + MAX_NUM_OF_ETHTOOL_NTUPLE_RULES - 1); return -EINVAL; } @@ -875,7 +877,7 @@ int hinic3_ethtool_get_flow(const struct hinic3_nic_dev *nic_dev, int hinic3_ethtool_get_all_flows(const struct hinic3_nic_dev *nic_dev, struct ethtool_rxnfc *info, u32 *rule_locs) { - int idx = 0; + u32 idx = 0; struct hinic3_ethtool_rx_flow_rule *eth_rule = NULL; if (!HINIC3_SUPPORT_FDIR(nic_dev->hwdev)) { diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_profile.h b/drivers/net/ethernet/huawei/hinic3/hinic3_profile.h deleted file mode 100644 index a93f3b60e7097b24b9a239d716048c3cc72599a0..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_profile.h +++ /dev/null @@ -1,146 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ - -#ifndef HINIC3_PROFILE_H -#define HINIC3_PROFILE_H - -typedef bool (*hinic3_is_match_prof)(void *device); -typedef void *(*hinic3_init_prof_attr)(void *device); -typedef void (*hinic3_deinit_prof_attr)(void *porf_attr); - -enum prof_adapter_type { - PROF_ADAP_TYPE_INVALID, - PROF_ADAP_TYPE_PANGEA = 1, - - /* Add prof adapter type before default */ - PROF_ADAP_TYPE_DEFAULT, -}; - -/** - * struct hinic3_prof_adapter - custom scene's profile adapter - * @type: adapter type - * @match: Check whether the current function is used in the custom scene. - * Implemented in the current source file - * @init: When @match return true, the initialization function called in probe. - * Implemented in the source file of the custom scene - * @deinit: When @match return true, the deinitialization function called when - * remove. 
Implemented in the source file of the custom scene - */ -struct hinic3_prof_adapter { - enum prof_adapter_type type; - hinic3_is_match_prof match; - hinic3_init_prof_attr init; - hinic3_deinit_prof_attr deinit; -}; - -#ifdef static -#undef static -#define LLT_STATIC_DEF_SAVED -#endif - -/*lint -save -e661 */ -static inline struct hinic3_prof_adapter * -hinic3_prof_init(void *device, struct hinic3_prof_adapter *adap_objs, int num_adap, - void **prof_attr) -{ - struct hinic3_prof_adapter *prof_obj = NULL; - u16 i; - - for (i = 0; i < num_adap; i++) { - prof_obj = &adap_objs[i]; - if (!(prof_obj->match && prof_obj->match(device))) - continue; - - *prof_attr = prof_obj->init ? prof_obj->init(device) : NULL; - - return prof_obj; - } - - return NULL; -} - -static inline void hinic3_prof_deinit(struct hinic3_prof_adapter *prof_obj, void *prof_attr) -{ - if (!prof_obj) - return; - - if (prof_obj->deinit) - prof_obj->deinit(prof_attr); -} - -/*lint -restore*/ - -/* module-level interface */ -#ifdef CONFIG_MODULE_PROF -struct hinic3_module_ops { - int (*module_prof_init)(void); - void (*module_prof_exit)(void); - void (*probe_fault_process)(void *pdev, u16 level); - int (*probe_pre_process)(void *pdev); - void (*probe_pre_unprocess)(void *pdev); -}; - -struct hinic3_module_ops *hinic3_get_module_prof_ops(void); - -static inline void hinic3_probe_fault_process(void *pdev, u16 level) -{ - struct hinic3_module_ops *ops = hinic3_get_module_prof_ops(); - - if (ops && ops->probe_fault_process) - ops->probe_fault_process(pdev, level); -} - -static inline int hinic3_module_pre_init(void) -{ - struct hinic3_module_ops *ops = hinic3_get_module_prof_ops(); - - if (!ops || !ops->module_prof_init) - return -EINVAL; - - return ops->module_prof_init(); -} - -static inline void hinic3_module_post_exit(void) -{ - struct hinic3_module_ops *ops = hinic3_get_module_prof_ops(); - - if (ops && ops->module_prof_exit) - ops->module_prof_exit(); -} - -static inline int hinic3_probe_pre_process(void *pdev) -{ - struct hinic3_module_ops *ops = hinic3_get_module_prof_ops(); - - if (!ops || !ops->probe_pre_process) - return -EINVAL; - - return ops->probe_pre_process(pdev); -} - -static inline void hinic3_probe_pre_unprocess(void *pdev) -{ - struct hinic3_module_ops *ops = hinic3_get_module_prof_ops(); - - if (ops && ops->probe_pre_unprocess) - ops->probe_pre_unprocess(pdev); -} -#else -static inline void hinic3_probe_fault_process(void *pdev, u16 level) { }; - -static inline int hinic3_module_pre_init(void) -{ - return 0; -} - -static inline void hinic3_module_post_exit(void) { }; - -static inline int hinic3_probe_pre_process(void *pdev) -{ - return 0; -} - -static inline void hinic3_probe_pre_unprocess(void *pdev) { }; -#endif - -#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c index 6bf3b11dfa226e9c25c9d84992807da083d38f2f..655d2b8ab46122bcd6353b4889a7726aca61d7d1 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "ossl_knl.h" #include "hinic3_crm.h" @@ -23,8 +24,9 @@ #include "hinic3_hw.h" #include "hinic3_rss.h" -/*lint -e806*/ -static u16 num_qps; +#include "vram_common.h" + +static u16 num_qps = 0; module_param(num_qps, ushort, 0444); MODULE_PARM_DESC(num_qps, "Number of Queue Pairs (default=0)"); @@ -104,88 +106,84 @@ static int hinic3_get_rq2iq_map(struct hinic3_nic_dev *nic_dev, return 0; } -static void 
hinic3_fillout_indir_tbl(struct hinic3_nic_dev *nic_dev, u8 num_cos, u32 *indir) +static void hinic3_fillout_indir_tbl(struct hinic3_nic_dev *nic_dev, + u8 group_num, u32 *indir) { - u16 k, group_size, start_qid = 0, qp_num = 0; - int i = 0; - u8 j, cur_cos = 0, default_cos; + struct hinic3_dcb *dcb = nic_dev->dcb; + u16 k, group_size, start_qid = 0, cur_cos_qnum = 0; + u32 i = 0; + u8 j, cur_cos = 0, group = 0; u8 valid_cos_map = hinic3_get_dev_valid_cos_map(nic_dev); - if (num_cos == 0) { + if (group_num == 0) { for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) indir[i] = i % nic_dev->q_params.num_qps; } else { - group_size = NIC_RSS_INDIR_SIZE / num_cos; - - for (j = 0; j < num_cos; j++) { - while (cur_cos < NIC_DCB_COS_MAX && - nic_dev->hw_dcb_cfg.cos_qp_num[cur_cos] == 0) - cur_cos++; - - if (cur_cos >= NIC_DCB_COS_MAX) { - if (BIT(nic_dev->hw_dcb_cfg.default_cos) & valid_cos_map) - default_cos = nic_dev->hw_dcb_cfg.default_cos; - else - default_cos = (u8)fls(valid_cos_map) - 1; + group_size = NIC_RSS_INDIR_SIZE / group_num; + + for (group = 0; group < group_num; group++) { + cur_cos = dcb->hw_dcb_cfg.default_cos; + for (j = 0; j < NIC_DCB_COS_MAX; j++) { + if ((BIT(j) & valid_cos_map) != 0) { + cur_cos = j; + valid_cos_map -= (u8)BIT(j); + break; + } + } - start_qid = nic_dev->hw_dcb_cfg.cos_qp_offset[default_cos]; - qp_num = nic_dev->hw_dcb_cfg.cos_qp_num[default_cos]; + cur_cos_qnum = dcb->hw_dcb_cfg.cos_qp_num[cur_cos]; + if (cur_cos_qnum > 0) { + start_qid = + dcb->hw_dcb_cfg.cos_qp_offset[cur_cos]; } else { - start_qid = nic_dev->hw_dcb_cfg.cos_qp_offset[cur_cos]; - qp_num = nic_dev->hw_dcb_cfg.cos_qp_num[cur_cos]; + start_qid = cur_cos % nic_dev->q_params.num_qps; + /* Ensure that the offset of start_id is 0. */ + cur_cos_qnum = 1; } for (k = 0; k < group_size; k++) - indir[i++] = start_qid + k % qp_num; - - cur_cos++; + indir[i++] = start_qid + k % cur_cos_qnum; } } } -/*lint -e528*/ int hinic3_rss_init(struct hinic3_nic_dev *nic_dev, u8 *rq2iq_map, u32 map_size, u8 dcb_en) { struct net_device *netdev = nic_dev->netdev; - u8 i, cos_num; - u8 cos_map[NIC_DCB_UP_MAX] = {0}; - u8 cfg_map[NIC_DCB_UP_MAX] = {0}; + u8 i, group_num, cos_bitmap, group = 0; + u8 cos_group[NIC_DCB_UP_MAX] = {0}; int err; - if (dcb_en) { - cos_num = hinic3_get_dev_user_cos_num(nic_dev); - - if (nic_dev->hw_dcb_cfg.trust == 0) { - memcpy(cfg_map, nic_dev->hw_dcb_cfg.pcp2cos, sizeof(cfg_map)); - } else if (nic_dev->hw_dcb_cfg.trust == 1) { - for (i = 0; i < NIC_DCB_UP_MAX; i++) - cfg_map[i] = nic_dev->hw_dcb_cfg.dscp2cos[i * NIC_DCB_DSCP_NUM]; - } -#define COS_CHANGE_OFFSET 4 - for (i = 0; i < COS_CHANGE_OFFSET; i++) - cos_map[COS_CHANGE_OFFSET + i] = cfg_map[i]; + if (dcb_en != 0) { + group_num = (u8)roundup_pow_of_two( + hinic3_get_dev_user_cos_num(nic_dev)); - for (i = 0; i < COS_CHANGE_OFFSET; i++) - cos_map[i] = cfg_map[NIC_DCB_UP_MAX - (i + 1)]; + cos_bitmap = hinic3_get_dev_valid_cos_map(nic_dev); - while (cos_num & (cos_num - 1)) - cos_num++; + for (i = 0; i < NIC_DCB_UP_MAX; i++) { + if ((BIT(i) & cos_bitmap) != 0) + cos_group[NIC_DCB_UP_MAX - i - 1] = group++; + else + cos_group[NIC_DCB_UP_MAX - i - 1] = + group_num - 1; + } } else { - cos_num = 0; + group_num = 0; } - err = hinic3_set_hw_rss_parameters(netdev, 1, cos_num, cos_map, dcb_en); + err = hinic3_set_hw_rss_parameters(netdev, 1, group_num, + cos_group, dcb_en); if (err) return err; - err = hinic3_get_rq2iq_map(nic_dev, nic_dev->q_params.num_qps, cos_num, cos_map, - NIC_DCB_UP_MAX, nic_dev->rss_indir, rq2iq_map, map_size); + err = 
hinic3_get_rq2iq_map(nic_dev, nic_dev->q_params.num_qps, + group_num, cos_group, NIC_DCB_UP_MAX, + nic_dev->rss_indir, rq2iq_map, map_size); if (err) nicif_err(nic_dev, drv, netdev, "Failed to get rq map\n"); return err; } -/*lint -e528*/ void hinic3_rss_deinit(struct hinic3_nic_dev *nic_dev) { u8 cos_map[NIC_DCB_UP_MAX] = {0}; @@ -246,28 +244,59 @@ static void hinic3_maybe_reconfig_rss_indir(struct net_device *netdev, u8 dcb_en hinic3_set_default_rss_indir(netdev); } +#ifdef HAVE_HOT_REPLACE_FUNC +bool partition_slave_doing_hotupgrade(void) +{ + return get_partition_role() && partition_doing_hotupgrade(); +} +#endif + static void decide_num_qps(struct hinic3_nic_dev *nic_dev) { u16 tmp_num_qps = nic_dev->max_qps; u16 num_cpus = 0; + u16 max_num_cpus; int i, node; + int is_in_kexec = vram_get_kexec_flag(); + if (is_in_kexec != 0) { + nic_dev->q_params.num_qps = nic_dev->nic_vram->vram_num_qps; + nicif_info(nic_dev, drv, nic_dev->netdev, + "Os hotreplace use vram to init num qps 1:%hu 2:%hu\n", + nic_dev->q_params.num_qps, + nic_dev->nic_vram->vram_num_qps); + return; + } + if (nic_dev->nic_cap.default_num_queues != 0 && nic_dev->nic_cap.default_num_queues < nic_dev->max_qps) tmp_num_qps = nic_dev->nic_cap.default_num_queues; MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, tmp_num_qps); - for (i = 0; i < (int)num_online_cpus(); i++) { +#ifdef HAVE_HOT_REPLACE_FUNC + if (partition_slave_doing_hotupgrade()) + max_num_cpus = (u16)num_present_cpus(); + else + max_num_cpus = (u16)num_online_cpus(); +#else + max_num_cpus = (u16)num_online_cpus(); +#endif + + for (i = 0; i < max_num_cpus; i++) { node = (int)cpu_to_node(i); if (node == dev_to_node(&nic_dev->pdev->dev)) num_cpus++; } if (!num_cpus) - num_cpus = (u16)num_online_cpus(); + num_cpus = max_num_cpus; nic_dev->q_params.num_qps = (u16)min_t(u16, tmp_num_qps, num_cpus); + nic_dev->nic_vram->vram_num_qps = nic_dev->q_params.num_qps; + nicif_info(nic_dev, drv, nic_dev->netdev, + "init num qps 1:%u 2:%u\n", + nic_dev->q_params.num_qps, nic_dev->nic_vram->vram_num_qps); } static void copy_value_to_rss_hkey(struct hinic3_nic_dev *nic_dev, @@ -324,7 +353,6 @@ static int alloc_rss_resource(struct hinic3_nic_dev *nic_dev) return 0; } -/*lint -e528*/ void hinic3_try_to_enable_rss(struct hinic3_nic_dev *nic_dev) { u8 cos_map[NIC_DCB_UP_MAX] = {0}; @@ -363,6 +391,7 @@ void hinic3_try_to_enable_rss(struct hinic3_nic_dev *nic_dev) set_q_params: clear_bit(HINIC3_RSS_ENABLE, &nic_dev->flags); nic_dev->q_params.num_qps = nic_dev->max_qps; + nic_dev->nic_vram->vram_num_qps = nic_dev->max_qps; } static int hinic3_config_rss_hw_resource(struct hinic3_nic_dev *nic_dev, @@ -756,6 +785,7 @@ int hinic3_set_channels(struct net_device *netdev, nic_dev->q_params.num_qps = (u16)count; } + nic_dev->nic_vram->vram_num_qps = nic_dev->q_params.num_qps; return 0; } @@ -816,7 +846,7 @@ int hinic3_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) int err = 0; if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Rss is disable\n"); + netdev_warn_once(nic_dev->netdev, "Rss is disable\n"); return -EOPNOTSUPP; } @@ -923,7 +953,7 @@ int hinic3_get_rxfh_indir(struct net_device *netdev, u32 *indir) indir = indir1->ring_index; #endif if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Rss is disable\n"); + netdev_warn_once(nic_dev->netdev, "Rss is disable\n"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss_cfg.c 
b/drivers/net/ethernet/huawei/hinic3/hinic3_rss_cfg.c index 175c4d68b795ab4fb2a1b1b96039008b32ecc8ca..902d7e2195839a932e568ca5a21b42b17da61dfd 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rss_cfg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss_cfg.c @@ -15,13 +15,14 @@ #include "ossl_knl.h" #include "hinic3_crm.h" #include "hinic3_nic_cfg.h" -#include "hinic3_nic_cmd.h" +#include "nic_mpu_cmd.h" +#include "nic_npu_cmd.h" #include "hinic3_hw.h" #include "hinic3_nic.h" #include "hinic3_common.h" static int hinic3_rss_cfg_hash_key(struct hinic3_nic_io *nic_io, u8 opcode, - u8 *key) + u8 *key, u16 key_size) { struct hinic3_cmd_rss_hash_key hash_key; u16 out_size = sizeof(hash_key); @@ -32,7 +33,7 @@ static int hinic3_rss_cfg_hash_key(struct hinic3_nic_io *nic_io, u8 opcode, hash_key.opcode = opcode; if (opcode == HINIC3_CMD_OP_SET) - memcpy(hash_key.key, key, NIC_RSS_KEY_SIZE); + memcpy(hash_key.key, key, key_size); err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC3_NIC_CMD_CFG_RSS_HASH_KEY, @@ -46,7 +47,7 @@ static int hinic3_rss_cfg_hash_key(struct hinic3_nic_io *nic_io, u8 opcode, } if (opcode == HINIC3_CMD_OP_GET) - memcpy(key, hash_key.key, NIC_RSS_KEY_SIZE); + memcpy(key, hash_key.key, key_size); return 0; } @@ -60,8 +61,11 @@ int hinic3_rss_set_hash_key(void *hwdev, const u8 *key) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; memcpy(hash_key, key, NIC_RSS_KEY_SIZE); - return hinic3_rss_cfg_hash_key(nic_io, HINIC3_CMD_OP_SET, hash_key); + return hinic3_rss_cfg_hash_key(nic_io, HINIC3_CMD_OP_SET, + hash_key, NIC_RSS_KEY_SIZE); } int hinic3_rss_get_hash_key(void *hwdev, u8 *key) @@ -72,7 +76,10 @@ int hinic3_rss_get_hash_key(void *hwdev, u8 *key) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); - return hinic3_rss_cfg_hash_key(nic_io, HINIC3_CMD_OP_GET, key); + if (!nic_io) + return -EINVAL; + return hinic3_rss_cfg_hash_key(nic_io, HINIC3_CMD_OP_GET, + key, NIC_RSS_KEY_SIZE); } int hinic3_rss_get_indir_tbl(void *hwdev, u32 *indir_table) @@ -86,6 +93,9 @@ int hinic3_rss_get_indir_tbl(void *hwdev, u32 *indir_table) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + cmd_buf = hinic3_alloc_cmd_buf(hwdev); if (!cmd_buf) { nic_err(nic_io->dev_hdl, "Failed to allocate cmd_buf.\n"); @@ -126,6 +136,8 @@ int hinic3_rss_set_indir_tbl(void *hwdev, const u32 *indir_table) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; cmd_buf = hinic3_alloc_cmd_buf(hwdev); if (!cmd_buf) { nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); @@ -170,6 +182,8 @@ static int hinic3_cmdq_set_rss_type(void *hwdev, struct nic_rss_type rss_type) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; cmd_buf = hinic3_alloc_cmd_buf(hwdev); if (!cmd_buf) { @@ -221,6 +235,8 @@ static int hinic3_mgmt_set_rss_type(void *hwdev, struct nic_rss_type rss_type) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; memset(&ctx_tbl, 0, sizeof(ctx_tbl)); ctx_tbl.func_id = hinic3_global_func_id(hwdev); ctx |= HINIC3_RSS_TYPE_SET(1, VALID) | @@ -270,6 +286,8 @@ int hinic3_get_rss_type(void *hwdev, struct nic_rss_type *rss_type) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; memset(&ctx_tbl, 0, sizeof(struct hinic3_rss_context_table)); ctx_tbl.func_id = 
hinic3_global_func_id(hwdev); @@ -303,6 +321,9 @@ static int hinic3_rss_cfg_hash_engine(struct hinic3_nic_io *nic_io, u8 opcode, u16 out_size = sizeof(hash_type); int err; + if (!nic_io) + return -EINVAL; + memset(&hash_type, 0, sizeof(struct hinic3_cmd_rss_engine_type)); hash_type.func_id = hinic3_global_func_id(nic_io->hwdev); @@ -336,6 +357,9 @@ int hinic3_rss_set_hash_engine(void *hwdev, u8 type) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + return hinic3_rss_cfg_hash_engine(nic_io, HINIC3_CMD_OP_SET, &type); } @@ -347,6 +371,9 @@ int hinic3_rss_get_hash_engine(void *hwdev, u8 *type) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + return hinic3_rss_cfg_hash_engine(nic_io, HINIC3_CMD_OP_GET, type); } @@ -362,6 +389,8 @@ int hinic3_rss_cfg(void *hwdev, u8 rss_en, u8 cos_num, u8 *prio_tc, u16 num_qps) return -EINVAL; nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; memset(&rss_cfg, 0, sizeof(struct hinic3_cmd_rss_config)); rss_cfg.func_id = hinic3_global_func_id(hwdev); rss_cfg.rss_en = rss_en; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c index 5c60d016e2fe7368757ec2dbc8bc485bbc240dcf..4dbd6dda638f03074a4d609ade488d1699041188 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #include "ossl_knl.h" #include "hinic3_crm.h" @@ -31,10 +33,6 @@ #include "hinic3_rss.h" #include "hinic3_rx.h" -static u32 rq_pi_rd_en; -module_param(rq_pi_rd_en, uint, 0644); -MODULE_PARM_DESC(rq_pi_rd_en, "Enable rq read pi from host, defaut update pi by doorbell (default=0)"); - /* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ #define HINIC3_RX_HDR_SIZE 256 #define HINIC3_RX_BUFFER_WRITE 16 @@ -63,19 +61,34 @@ static bool rx_alloc_mapped_page(struct hinic3_nic_dev *nic_dev, struct pci_dev *pdev = nic_dev->pdev; struct page *page = rx_info->page; dma_addr_t dma = rx_info->buf_dma_addr; + u32 page_offset = 0; if (likely(dma)) return true; /* alloc new page for storage */ - page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC | __GFP_COLD | - __GFP_COMP, nic_dev->page_order); +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool) { + page = page_pool_alloc_frag(rx_info->page_pool, &page_offset, + nic_dev->rx_buff_len, + GFP_ATOMIC | __GFP_COLD | + __GFP_COMP); + if (unlikely(!page)) + return false; + dma = page_pool_get_dma_addr(page); + goto set_rx_info; + } +#endif + page = alloc_pages_node(NUMA_NO_NODE, + GFP_ATOMIC | __GFP_COLD | __GFP_COMP, + nic_dev->page_order); + if (unlikely(!page)) return false; /* map page for use */ - dma = dma_map_page(&pdev->dev, page, 0, nic_dev->dma_rx_buff_size, - DMA_FROM_DEVICE); + dma = dma_map_page(&pdev->dev, page, page_offset, + nic_dev->dma_rx_buff_size, DMA_FROM_DEVICE); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ @@ -83,10 +96,12 @@ static bool rx_alloc_mapped_page(struct hinic3_nic_dev *nic_dev, __free_pages(page, nic_dev->page_order); return false; } + goto set_rx_info; +set_rx_info: rx_info->page = page; rx_info->buf_dma_addr = dma; - rx_info->page_offset = 0; + rx_info->page_offset = page_offset; return true; } @@ -108,8 +123,8 @@ static u32 hinic3_rx_fill_wqe(struct 
hinic3_rxq *rxq) /* unit of cqe length is 16B */ hinic3_set_sge(&rq_wqe->extend_wqe.cqe_sect.sge, rx_info->cqe_dma, - (sizeof(struct hinic3_rq_cqe) >> - HINIC3_CQE_SIZE_SHIFT)); + (HINIC3_CQE_LEN >> + HINIC3_CQE_SIZE_SHIFT)); /* use fixed len */ rq_wqe->extend_wqe.buf_desc.sge.len = nic_dev->rx_buff_len; @@ -163,18 +178,11 @@ static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq) } if (likely(i)) { - if (!rq_pi_rd_en) { - hinic3_write_db(rxq->rq, - rxq->q_id & (NIC_DCB_COS_MAX - 1), - RQ_CFLAG_DP, - (u16)((u32)rxq->next_to_update << - rxq->rq->wqe_type)); - } else { - /* Write all the wqes before pi update */ - wmb(); - - hinic3_update_rq_hw_pi(rxq->rq, rxq->next_to_update); - } + hinic3_write_db(rxq->rq, + rxq->q_id & (NIC_RX_DB_COS_MAX - 1), + RQ_CFLAG_DP, + (u16)((u32)rxq->next_to_update << + rxq->rq->wqe_type)); rxq->delta -= i; rxq->next_to_alloc = rxq->next_to_update; } else if (free_wqebbs == rxq->q_depth - 1) { @@ -208,6 +216,18 @@ static void hinic3_rx_free_buffers(struct hinic3_nic_dev *nic_dev, u32 q_depth, for (i = 0; i < q_depth; i++) { rx_info = &rx_info_arr[i]; +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool) { + if (rx_info->page) { + page_pool_put_full_page(rx_info->page_pool, + rx_info->page, false); + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + } + continue; + } +#endif + if (rx_info->buf_dma_addr) { dma_unmap_page(&nic_dev->pdev->dev, rx_info->buf_dma_addr, @@ -226,7 +246,7 @@ static void hinic3_rx_free_buffers(struct hinic3_nic_dev *nic_dev, u32 q_depth, static void hinic3_reuse_rx_page(struct hinic3_rxq *rxq, struct hinic3_rx_info *old_rx_info) { - struct hinic3_rx_info *new_rx_info; + struct hinic3_rx_info *new_rx_info = NULL; u16 nta = rxq->next_to_alloc; new_rx_info = &rxq->rx_info[nta]; @@ -250,8 +270,8 @@ static bool hinic3_add_rx_frag(struct hinic3_rxq *rxq, struct hinic3_rx_info *rx_info, struct sk_buff *skb, u32 size) { - struct page *page; - u8 *va; + struct page *page = NULL; + u8 *va = NULL; page = rx_info->page; va = (u8 *)page_address(page) + rx_info->page_offset; @@ -267,8 +287,15 @@ static bool hinic3_add_rx_frag(struct hinic3_rxq *rxq, DMA_FROM_DEVICE); if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { - memcpy(__skb_put(skb, size), va, - ALIGN(size, sizeof(long))); /*lint !e666*/ + __skb_put_data(skb, va, size); + +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool) { + page_pool_put_full_page(rx_info->page_pool, + page, false); + return false; + } +#endif /* page is not reserved, we can reuse buffer as-is */ if (likely(page_to_nid(page) == numa_node_id())) @@ -276,25 +303,37 @@ static bool hinic3_add_rx_frag(struct hinic3_rxq *rxq, /* this page cannot be reused so discard it */ put_page(page); - return false; + goto discard_page; } skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, (int)rx_info->page_offset, (int)size, rxq->buf_len); +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool) { + skb_mark_for_recycle(skb); + return false; + } +#endif + /* avoid re-using remote pages */ if (unlikely(page_to_nid(page) != numa_node_id())) - return false; + goto discard_page; /* if we are only owner of page we can reuse it */ if (unlikely(page_count(page) != 1)) - return false; + goto discard_page; /* flip page offset to other buffer */ rx_info->page_offset ^= rxq->buf_len; get_page(page); return true; + +discard_page: + dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, + rxq->dma_rx_buff_size, DMA_FROM_DEVICE); + return false; } static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *head_skb, @@ -334,13 +373,9 
@@ static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *head_skb, head_skb->truesize += rxq->buf_len; } - if (likely(hinic3_add_rx_frag(rxq, rx_info, skb, size))) { + if (likely(hinic3_add_rx_frag(rxq, rx_info, skb, size))) hinic3_reuse_rx_page(rxq, rx_info); - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, - rxq->dma_rx_buff_size, DMA_FROM_DEVICE); - } + /* clear contents of buffer_info */ rx_info->buf_dma_addr = 0; rx_info->page = NULL; @@ -481,9 +516,8 @@ static unsigned int hinic3_eth_get_headlen(unsigned char *data, unsigned int max protocol = hdr.eth->h_proto; /* L2 header */ - /*lint -save -e778*/ if (protocol == htons(ETH_P_8021_AD) || - protocol == htons(ETH_P_8021_Q)) { /*lint -restore*/ + protocol == htons(ETH_P_8021_Q)) { if (unlikely(max_len < ETH_HLEN + VLAN_HLEN)) return max_len; @@ -495,9 +529,8 @@ static unsigned int hinic3_eth_get_headlen(unsigned char *data, unsigned int max } /* L3 header */ - /*lint -save -e778*/ switch (protocol) { - case htons(ETH_P_IP): /*lint -restore*/ + case htons(ETH_P_IP): if ((int)(hdr.data - data) > (int)(max_len - sizeof(struct iphdr))) return max_len; @@ -680,7 +713,7 @@ static void hinic3_copy_lp_data(struct hinic3_nic_dev *nic_dev, nicif_warn(nic_dev, rx_err, netdev, "Loopback test warning, receive too many test pkts\n"); } - if (skb->len != nic_dev->lb_pkt_len) { + if (skb->len != (u32)(nic_dev->lb_pkt_len)) { nicif_warn(nic_dev, rx_err, netdev, "Wrong packet length\n"); nic_dev->lb_test_rx_idx++; return; @@ -714,7 +747,10 @@ static inline void hinic3_lro_set_gso_params(struct sk_buff *skb, u16 num_lro) } #ifdef HAVE_XDP_SUPPORT -enum hinic3_xdp_pkt { +enum hinic3_xdp_status { + // bpf_prog status + HINIC3_XDP_PROG_EMPTY, + // pkt action HINIC3_XDP_PKT_PASS, HINIC3_XDP_PKT_DROP, }; @@ -725,9 +761,15 @@ static void update_drop_rx_info(struct hinic3_rxq *rxq, u16 weqbb_num) while (weqbb_num) { rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool) + goto discard_direct; +#endif if (likely(page_to_nid(rx_info->page) == numa_node_id())) hinic3_reuse_rx_page(rxq, rx_info); + goto discard_direct; +discard_direct: rx_info->buf_dma_addr = 0; rx_info->page = NULL; rxq->cons_idx++; @@ -737,11 +779,10 @@ static void update_drop_rx_info(struct hinic3_rxq *rxq, u16 weqbb_num) } } -int hinic3_run_xdp(struct hinic3_rxq *rxq, u32 pkt_len) +int hinic3_run_xdp(struct hinic3_rxq *rxq, u32 pkt_len, struct xdp_buff *xdp) { struct bpf_prog *xdp_prog = NULL; struct hinic3_rx_info *rx_info = NULL; - struct xdp_buff xdp; int result = HINIC3_XDP_PKT_PASS; u16 weqbb_num = 1; /* xdp can only use one rx_buff */ u8 *va = NULL; @@ -749,13 +790,14 @@ int hinic3_run_xdp(struct hinic3_rxq *rxq, u32 pkt_len) rcu_read_lock(); xdp_prog = READ_ONCE(rxq->xdp_prog); - if (!xdp_prog) + if (!xdp_prog) { + result = HINIC3_XDP_PROG_EMPTY; goto unlock_rcu; + } if (unlikely(pkt_len > rxq->buf_len)) { RXQ_STATS_INC(rxq, xdp_large_pkt); - weqbb_num = (u16)(pkt_len >> rxq->rx_buff_shift) + - ((pkt_len & (rxq->buf_len - 1)) ? 
1 : 0); + weqbb_num = HINIC3_GET_SGE_NUM(pkt_len, rxq); result = HINIC3_XDP_PKT_DROP; goto xdp_out; } @@ -766,19 +808,20 @@ int hinic3_run_xdp(struct hinic3_rxq *rxq, u32 pkt_len) dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr, rx_info->page_offset, rxq->buf_len, DMA_FROM_DEVICE); - xdp.data = va; - xdp.data_hard_start = xdp.data; - xdp.data_end = xdp.data + pkt_len; + xdp->data = va; + xdp->data_hard_start = xdp->data; + xdp->data_end = xdp->data + pkt_len; #ifdef HAVE_XDP_FRAME_SZ - xdp.frame_sz = rxq->buf_len; + xdp->frame_sz = rxq->buf_len; #endif #ifdef HAVE_XDP_DATA_META - xdp_set_data_meta_invalid(&xdp); + xdp_set_data_meta_invalid(xdp); #endif - prefetchw(xdp.data_hard_start); - act = bpf_prog_run_xdp(xdp_prog, &xdp); + prefetchw(xdp->data_hard_start); + act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: + result = HINIC3_XDP_PKT_PASS; break; case XDP_DROP: result = HINIC3_XDP_PKT_DROP; @@ -799,12 +842,94 @@ int hinic3_run_xdp(struct hinic3_rxq *rxq, u32 pkt_len) return result; } + +static bool hinic3_add_rx_frag_with_xdp(struct hinic3_rxq *rxq, u32 pkt_len, + struct hinic3_rx_info *rx_info, + struct sk_buff *skb, + struct xdp_buff *xdp) +{ + struct page *page = rx_info->page; + + if (pkt_len <= HINIC3_RX_HDR_SIZE) { + __skb_put_data(skb, xdp->data, pkt_len); + +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool) { + page_pool_put_full_page(rx_info->page_pool, + page, false); + return false; + } +#endif + + if (likely(page_to_nid(page) == numa_node_id())) + return true; + + put_page(page); + goto umap_page; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + (int)(rx_info->page_offset + + (xdp->data - xdp->data_hard_start)), + (int)pkt_len, rxq->buf_len); + +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool) { + skb_mark_for_recycle(skb); + return false; + } +#endif + if (unlikely(page_to_nid(page) != numa_node_id())) + goto umap_page; + if (unlikely(page_count(page) != 1)) + goto umap_page; + + rx_info->page_offset ^= rxq->buf_len; + get_page(page); + + return true; + +umap_page: + dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, + rxq->dma_rx_buff_size, DMA_FROM_DEVICE); + return false; +} + +static struct sk_buff *hinic3_fetch_rx_buffer_xdp(struct hinic3_rxq *rxq, + u32 pkt_len, + struct xdp_buff *xdp) +{ + struct sk_buff *skb; + struct hinic3_rx_info *rx_info; + u32 sw_ci; + bool reuse; + + sw_ci = rxq->cons_idx & rxq->q_mask; + rx_info = &rxq->rx_info[sw_ci]; + + skb = netdev_alloc_skb_ip_align(rxq->netdev, HINIC3_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + reuse = hinic3_add_rx_frag_with_xdp(rxq, pkt_len, rx_info, skb, xdp); + if (likely(reuse)) + hinic3_reuse_rx_page(rxq, rx_info); + + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + + rxq->cons_idx += 1; + rxq->delta += 1; + + return skb; +} + #endif static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, u32 pkt_len, u32 vlan_len, u32 status) { - struct sk_buff *skb; + struct sk_buff *skb = NULL; struct net_device *netdev = rxq->netdev; u32 offload_type; u16 num_lro; @@ -812,13 +937,25 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, #ifdef HAVE_XDP_SUPPORT u32 xdp_status; + struct xdp_buff xdp = { 0 }; - xdp_status = hinic3_run_xdp(rxq, pkt_len); + xdp_status = (u32)(hinic3_run_xdp(rxq, pkt_len, &xdp)); if (xdp_status == HINIC3_XDP_PKT_DROP) return 0; -#endif + // build skb + if (xdp_status != HINIC3_XDP_PROG_EMPTY) { + // xdp_prog configured, build skb with xdp + skb = hinic3_fetch_rx_buffer_xdp(rxq, 
pkt_len, &xdp); + } else { + // xdp_prog not configured, build skb + skb = hinic3_fetch_rx_buffer(rxq, pkt_len); + } +#else + + // xdp is not supported skb = hinic3_fetch_rx_buffer(rxq, pkt_len); +#endif if (unlikely(!skb)) { RXQ_STATS_INC(rxq, alloc_skb_err); return -ENOMEM; @@ -852,7 +989,7 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, hinic3_copy_lp_data(nic_dev, skb); num_lro = HINIC3_GET_RX_NUM_LRO(status); - if (num_lro) + if (num_lro > 1) hinic3_lro_set_gso_params(skb, num_lro); skb_record_rx_queue(skb, rxq->q_id); @@ -931,12 +1068,40 @@ int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget) return pkts; } +#ifdef HAVE_PAGE_POOL_SUPPORT +static struct page_pool *hinic3_create_page_pool(struct hinic3_nic_dev *nic_dev, + u32 rq_depth, + struct hinic3_rx_info *rx_info_arr) +{ + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG | + PP_FLAG_DMA_SYNC_DEV, + .order = nic_dev->page_order, + .pool_size = rq_depth * nic_dev->rx_buff_len / + (PAGE_SIZE << nic_dev->page_order), + .nid = dev_to_node(&(nic_dev->pdev->dev)), + .dev = &(nic_dev->pdev->dev), + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + .max_len = PAGE_SIZE << nic_dev->page_order, + }; + struct page_pool *page_pool; + int i; + + page_pool = nic_dev->page_pool_enabled ? + page_pool_create(&pp_params) : NULL; + for (i = 0; i < rq_depth; i++) + rx_info_arr[i].page_pool = page_pool; + return page_pool; +} +#endif + int hinic3_alloc_rxqs_res(struct hinic3_nic_dev *nic_dev, u16 num_rq, u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res) { struct hinic3_dyna_rxq_res *rqres = NULL; u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth; - int idx, i; + int idx; u32 pkts; u64 size; @@ -947,46 +1112,49 @@ int hinic3_alloc_rxqs_res(struct hinic3_nic_dev *nic_dev, u16 num_rq, if (!rqres->rx_info) { nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc rxq%d rx info\n", idx); - goto err_out; + goto err_alloc_rx_info; } - rqres->cqe_start_vaddr = dma_zalloc_coherent(&nic_dev->pdev->dev, cqe_mem_size, &rqres->cqe_start_paddr, GFP_KERNEL); if (!rqres->cqe_start_vaddr) { - kfree(rqres->rx_info); nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc rxq%d cqe\n", idx); - goto err_out; + goto err_alloc_cqe; } - +#ifdef HAVE_PAGE_POOL_SUPPORT + rqres->page_pool = hinic3_create_page_pool(nic_dev, rq_depth, + rqres->rx_info); + if (nic_dev->page_pool_enabled && !rqres->page_pool) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to create rxq%d page pool\n", idx); + goto err_create_page_pool; + } +#endif pkts = hinic3_rx_alloc_buffers(nic_dev, rq_depth, rqres->rx_info); if (!pkts) { - dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, - rqres->cqe_start_vaddr, - rqres->cqe_start_paddr); - kfree(rqres->rx_info); nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc rxq%d rx buffers\n", idx); - goto err_out; + goto err_alloc_buffers; } rqres->next_to_alloc = (u16)pkts; } return 0; -err_out: - for (i = 0; i < idx; i++) { - rqres = &rxqs_res[i]; - - hinic3_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info); - dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, - rqres->cqe_start_vaddr, - rqres->cqe_start_paddr); - kfree(rqres->rx_info); - } - +err_alloc_buffers: +#ifdef HAVE_PAGE_POOL_SUPPORT + page_pool_destroy(rqres->page_pool); +err_create_page_pool: +#endif + dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, + rqres->cqe_start_vaddr, + rqres->cqe_start_paddr); +err_alloc_cqe: + kfree(rqres->rx_info); +err_alloc_rx_info: + hinic3_free_rxqs_res(nic_dev, idx, 
rq_depth, rxqs_res); return -ENOMEM; } @@ -1001,6 +1169,10 @@ void hinic3_free_rxqs_res(struct hinic3_nic_dev *nic_dev, u16 num_rq, rqres = &rxqs_res[idx]; hinic3_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info); +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rqres->page_pool) + page_pool_destroy(rqres->page_pool); +#endif dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, rqres->cqe_start_vaddr, rqres->cqe_start_paddr); @@ -1084,6 +1256,7 @@ void hinic3_free_rxqs(struct net_device *netdev) struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); kfree(nic_dev->rxqs); + nic_dev->rxqs = NULL; } int hinic3_alloc_rxqs(struct net_device *netdev) @@ -1207,6 +1380,15 @@ int rxq_restore(struct hinic3_nic_dev *nic_dev, u16 q_id, u16 hw_ci) nic_info(&nic_dev->pdev->dev, "rxq %u restore_buf_num:%u\n", q_id, rxq->restore_buf_num); rx_info = &rxq->rx_info[(hw_ci + rxq->q_depth - 1) & rxq->q_mask]; +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool && rx_info->page) { + page_pool_put_full_page(rx_info->page_pool, + rx_info->page, false); + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + goto reset_rxq; + } +#endif if (rx_info->buf_dma_addr) { dma_unmap_page(&nic_dev->pdev->dev, rx_info->buf_dma_addr, nic_dev->dma_rx_buff_size, DMA_FROM_DEVICE); @@ -1217,7 +1399,9 @@ int rxq_restore(struct hinic3_nic_dev *nic_dev, u16 q_id, u16 hw_ci) __free_pages(rx_info->page, nic_dev->page_order); rx_info->page = NULL; } + goto reset_rxq; +reset_rxq: rxq->delta = 1; rxq->next_to_update = (u16)((hw_ci + rxq->q_depth - 1) & rxq->q_mask); rxq->cons_idx = (u16)((rxq->next_to_update + 1) & rxq->q_mask); @@ -1238,15 +1422,10 @@ int rxq_restore(struct hinic3_nic_dev *nic_dev, u16 q_id, u16 hw_ci) return err; } - if (!rq_pi_rd_en) { - hinic3_write_db(rxq->rq, rxq->q_id & (NIC_DCB_COS_MAX - 1), - RQ_CFLAG_DP, (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type)); - } else { - /* Write all the wqes before pi update */ - wmb(); + hinic3_write_db(rxq->rq, rxq->q_id & (NIC_DCB_COS_MAX - 1), + RQ_CFLAG_DP, + (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type)); - hinic3_update_rq_hw_pi(rxq->rq, rxq->next_to_update); - } return 0; } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h index f4d6f4fdb13e7ac0d7b394827313dbfea0373279..7dd4618260ead0a15195a52cb41beea279198ecd 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h @@ -4,6 +4,9 @@ #ifndef HINIC3_RX_H #define HINIC3_RX_H +#ifdef HAVE_PAGE_POOL_SUPPORT +#include +#endif #include #include #include @@ -27,22 +30,22 @@ #define HINIC3_RX_CSUM_IPSU_OTHER_ERR BIT(8) #define HINIC3_HEADER_DATA_UNIT 2 +#define HINIC3_CQE_LEN 32 struct hinic3_rxq_stats { - u64 packets; - u64 bytes; - u64 errors; - u64 csum_errors; - u64 other_errors; - u64 dropped; - u64 xdp_dropped; - u64 rx_buf_empty; - - u64 alloc_skb_err; - u64 alloc_rx_buf_err; - u64 xdp_large_pkt; - u64 restore_drop_sge; - u64 rsvd2; + u64 packets; + u64 bytes; + u64 errors; + u64 csum_errors; + u64 other_errors; + u64 dropped; + u64 xdp_dropped; + u64 rx_buf_empty; + u64 alloc_skb_err; + u64 alloc_rx_buf_err; + u64 xdp_large_pkt; + u64 restore_drop_sge; + u64 rsvd2; #ifdef HAVE_NDO_GET_STATS64 struct u64_stats_sync syncp; #else @@ -56,6 +59,9 @@ struct hinic3_rx_info { struct hinic3_rq_cqe *cqe; dma_addr_t cqe_dma; struct page *page; +#ifdef HAVE_PAGE_POOL_SUPPORT + struct page_pool *page_pool; +#endif u32 page_offset; u32 rsvd1; struct hinic3_rq_wqe *rq_wqe; @@ -96,7 +102,7 @@ struct hinic3_rxq { u16 
next_to_update; struct device *dev; /* device for DMA mapping */ - unsigned long status; + u64 status; dma_addr_t cqe_start_paddr; void *cqe_start_vaddr; @@ -124,6 +130,9 @@ struct hinic3_dyna_rxq_res { struct hinic3_rx_info *rx_info; dma_addr_t cqe_start_paddr; void *cqe_start_vaddr; +#ifdef HAVE_PAGE_POOL_SUPPORT + struct page_pool *page_pool; +#endif }; int hinic3_alloc_rxqs(struct net_device *netdev); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_srv_nic.h b/drivers/net/ethernet/huawei/hinic3/hinic3_srv_nic.h index bdd5a8eb282ddc845e81da542def526f37a862fa..051f05d93cfce7a8bc0de60b03e81bd2763ea9af 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_srv_nic.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_srv_nic.h @@ -11,9 +11,10 @@ #ifndef HINIC3_SRV_NIC_H #define HINIC3_SRV_NIC_H -#include "hinic3_mgmt_interface.h" +#include +#include "nic_mpu_cmd_defs.h" #include "mag_mpu_cmd.h" -#include "mag_cmd.h" +#include "mag_mpu_cmd_defs.h" #include "hinic3_lld.h" enum hinic3_queue_type { @@ -64,6 +65,7 @@ enum hinic3_nic_event_type { EVENT_NIC_DCB_STATE_CHANGE, EVENT_NIC_BOND_DOWN, EVENT_NIC_BOND_UP, + EVENT_NIC_OUTBAND_CFG, }; /* * diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c index e95878250d717dd72217db4cfa315d0419a9cd5e..d3f8696f1e0dbef7db953503ed8ea1346cc521c2 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c @@ -218,6 +218,77 @@ static void get_inner_l4_info(struct sk_buff *skb, union hinic3_l4 *l4, } } +static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip, + union hinic3_l4 *l4, + enum sq_l3_type *l3_type, u8 *l4_proto) +{ + unsigned char *exthdr = NULL; + + if (ip->v4->version == IP4_VERSION) { + *l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD; + *l4_proto = ip->v4->protocol; + +#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD + /* inner_transport_header is wrong in centos7.0 and suse12.1 */ + l4->hdr = ip->hdr + ((u8)ip->v4->ihl << IP_HDR_IHL_UNIT_SHIFT); +#endif + } else if (ip->v4->version == IP6_VERSION) { + *l3_type = IPV6_PKT; + exthdr = ip->hdr + sizeof(*ip->v6); + *l4_proto = ip->v6->nexthdr; + if (exthdr != l4->hdr) { + __be16 frag_off = 0; +#ifndef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD + ipv6_skip_exthdr(skb, (int)(exthdr - skb->data), + l4_proto, &frag_off); +#else + int pld_off = 0; + + pld_off = ipv6_skip_exthdr(skb, + (int)(exthdr - skb->data), + l4_proto, &frag_off); + l4->hdr = skb->data + pld_off; +#endif + } + } else { + *l3_type = UNKNOWN_L3TYPE; + *l4_proto = 0; + } +} + +static u8 hinic3_get_inner_l4_type(struct sk_buff *skb) +{ + enum sq_l3_type l3_type; + u8 l4_proto; + union hinic3_ip ip; + union hinic3_l4 l4; + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto); + return l4_proto; +} + +static void hinic3_set_unknown_tunnel_csum(struct sk_buff *skb) +{ + int csum_offset; + __sum16 skb_csum; + u8 l4_proto; + + l4_proto = hinic3_get_inner_l4_type(skb); + /* Unsupport tunnel packet, disable csum offload */ + skb_checksum_help(skb); + /* The value of csum is changed from 0xffff to 0 according to RFC1624 */ + if (skb->ip_summed == CHECKSUM_NONE && l4_proto != IPPROTO_UDP) { + csum_offset = skb_checksum_start_offset(skb) + skb->csum_offset; + skb_csum = *(__sum16 *)(skb->data + csum_offset); + if (skb_csum == 0xffff) { + *(__sum16 *)(skb->data + csum_offset) = 0; + } + } +} + static int hinic3_tx_csum(struct hinic3_txq *txq, struct 
hinic3_sq_task *task, struct sk_buff *skb) { @@ -251,11 +322,9 @@ static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task, l4_proto = IPPROTO_RAW; } - if (l4_proto != IPPROTO_UDP || - ((struct udphdr *)skb_transport_header(skb))->dest != VXLAN_OFFLOAD_PORT_LE) { + if (l4_proto != IPPROTO_UDP) { TXQ_STATS_INC(txq, unknown_tunnel_pkt); - /* Unsupport tunnel packet, disable csum offload */ - skb_checksum_help(skb); + hinic3_set_unknown_tunnel_csum(skb); return 0; } } @@ -265,44 +334,6 @@ static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task, return 1; } -static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip, - union hinic3_l4 *l4, - enum sq_l3_type *l3_type, u8 *l4_proto) -{ - unsigned char *exthdr = NULL; - - if (ip->v4->version == IP4_VERSION) { - *l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD; - *l4_proto = ip->v4->protocol; - -#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD - /* inner_transport_header is wrong in centos7.0 and suse12.1 */ - l4->hdr = ip->hdr + ((u8)ip->v4->ihl << IP_HDR_IHL_UNIT_SHIFT); -#endif - } else if (ip->v4->version == IP6_VERSION) { - *l3_type = IPV6_PKT; - exthdr = ip->hdr + sizeof(*ip->v6); - *l4_proto = ip->v6->nexthdr; - if (exthdr != l4->hdr) { - __be16 frag_off = 0; -#ifndef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD - ipv6_skip_exthdr(skb, (int)(exthdr - skb->data), - l4_proto, &frag_off); -#else - int pld_off = 0; - - pld_off = ipv6_skip_exthdr(skb, - (int)(exthdr - skb->data), - l4_proto, &frag_off); - l4->hdr = skb->data + pld_off; -#endif - } - } else { - *l3_type = UNKNOWN_L3TYPE; - *l4_proto = 0; - } -} - static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info, enum sq_l4offload_type l4_offload, u32 offset, u32 mss) @@ -463,7 +494,8 @@ static void get_pkt_stats(struct hinic3_tx_info *tx_info, struct sk_buff *skb) #endif } else { #endif - ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + ihs = (u32)(skb_transport_offset(skb)) + + tcp_hdrlen(skb); #if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \ defined(HAVE_SK_BUFF_ENCAPSULATION)) } @@ -471,7 +503,8 @@ static void get_pkt_stats(struct hinic3_tx_info *tx_info, struct sk_buff *skb) hdr_len = (skb_shinfo(skb)->gso_segs - 1) * ihs; tx_info->num_bytes = skb->len + (u64)hdr_len; } else { - tx_info->num_bytes = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN; + tx_info->num_bytes = (skb->len > ETH_ZLEN) ? 
+ skb->len : ETH_ZLEN; } tx_info->num_pkts = 1; @@ -626,6 +659,14 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb, return NETDEV_TX_BUSY; } + /* l2nic outband vlan cfg enable */ + if ((!skb_vlan_tag_present(skb)) && + (nic_dev->nic_cap.outband_vlan_cfg_en == 1) && + nic_dev->outband_cfg.outband_default_vid != 0) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + (u16)nic_dev->outband_cfg.outband_default_vid); + } + offload = hinic3_tx_offload(skb, &task, &queue_info, txq); if (unlikely(offload == TX_OFFLOAD_INVALID)) { TXQ_STATS_INC(txq, offload_cow_skb_err); @@ -834,6 +875,7 @@ int hinic3_alloc_txqs_res(struct hinic3_nic_dev *nic_dev, u16 num_sq, tqres->bds = kzalloc(size, GFP_KERNEL); if (!tqres->bds) { kfree(tqres->tx_info); + tqres->tx_info = NULL; nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc txq%d bds info\n", idx); goto err_out; @@ -847,7 +889,9 @@ int hinic3_alloc_txqs_res(struct hinic3_nic_dev *nic_dev, u16 num_sq, tqres = &txqs_res[i]; kfree(tqres->bds); + tqres->bds = NULL; kfree(tqres->tx_info); + tqres->tx_info = NULL; } return -ENOMEM; @@ -864,7 +908,9 @@ void hinic3_free_txqs_res(struct hinic3_nic_dev *nic_dev, u16 num_sq, free_all_tx_skbs(nic_dev, sq_depth, tqres->tx_info); kfree(tqres->bds); + tqres->bds = NULL; kfree(tqres->tx_info); + tqres->tx_info = NULL; } } @@ -938,6 +984,7 @@ void hinic3_free_txqs(struct net_device *netdev) struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); kfree(nic_dev->txqs); + nic_dev->txqs = NULL; } static bool is_hw_complete_sq_process(struct hinic3_io_queue *sq) @@ -954,7 +1001,7 @@ static bool is_hw_complete_sq_process(struct hinic3_io_queue *sq) static int hinic3_stop_sq(struct hinic3_txq *txq) { struct hinic3_nic_dev *nic_dev = netdev_priv(txq->netdev); - unsigned long timeout; + u64 timeout; int err; timeout = msecs_to_jiffies(HINIC3_FLUSH_QUEUE_TIMEOUT) + jiffies; @@ -963,7 +1010,7 @@ static int hinic3_stop_sq(struct hinic3_txq *txq) return 0; usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */ - } while (time_before(jiffies, timeout)); + } while (time_before(jiffies, (unsigned long)timeout)); /* force hardware to drop packets */ timeout = msecs_to_jiffies(HINIC3_FLUSH_QUEUE_TIMEOUT) + jiffies; @@ -976,7 +1023,7 @@ static int hinic3_stop_sq(struct hinic3_txq *txq) break; usleep_range(9900, 10000); /* sleep 9900 us ~ 10000 us */ - } while (time_before(jiffies, timeout)); + } while (time_before(jiffies, (unsigned long)timeout)); /* Avoid msleep takes too long and get a fake result */ if (is_hw_complete_sq_process(txq->sq)) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h index 1b9e509109b8bca3f4fb941aa95fbbb715517c41..7ae029b0bfb0ed8e997ed3abec9d578d54e316e5 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h @@ -29,7 +29,7 @@ struct hinic3_wq { #define WQ_MASK_IDX(wq, idx) ((idx) & (wq)->idx_mask) #define WQ_MASK_PAGE(wq, pg_idx) \ - ((pg_idx) < (wq)->num_wq_pages ? (pg_idx) : 0) + (((pg_idx) < ((wq)->num_wq_pages)) ? 
(pg_idx) : 0) #define WQ_PAGE_IDX(wq, idx) ((idx) >> (wq)->wqebbs_per_page_shift) #define WQ_OFFSET_IN_PAGE(wq, idx) ((idx) & (wq)->wqebbs_per_page_mask) #define WQ_GET_WQEBB_ADDR(wq, pg_idx, idx_in_pg) \ @@ -75,7 +75,7 @@ static inline void *hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq, pg_idx = WQ_PAGE_IDX(wq, *prod_idx); off_in_page = WQ_OFFSET_IN_PAGE(wq, *prod_idx); - if (off_in_page + num_wqebbs > wq->wqebbs_per_page) { + if ((off_in_page + num_wqebbs) > wq->wqebbs_per_page) { /* wqe across wq page boundary */ *second_part_wqebbs_addr = WQ_GET_WQEBB_ADDR(wq, WQ_MASK_PAGE(wq, pg_idx + 1), 0); diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.c index b742f8a8d9fe0de441c22a70ac218d9518cc0ab6..0419fc22e56d38881565c04e4e0211862cb2a922 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_api_cmd.c @@ -400,12 +400,16 @@ static int wait_for_status_poll(struct hinic3_api_cmd_chain *chain) API_CMD_STATUS_TIMEOUT, 100); /* wait 100 us once */ } -static void copy_resp_data(struct hinic3_api_cmd_cell_ctxt *ctxt, void *ack, - u16 ack_size) +static void copy_resp_data(struct hinic3_api_cmd_chain *chain, + struct hinic3_api_cmd_cell_ctxt *ctxt, + void *ack, u16 ack_size) { struct hinic3_api_cmd_resp_fmt *resp = ctxt->resp; + int rsp_size_align = chain->rsp_size_align - 0x8; + int rsp_size = (ack_size > rsp_size_align) ? rsp_size_align : ack_size; + + memcpy(ack, &resp->resp_data, rsp_size); - memcpy(ack, &resp->resp_data, ack_size); ctxt->status = 0; } @@ -464,7 +468,7 @@ static int wait_for_api_cmd_completion(struct hinic3_api_cmd_chain *chain, case HINIC3_API_CMD_POLL_READ: err = wait_for_resp_polling(ctxt); if (err == 0) - copy_resp_data(ctxt, ack, ack_size); + copy_resp_data(chain, ctxt, ack, ack_size); else sdk_err(dev, "API CMD poll response timeout\n"); break; @@ -1055,13 +1059,11 @@ static int api_chain_init(struct hinic3_api_cmd_chain *chain, alloc_wb_status_err: kfree(chain->cell_ctxt); -/*lint -save -e548*/ alloc_cell_ctxt_err: if (chain->chain_type == HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) spin_lock_deinit(&chain->async_lock); else sema_deinit(&chain->sem); -/*lint -restore*/ return err; } @@ -1078,6 +1080,7 @@ static void api_chain_free(struct hinic3_api_cmd_chain *chain) dma_free_coherent(dev, sizeof(*chain->wb_status), chain->wb_status, chain->wb_status_paddr); kfree(chain->cell_ctxt); + chain->cell_ctxt = NULL; if (chain->chain_type == HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) spin_lock_deinit(&chain->async_lock); diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c index 0878186ee1ff5073793901bad5fc4f5e6b1fa1c5..ceb763682223e5758dbc998550e51348a79d89d5 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c @@ -657,7 +657,7 @@ static int cmdq_sync_cmd_direct_resp(struct hinic3_cmdq *cmdq, u8 mod, cmd_info->channel = channel; cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, NULL); - memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); + memcpy(&saved_cmd_info, cmd_info, sizeof(struct hinic3_cmdq_cmd_info)); cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL, wrapped, mod, cmd, curr_prod_idx); @@ -745,7 +745,7 @@ static int cmdq_sync_cmd_detail_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, cmd_info->channel = channel; cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, buf_out); - memcpy(&saved_cmd_info, 
cmd_info, sizeof(*cmd_info)); + memcpy(&saved_cmd_info, cmd_info, sizeof(struct hinic3_cmdq_cmd_info)); cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out, wrapped, mod, cmd, curr_prod_idx); @@ -994,9 +994,11 @@ int hinic3_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct hinic3_cmd_buf *buf_in return cmdq_async_cmd(&cmdqs->cmdq[HINIC3_CMDQ_SYNC], mod, cmd, buf_in, channel); } +EXPORT_SYMBOL(hinic3_cmdq_async); int hinic3_cmdq_async_cos(void *hwdev, u8 mod, u8 cmd, - u8 cos_id, struct hinic3_cmd_buf *buf_in, u16 channel) + u8 cos_id, struct hinic3_cmd_buf *buf_in, + u16 channel) { struct hinic3_cmdqs *cmdqs = NULL; int err; @@ -1136,7 +1138,7 @@ void hinic3_cmdq_ceq_handler(void *handle, u32 ceqe_data) break; case HINIC3_CMD_TYPE_SET_ARM: /* arm_bit was set until here */ - if (cmdq_arm_ceq_handler(cmdq, wqe, ci)) + if (cmdq_arm_ceq_handler(cmdq, wqe, ci) != 0) return; break; default: @@ -1221,6 +1223,7 @@ static int init_cmdq(struct hinic3_cmdq *cmdq, struct hinic3_hwdev *hwdev, static void free_cmdq(struct hinic3_cmdq *cmdq) { kfree(cmdq->cmd_infos); + cmdq->cmd_infos = NULL; spin_lock_deinit(&cmdq->cmdq_lock); } @@ -1425,7 +1428,7 @@ static int create_cmdq_wq(struct hinic3_cmdqs *cmdqs) type = HINIC3_CMDQ_SYNC; for (; type < cmdqs->cmdq_num; type++) memcpy((u8 *)cmdqs->wq_block_vaddr + - CMDQ_WQ_CLA_SIZE * type, + ((u64)type * CMDQ_WQ_CLA_SIZE), cmdqs->cmdq[type].wq.wq_block_vaddr, cmdqs->cmdq[type].wq.num_wq_pages * sizeof(u64)); } diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h index b0344ea5a0755dd43eedcb6c7acb3cc3f57f2ac6..b174ad24b1d007b2a9e1c62c06003eb91f8f7c5b 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h @@ -14,11 +14,6 @@ #include "hinic3_common.h" #include "hinic3_hwdev.h" -struct dma_pool { - unsigned int size; - void *dev_hdl; -}; - #define HINIC3_SCMD_DATA_LEN 16 #define HINIC3_CMDQ_DEPTH 4096 diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c index 4c13a2e8ffd66872fa55c7effaf3aa68354a431d..af336f20ca12b5ecf39ccb7404e2baa3aaa7487d 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c @@ -22,11 +22,15 @@ #include "hinic3_sriov.h" #include "hinic3_nictool.h" #include "hinic3_pci_id_tbl.h" +#include "hinic3_hwdev.h" +#include "cfg_mgmt_mpu_cmd_defs.h" +#include "mpu_cmd_base_defs.h" #include "hinic3_dev_mgmt.h" #define HINIC3_WAIT_TOOL_CNT_TIMEOUT 10000 #define HINIC3_WAIT_TOOL_MIN_USLEEP_TIME 9900 #define HINIC3_WAIT_TOOL_MAX_USLEEP_TIME 10000 +#define HIGHT_BDF 8 static unsigned long card_bit_map; @@ -60,15 +64,23 @@ void lld_dev_cnt_init(struct hinic3_pcidev *pci_adapter) void lld_dev_hold(struct hinic3_lld_dev *dev) { - struct hinic3_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); + struct hinic3_pcidev *pci_adapter = NULL; + + if (!dev) + return; + pci_adapter = pci_get_drvdata(dev->pdev); atomic_inc(&pci_adapter->ref_cnt); } void lld_dev_put(struct hinic3_lld_dev *dev) { - struct hinic3_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); + struct hinic3_pcidev *pci_adapter = NULL; + + if (!dev) + return; + pci_adapter = pci_get_drvdata(dev->pdev); atomic_dec(&pci_adapter->ref_cnt); } @@ -236,6 +248,40 @@ void hinic3_get_all_chip_id(void *id_info) card_id->num = (u32)i; } +int hinic3_bar_mmap_param_valid(phys_addr_t phy_addr, unsigned long vmsize) +{ + struct card_node 
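/*
 * [Editor's note] Illustrative sketch, not part of the patch. Many hunks in
 * this series follow kfree() with an explicit "ptr = NULL" (tx_info, bds,
 * cell_ctxt, cmd_infos, ...). Since kfree(NULL) is a no-op, clearing the
 * pointer turns an accidental second free into nothing and makes a
 * use-after-free fail loudly as a NULL dereference. The same effect wrapped
 * in a tiny helper macro:
 */
#include <linux/slab.h>

#define example_kfree_and_null(p)	\
	do {				\
		kfree(p);		\
		(p) = NULL;		\
	} while (0)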
*chip_node = NULL; + struct hinic3_pcidev *dev = NULL; + u64 bar1_phy_addr = 0; + u64 bar3_phy_addr = 0; + u64 bar1_size = 0; + u64 bar3_size = 0; + + lld_hold(); + + /* get PF bar1 or bar3 physical address to verify */ + list_for_each_entry(chip_node, &g_hinic3_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hinic3_func_type(dev->hwdev) == TYPE_VF) + continue; + + bar1_phy_addr = pci_resource_start(dev->pcidev, HINIC3_PF_PCI_CFG_REG_BAR); + bar1_size = pci_resource_len(dev->pcidev, HINIC3_PF_PCI_CFG_REG_BAR); + + bar3_phy_addr = pci_resource_start(dev->pcidev, HINIC3_PCI_MGMT_REG_BAR); + bar3_size = pci_resource_len(dev->pcidev, HINIC3_PCI_MGMT_REG_BAR); + if ((phy_addr == bar1_phy_addr && vmsize <= bar1_size) || + (phy_addr == bar3_phy_addr && vmsize <= bar3_size)) { + lld_put(); + return 0; + } + } + } + + lld_put(); + return -EINVAL; +} + void hinic3_get_card_func_info_by_card_name(const char *chip_name, struct hinic3_card_func_info *card_func) { @@ -328,7 +374,6 @@ struct hinic3_lld_dev *hinic3_get_lld_dev_by_chip_name(const char *chip_name) if (dev) lld_dev_hold(dev); lld_put(); - return dev; } @@ -422,6 +467,28 @@ struct hinic3_lld_dev *hinic3_get_lld_dev_by_chip_and_port(const char *chip_name return NULL; } +void *hinic3_get_ppf_dev(void) +{ + struct card_node *chip_node = NULL; + struct hinic3_pcidev *pci_adapter = NULL; + struct list_head *chip_list = NULL; + + lld_hold(); + chip_list = get_hinic3_chip_list(); + + list_for_each_entry(chip_node, chip_list, node) + list_for_each_entry(pci_adapter, &chip_node->func_list, node) + if (hinic3_func_type(pci_adapter->hwdev) == TYPE_PPF) { + pr_info("Get ppf_func_id:%u", hinic3_global_func_id(pci_adapter->hwdev)); + lld_put(); + return pci_adapter->lld_dev.hwdev; + } + + lld_put(); + return NULL; +} +EXPORT_SYMBOL(hinic3_get_ppf_dev); + struct hinic3_lld_dev *hinic3_get_lld_dev_by_dev_name(const char *dev_name, enum hinic3_service_type type) { @@ -518,6 +585,7 @@ EXPORT_SYMBOL(hinic3_get_ppf_lld_dev_unsafe); int hinic3_get_chip_name(struct hinic3_lld_dev *lld_dev, char *chip_name, u16 max_len) { struct hinic3_pcidev *pci_adapter = NULL; + int ret = 0; if (!lld_dev || !chip_name || !max_len) return -EINVAL; @@ -527,12 +595,18 @@ int hinic3_get_chip_name(struct hinic3_lld_dev *lld_dev, char *chip_name, u16 ma return -EFAULT; lld_hold(); - strncpy(chip_name, pci_adapter->chip_node->chip_name, max_len); + if (strscpy(chip_name, pci_adapter->chip_node->chip_name, max_len) < 0) + goto RELEASE; chip_name[max_len - 1] = '\0'; lld_put(); return 0; + +RELEASE: + lld_put(); + + return ret; } EXPORT_SYMBOL(hinic3_get_chip_name); @@ -541,6 +615,81 @@ struct hinic3_hwdev *hinic3_get_sdk_hwdev_by_lld(struct hinic3_lld_dev *lld_dev) return lld_dev->hwdev; } +void hinic3_write_oshr_info(struct os_hot_replace_info *out_oshr_info, + struct hw_pf_info *info, + struct hinic3_board_info *board_info, + struct card_node *chip_node, u32 serivce_enable, + u32 func_info_idx) +{ + out_oshr_info->func_infos[func_info_idx].pf_idx = info->glb_func_idx; + out_oshr_info->func_infos[func_info_idx].backup_pf = + (((info->glb_func_idx) / (board_info->port_num)) % HOT_REPLACE_PARTITION_NUM == 0) ? 
+ ((info->glb_func_idx) + (board_info->port_num)) : + ((info->glb_func_idx) - (board_info->port_num)); + out_oshr_info->func_infos[func_info_idx].partition = + ((info->glb_func_idx) / (board_info->port_num)) % HOT_REPLACE_PARTITION_NUM; + out_oshr_info->func_infos[func_info_idx].port_id = info->port_id; + out_oshr_info->func_infos[func_info_idx].bdf = (info->bus_num << HIGHT_BDF) + info->glb_func_idx; + out_oshr_info->func_infos[func_info_idx].bus_num = chip_node->bus_num; + out_oshr_info->func_infos[func_info_idx].valid = serivce_enable; + memcpy(out_oshr_info->func_infos[func_info_idx].card_name, + chip_node->chip_name, IFNAMSIZ); +} + +void hinic3_get_os_hot_replace_info(void *oshr_info) +{ + struct os_hot_replace_info *out_oshr_info = (struct os_hot_replace_info *)oshr_info; + struct card_node *chip_node = NULL; + struct hinic3_pcidev *dst_dev = NULL; + struct hinic3_board_info *board_info = NULL; + struct hw_pf_info *infos = NULL; + struct hinic3_hw_pf_infos *pf_infos = NULL; + u32 func_info_idx = 0, func_id = 0, func_num, serivce_enable = 0; + struct list_head *hinic3_chip_list = get_hinic3_chip_list(); + int err; + + lld_hold(); + pf_infos = kzalloc(sizeof(struct hinic3_hw_pf_infos), GFP_KERNEL); + if (!pf_infos) { + pr_err("kzalloc pf_infos fail\n"); + lld_put(); + return; + } + list_for_each_entry(chip_node, hinic3_chip_list, node) { + list_for_each_entry(dst_dev, &chip_node->func_list, node) { // get all pf infos in one time by one pf_id + err = hinic3_get_hw_pf_infos(dst_dev->hwdev, pf_infos, HINIC3_CHANNEL_COMM); + if (err != 0) { + pr_err("get pf info failed\n"); + break; + } + + serivce_enable = 0; + infos = pf_infos->infos; + board_info = &((struct hinic3_hwdev *)(dst_dev->hwdev))->board_info; + if (((struct hinic3_hwdev *)(dst_dev->hwdev))->hot_replace_mode == HOT_REPLACE_ENABLE) { + serivce_enable = 1; + } + break; + } + + func_num = pf_infos->num_pfs; + if (func_num <= 0) { + pr_err("get pf num failed\n"); + break; + } + + for (func_id = 0; func_id < func_num; func_id++) { + hinic3_write_oshr_info(out_oshr_info, &infos[func_id], + board_info, chip_node, + serivce_enable, func_info_idx); + func_info_idx++; + } + } + out_oshr_info->func_cnt = func_info_idx; + kfree(pf_infos); + lld_put(); +} + struct card_node *hinic3_get_chip_node_by_lld(struct hinic3_lld_dev *lld_dev) { struct hinic3_pcidev *pci_adapter = pci_get_drvdata(lld_dev->pdev); @@ -620,9 +769,13 @@ void hinic3_get_card_info(const void *hwdev, void *bufin) } if (hinic3_func_for_mgmt(fun_hwdev)) - strlcpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ); + strscpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ); + + if (dev->lld_dev.pdev->subsystem_device == BIFUR_RESOURCE_PF_SSID) { + strscpy(info->pf[i].name, "bifur", IFNAMSIZ); + } - strlcpy(info->pf[i].bus_info, pci_name(dev->pcidev), + strscpy(info->pf[i].bus_info, pci_name(dev->pcidev), sizeof(info->pf[i].bus_info)); info->pf_num++; i = info->pf_num; @@ -719,6 +872,7 @@ int alloc_chip_node(struct hinic3_pcidev *pci_adapter) struct card_node *chip_node = NULL; unsigned char i; unsigned char bus_number = 0; + int err; if (chip_node_is_exist(pci_adapter, &bus_number)) return 0; @@ -750,6 +904,13 @@ int alloc_chip_node(struct hinic3_pcidev *pci_adapter) return -EINVAL; } + err = sscanf(chip_node->chip_name, HINIC3_CHIP_NAME "%d", &(chip_node->chip_id)); + if (err <= 0) { + clear_bit(i, &card_bit_map); + kfree(chip_node); + return -EINVAL; + } + sdk_info(&pci_adapter->pcidev->dev, "Add new chip %s to global list succeed\n", chip_node->chip_name); @@ -801,3 +962,36 @@ int 
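/*
 * [Editor's note] Illustrative sketch, not part of the patch. The
 * hinic3_write_oshr_info() hunk above pairs every PF with a backup PF one
 * port-group away: PFs are laid out in groups of port_num, the group index
 * modulo HOT_REPLACE_PARTITION_NUM selects the partition, and the backup
 * lives in the neighbouring partition. Worked example assuming port_num = 4
 * and two partitions (both values are assumptions, not taken from the patch):
 *
 *   pf_idx 0..3 -> partition 0, backup_pf 4..7
 *   pf_idx 4..7 -> partition 1, backup_pf 0..3
 */
#include <linux/types.h>

static u16 example_oshr_backup_pf(u16 pf_idx, u16 port_num, u16 partitions)
{
	u16 partition = (pf_idx / port_num) % partitions;

	/* mirrors the patch: only partition 0 points forward, others point back */
	return (partition == 0) ? pf_idx + port_num : pf_idx - port_num;
}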
hinic3_get_pf_id(struct card_node *chip_node, u32 port_id, u32 *pf_id, u32 * return 0; } + +void hinic3_get_mbox_cnt(const void *hwdev, void *bufin) +{ + struct card_node *chip_node = NULL; + struct card_mbox_cnt_info *info = (struct card_mbox_cnt_info *)bufin; + struct hinic3_pcidev *dev = NULL; + struct hinic3_hwdev *func_hwdev = NULL; + u32 i = 0; + + info->func_num = 0; + chip_node = hinic3_get_chip_node_by_hwdev(hwdev); + if (chip_node == NULL) + return; + + lld_hold(); + + list_for_each_entry(dev, &chip_node->func_list, node) { + func_hwdev = (struct hinic3_hwdev *)dev->hwdev; + strscpy(info->func_info[i].bus_info, pci_name(dev->pcidev), + sizeof(info->func_info[i].bus_info)); + + info->func_info[i].send_cnt = func_hwdev->mbox_send_cnt; + info->func_info[i].ack_cnt = func_hwdev->mbox_ack_cnt; + info->func_num++; + i = info->func_num; + if (i >= ARRAY_SIZE(info->func_info)) { + sdk_err(&dev->pcidev->dev, "chip_node->func_list bigger than pf_max + vf_max\n"); + break; + } + } + + lld_put(); +} \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h index a327c4a4e4acffc9047d393ec654972879f00654..bfa8f3edfce19c0d50f6e0f42002e8346c008a05 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h @@ -19,8 +19,10 @@ #define PRINT_ULD_DETACH_TIMEOUT_INTERVAL 1000 /* 1 second */ #define ULD_LOCK_MIN_USLEEP_TIME 900 #define ULD_LOCK_MAX_USLEEP_TIME 1000 +#define BIFUR_RESOURCE_PF_SSID 0x05a1 -#define HINIC3_IS_VF_DEV(pdev) ((pdev)->device == HINIC3_DEV_ID_VF) +#define HINIC3_IS_VF_DEV(pdev) ((pdev)->device == HINIC3_DEV_ID_VF || \ + (pdev)->device == HINIC3_DEV_SDI_5_1_ID_VF) #define HINIC3_IS_SPU_DEV(pdev) \ (((pdev)->device == HINIC3_DEV_ID_SPU) || ((pdev)->device == HINIC3_DEV_ID_SDI_5_0_PF) || \ (((pdev)->device == HINIC3_DEV_ID_DPU_PF))) @@ -79,6 +81,13 @@ struct hinic3_pcidev { u16 probe_fault_level; u16 rsvd2; u64 rsvd4; + + struct workqueue_struct *multi_host_mgmt_workq; + struct work_struct slave_nic_work; + struct work_struct slave_vroce_work; + + struct workqueue_struct *migration_probe_workq; + struct delayed_work migration_probe_dwork; }; struct hinic_chip_info { @@ -104,4 +113,6 @@ void wait_lld_dev_unused(struct hinic3_pcidev *pci_adapter); void *hinic3_get_hwdev_by_pcidev(struct pci_dev *pdev); +int hinic3_bar_mmap_param_valid(phys_addr_t phy_addr, unsigned long vmsize); + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.c index 1949ab879cbc220358c152a753d4237e22f2f812..8f9d00a76af775b4f5a72283bfcbb9b12f362e0f 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.c @@ -101,7 +101,8 @@ static bool check_image_device_type(struct hinic3_hwdev *hwdev, u32 device_type) static void encapsulate_update_cmd(struct hinic3_cmd_update_firmware *msg, struct firmware_section *section_info, - int *remain_len, u32 *send_len, u32 *send_pos) + const int *remain_len, u32 *send_len, + u32 *send_pos) { memset(msg->data, 0, sizeof(msg->data)); msg->ctl_info.sf = (*remain_len == section_info->section_len) ? 
true : false; @@ -380,6 +381,7 @@ int hinic3_init_devlink(struct hinic3_hwdev *hwdev) struct pci_dev *pdev = NULL; int err; + pdev = hwdev->hwif->pdev; devlink = devlink_alloc(&hinic3_devlink_ops, sizeof(struct hinic3_devlink)); if (!devlink) { sdk_err(hwdev->dev_hdl, "Failed to alloc devlink\n"); @@ -391,7 +393,6 @@ int hinic3_init_devlink(struct hinic3_hwdev *hwdev) hwdev->devlink_dev->activate_fw = FW_CFG_DEFAULT_INDEX; hwdev->devlink_dev->switch_cfg = FW_CFG_DEFAULT_INDEX; - pdev = hwdev->hwif->pdev; err = devlink_register(devlink, &pdev->dev); if (err) { sdk_err(hwdev->dev_hdl, "Failed to register devlink\n"); diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c index 4b08aa0cd7795a95efd6a440dda5d4ee2174106c..caa99e3d401fa2005786cc844ba0c6477a981741 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c @@ -26,6 +26,8 @@ #include "hinic3_prof_adap.h" #include "hinic3_eqs.h" +#include "vram_common.h" + #define HINIC3_EQS_WQ_NAME "hinic3_eqs" #define AEQ_CTRL_0_INTR_IDX_SHIFT 0 @@ -66,7 +68,6 @@ #define HINIC3_TASK_PROCESS_EQE_LIMIT 1024 #define HINIC3_EQ_UPDATE_CI_STEP 64 -/*lint -e806*/ static uint g_aeq_len = HINIC3_DEFAULT_AEQ_LEN; module_param(g_aeq_len, uint, 0444); MODULE_PARM_DESC(g_aeq_len, @@ -83,7 +84,6 @@ static uint g_num_ceqe_in_tasklet = HINIC3_TASK_PROCESS_EQE_LIMIT; module_param(g_num_ceqe_in_tasklet, uint, 0444); MODULE_PARM_DESC(g_num_ceqe_in_tasklet, "The max number of ceqe can be processed in tasklet, default = 1024"); -/*lint +e806*/ #define CEQ_CTRL_0_INTR_IDX_SHIFT 0 #define CEQ_CTRL_0_DMA_ATTR_SHIFT 12 @@ -801,6 +801,7 @@ static int alloc_eq_pages(struct hinic3_eq *eq) u32 reg, init_val; u16 pg_idx, i; int err; + gfp_t gfp_vram; eq->eq_pages = kcalloc(eq->num_pages, sizeof(*eq->eq_pages), GFP_KERNEL); @@ -809,12 +810,15 @@ static int alloc_eq_pages(struct hinic3_eq *eq) return -ENOMEM; } + gfp_vram = hi_vram_get_gfp_vram(); + for (pg_idx = 0; pg_idx < eq->num_pages; pg_idx++) { eq_page = &eq->eq_pages[pg_idx]; err = hinic3_dma_zalloc_coherent_align(eq->hwdev->dev_hdl, eq->page_size, HINIC3_MIN_EQ_PAGE_SIZE, - GFP_KERNEL, eq_page); + GFP_KERNEL | gfp_vram, + eq_page); if (err) { sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq page, page index: %hu\n", pg_idx); @@ -865,6 +869,7 @@ static void free_eq_pages(struct hinic3_eq *eq) &eq->eq_pages[pg_idx]); kfree(eq->eq_pages); + eq->eq_pages = NULL; } static inline u32 get_page_size(const struct hinic3_eq *eq) @@ -1104,7 +1109,8 @@ int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs, hwdev->aeqs = aeqs; aeqs->hwdev = hwdev; aeqs->num_aeqs = num_aeqs; - aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM, + aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, + WQ_MEM_RECLAIM | WQ_HIGHPRI, HINIC3_MAX_AEQS); if (!aeqs->workq) { sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n"); diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.c index a4cbac8e4cc1c581976381e31dba045cb14a6d96..6b96b870a110c6209bceb63b6b16d4b8ac02d0ed 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_api.c @@ -82,6 +82,48 @@ int hinic3_sm_ctr_rd16(void *hwdev, u8 node, u8 instance, u32 ctr_id, return 0; } +/** + * hinic3_sm_ctr_rd16_clear - small single 16 counter read and clear to zero + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: 
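/*
 * [Editor's note] Illustrative sketch, not part of the patch. The AEQ hunk
 * above adds WQ_HIGHPRI to the event workqueue, so async-event work items run
 * on the high-priority worker pool instead of competing with ordinary work.
 * Minimal use of the API (name and max_active are placeholders):
 */
#include <linux/workqueue.h>

static struct workqueue_struct *example_create_event_wq(void)
{
	return alloc_workqueue("example_eqs", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
}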
counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic3_sm_ctr_rd16_clear(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u16 *value) +{ + struct chipif_sml_ctr_rd_req req; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + memset(&req, 0, sizeof(req)); + + hinic3_sml_ctr_read_build_req(&req, instance, + CHIPIF_SM_CTR_OP_READ_CLEAR, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic3_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Sm 16bit counter clear fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, sizeof(rsp) / sizeof(u32)); + *value = rsp.bs_ss16_rsp.value1; + + return 0; +} + /** * hinic3_sm_ctr_rd32 - small single 32 counter read * @hwdev: the hardware device diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c index cea7581cff8dde7d251cdaffc2395d0ab965c455..2d4a9f6ef5db621ad25f2724478a3f9c51ae6f4f 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c @@ -34,13 +34,16 @@ static void parse_pub_res_cap_dfx(struct hinic3_hwdev *hwdev, cap->max_vf); sdk_info(hwdev->dev_hdl, "Host_pf_num: 0x%x, pf_id_start: 0x%x, host_vf_num: 0x%x, vf_id_start: 0x%x\n", cap->pf_num, cap->pf_id_start, cap->vf_num, cap->vf_id_start); - sdk_info(hwdev->dev_hdl, "host_valid_bitmap: 0x%x, master_host_id: 0x%x, srv_multi_host_mode: 0x%x\n", - cap->host_valid_bitmap, cap->master_host_id, cap->srv_multi_host_mode); sdk_info(hwdev->dev_hdl, - "fake_vf_start_id: 0x%x, fake_vf_num: 0x%x, fake_vf_max_pctx: 0x%x\n", - cap->fake_vf_start_id, cap->fake_vf_num, cap->fake_vf_max_pctx); - sdk_info(hwdev->dev_hdl, "fake_vf_bfilter_start_addr: 0x%x, fake_vf_bfilter_len: 0x%x\n", - cap->fake_vf_bfilter_start_addr, cap->fake_vf_bfilter_len); + "host_valid_bitmap: 0x%x, master_host_id: 0x%x, srv_multi_host_mode: 0x%x, hot_plug_disable: 0x%x\n", + cap->host_valid_bitmap, cap->master_host_id, cap->srv_multi_host_mode, + cap->hot_plug_disable); + sdk_info(hwdev->dev_hdl, + "os_hot_replace: 0x%x, fake_vf_start_id: 0x%x, fake_vf_num: 0x%x, fake_vf_max_pctx: 0x%x\n", + cap->os_hot_replace, cap->fake_vf_start_id, cap->fake_vf_num, cap->fake_vf_max_pctx); + sdk_info(hwdev->dev_hdl, + "fake_vf_bfilter_start_addr: 0x%x, fake_vf_bfilter_len: 0x%x, bond_create_mode: 0x%x\n", + cap->fake_vf_bfilter_start_addr, cap->fake_vf_bfilter_len, cap->bond_create_mode); } static void parse_cqm_res_cap(struct hinic3_hwdev *hwdev, struct service_cap *cap, @@ -109,6 +112,9 @@ static void parse_pub_res_cap(struct hinic3_hwdev *hwdev, cap->host_valid_bitmap = dev_cap->host_valid_bitmap; cap->master_host_id = dev_cap->master_host_id; cap->srv_multi_host_mode = dev_cap->srv_multi_host_mode; + cap->hot_plug_disable = dev_cap->hot_plug_disable; + cap->bond_create_mode = dev_cap->bond_create_mode; + cap->os_hot_replace = dev_cap->os_hot_replace; cap->fake_vf_en = dev_cap->fake_vf_en; cap->fake_vf_start_bit = dev_cap->fake_vf_start_bit; cap->fake_vf_end_bit = dev_cap->fake_vf_end_bit; @@ -148,18 +154,27 @@ static void parse_l2nic_res_cap(struct hinic3_hwdev *hwdev, nic_cap->max_sqs = dev_cap->nic_max_sq_id + 1; nic_cap->max_rqs = dev_cap->nic_max_rq_id + 1; nic_cap->default_num_queues = 
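/*
 * [Editor's note] Illustrative usage sketch, not part of the patch. The new
 * hinic3_sm_ctr_rd16_clear() above reads a 16-bit SM counter and clears it in
 * hardware, so successive calls return per-interval deltas. The node,
 * instance and counter-id values below are placeholders, not real IDs:
 */
static void example_sample_ctr(void *hwdev)
{
	u16 val = 0;
	int err;

	err = hinic3_sm_ctr_rd16_clear(hwdev, /*node*/ 0, /*instance*/ 0,
				       /*ctr_id*/ 0, &val);
	if (err)
		pr_err("example: read-clear counter failed: %d\n", err);
	else
		pr_info("example: counter delta since last read: %u\n", val);
}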
dev_cap->nic_default_num_queues; + nic_cap->outband_vlan_cfg_en = dev_cap->outband_vlan_cfg_en; + nic_cap->lro_enable = dev_cap->lro_enable; sdk_info(hwdev->dev_hdl, "L2nic resource capbility, max_sqs: 0x%x, max_rqs: 0x%x\n", nic_cap->max_sqs, nic_cap->max_rqs); /* Check parameters from firmware */ - if (nic_cap->max_sqs > HINIC3_CFG_MAX_QP || - nic_cap->max_rqs > HINIC3_CFG_MAX_QP) { - sdk_info(hwdev->dev_hdl, "Number of qp exceed limit[1-%d]: sq: %u, rq: %u\n", - HINIC3_CFG_MAX_QP, nic_cap->max_sqs, nic_cap->max_rqs); + if (nic_cap->max_sqs > HINIC3_CFG_MAX_QP) { + sdk_info(hwdev->dev_hdl, "Number of sq exceed limit[1-%d]: sq: %u\n", + HINIC3_CFG_MAX_QP, nic_cap->max_sqs); nic_cap->max_sqs = HINIC3_CFG_MAX_QP; + } + + if (nic_cap->max_rqs > HINIC3_CFG_MAX_QP) { + sdk_info(hwdev->dev_hdl, "Number of rq exceed limit[1-%d]: rq: %u\n", + HINIC3_CFG_MAX_QP, nic_cap->max_rqs); nic_cap->max_rqs = HINIC3_CFG_MAX_QP; } + + if (nic_cap->outband_vlan_cfg_en) + sdk_info(hwdev->dev_hdl, "L2nic outband vlan cfg enabled\n"); } static void parse_fc_res_cap(struct hinic3_hwdev *hwdev, @@ -333,6 +348,28 @@ static void parse_ipsec_res_cap(struct hinic3_hwdev *hwdev, dev_cap->ipsec_max_sactx, dev_cap->ipsec_max_cq); } +static void parse_vbs_res_cap(struct hinic3_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct vbs_service_cap *vbs_cap = &cap->vbs_cap; + + vbs_cap->vbs_max_volq = dev_cap->vbs_max_volq; + vbs_cap->vbs_main_pf_enable = dev_cap->vbs_main_pf_enable; + vbs_cap->vbs_vsock_pf_enable = dev_cap->vbs_vsock_pf_enable; + vbs_cap->vbs_fushion_queue_pf_enable = dev_cap->vbs_fushion_queue_pf_enable; + + sdk_info(hwdev->dev_hdl, + "Get VBS resource capbility, vbs_max_volq: 0x%x\n", + dev_cap->vbs_max_volq); + sdk_info(hwdev->dev_hdl, + "Get VBS pf info, vbs_main_pf_enable: 0x%x, vbs_vsock_pf_enable: 0x%x, vbs_fushion_queue_pf_enable: 0x%x\n", + dev_cap->vbs_main_pf_enable, + dev_cap->vbs_vsock_pf_enable, + dev_cap->vbs_fushion_queue_pf_enable); +} + static void parse_dev_cap(struct hinic3_hwdev *dev, struct cfg_cmd_dev_cap *dev_cap, enum func_type type) { @@ -375,6 +412,9 @@ static void parse_dev_cap(struct hinic3_hwdev *dev, if (IS_PPA_TYPE(dev)) parse_ppa_res_cap(dev, cap, dev_cap, type); + + if (IS_VBS_TYPE(dev)) + parse_vbs_res_cap(dev, cap, dev_cap, type); } static int get_cap_from_fw(struct hinic3_hwdev *dev, enum func_type type) @@ -404,6 +444,23 @@ static int get_cap_from_fw(struct hinic3_hwdev *dev, enum func_type type) return 0; } +u8 hinic3_get_bond_create_mode(void *dev) +{ + struct hinic3_hwdev *hwdev = NULL; + struct service_cap *cap = NULL; + + if (!dev) { + pr_err("pointer dev is NULL\n"); + return -EINVAL; + } + + hwdev = (struct hinic3_hwdev *)dev; + cap = &hwdev->cfg_mgmt->svc_cap; + + return cap->bond_create_mode; +} +EXPORT_SYMBOL(hinic3_get_bond_create_mode); + int hinic3_get_dev_cap(void *dev) { enum func_type type; @@ -1173,6 +1230,20 @@ bool hinic3_support_ppa(void *hwdev, struct ppa_service_cap *cap) } EXPORT_SYMBOL(hinic3_support_ppa); +bool hinic3_support_bifur(void *hwdev, struct bifur_service_cap *cap) +{ + struct hinic3_hwdev *dev = (struct hinic3_hwdev *)hwdev; + + if (!hwdev) + return false; + + if (!IS_BIFUR_TYPE(dev)) + return false; + + return true; +} +EXPORT_SYMBOL(hinic3_support_bifur); + bool hinic3_support_migr(void *hwdev, struct migr_service_cap *cap) { struct hinic3_hwdev *dev = hwdev; diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h 
b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h index db6e3cab67edf3bfd5f0026553b2ddaaf9b98138..7157e972de6a9a0e694e16ab0f33490cb528309d 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h @@ -166,6 +166,11 @@ struct service_cap { u8 srv_multi_host_mode; u16 virtio_vq_size; + u8 hot_plug_disable; + u8 bond_create_mode; + u8 os_hot_replace; + u8 rsvd1; + u8 timer_pf_num; u8 timer_pf_id_start; u16 timer_vf_num; @@ -319,9 +324,11 @@ struct cfg_mgmt_info { #define IS_RDMA_ENABLE(dev) \ ((dev)->cfg_mgmt->svc_cap.sf_svc_attr.rdma_en) #define IS_PPA_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_PPA) + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_PPA) #define IS_MIGR_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_MIGRATE) + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_MIGRATE) +#define IS_BIFUR_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_BIFUR) int init_cfg_mgmt(struct hinic3_hwdev *dev); @@ -333,5 +340,7 @@ void free_capability(struct hinic3_hwdev *dev); int hinic3_init_vf_dev_cap(void *hwdev); +u8 hinic3_get_bond_create_mode(void *dev); + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c index d8a1a28ba4923f89a4a737a6bc7173015f6c32a4..47264f9475afba9f7576d3d585e6334a2d46fae4 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c @@ -21,7 +21,9 @@ #include "hinic3_hw_cfg.h" #include "hinic3_cmdq.h" #include "mpu_inband_cmd_defs.h" +#include "mpu_board_defs.h" #include "hinic3_hw_comm.h" +#include "vram_common.h" #define HINIC3_MSIX_CNT_LLI_TIMER_SHIFT 0 #define HINIC3_MSIX_CNT_LLI_CREDIT_SHIFT 8 @@ -242,12 +244,19 @@ int hinic3_func_reset(void *dev, u16 func_id, u64 reset_flag, u16 channel) struct hinic3_hwdev *hwdev = dev; u16 out_size = sizeof(func_reset); int err = 0; + int is_in_kexec; if (!dev) { pr_err("Invalid para: dev is null.\n"); return -EINVAL; } + is_in_kexec = vram_get_kexec_flag(); + if (is_in_kexec != 0) { + sdk_info(hwdev->dev_hdl, "Skip function reset!\n"); + return 0; + } + sdk_info(hwdev->dev_hdl, "Function is reset, flag: 0x%llx, channel:0x%x\n", reset_flag, channel); @@ -523,7 +532,7 @@ EXPORT_SYMBOL(hinic3_set_ppf_flr_type); int hinic3_set_ppf_tbl_hotreplace_flag(void *hwdev, u8 flag) { - struct comm_cmd_ppf_tbl_htrp_config htr_info = {0}; + struct comm_cmd_ppf_tbl_htrp_config htr_info = {}; u16 out_size = sizeof(struct comm_cmd_ppf_tbl_htrp_config); struct hinic3_hwdev *dev = hwdev; int ret; @@ -868,11 +877,19 @@ static int set_ppf_tmr_status(struct hinic3_hwdev *hwdev, int hinic3_ppf_tmr_start(void *hwdev) { + int is_in_kexec; + if (!hwdev) { pr_err("Hwdev pointer is NULL for starting ppf timer\n"); return -EINVAL; } + is_in_kexec = vram_get_kexec_flag(); + if (is_in_kexec != 0) { + pr_info("Skip starting ppt timer during kexec"); + return 0; + } + return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_START); } EXPORT_SYMBOL(hinic3_ppf_tmr_start); @@ -888,18 +905,110 @@ int hinic3_ppf_tmr_stop(void *hwdev) } EXPORT_SYMBOL(hinic3_ppf_tmr_stop); +static int hi_vram_kalloc_align(struct hinic3_hwdev *hwdev, char *name, + u32 page_size, u32 page_num, + struct hinic3_dma_addr_align *mem_align) +{ + void *vaddr = NULL, *align_vaddr = NULL; + dma_addr_t paddr, align_paddr; + u64 real_size = page_size; + 
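/*
 * [Editor's note] Illustrative sketch, not part of the patch. Several hunks
 * in this series (function reset, PPF timer start, MQM page setup, bond work)
 * bail out early when vram_get_kexec_flag() is non-zero, because during an OS
 * hot replace the kexec'd kernel must keep the hardware state set up by the
 * previous kernel. The recurring guard, with semantics inferred from the
 * patch, reduces to:
 */
static bool example_skip_hw_reconfig(void)
{
	/* non-zero while running in the kexec'd (hot-replace) kernel */
	return vram_get_kexec_flag() != 0;
}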
u64 align = page_size; + + vaddr = (void *)hi_vram_kalloc(name, real_size); + if (vaddr == NULL) { + sdk_err(hwdev->dev_hdl, "vram kalloc failed, name:%s.\n", name); + return -ENOMEM; + } + + paddr = (dma_addr_t)virt_to_phys(vaddr); + align_paddr = ALIGN(paddr, align); + /* align */ + if (align_paddr == paddr) { + align_vaddr = vaddr; + goto out; + } + + hi_vram_kfree((void *)vaddr, name, real_size); + + /* realloc memory for align */ + real_size = page_size + align; + vaddr = (void *)hi_vram_kalloc(name, real_size); + if (vaddr == NULL) { + sdk_err(hwdev->dev_hdl, "vram kalloc align failed, name:%s.\n", name); + return -ENOMEM; + } + + paddr = (dma_addr_t)virt_to_phys(vaddr); + align_paddr = ALIGN(paddr, align); + align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr)); + +out: + mem_align->real_size = (u32)real_size; + mem_align->ori_vaddr = vaddr; + mem_align->ori_paddr = paddr; + mem_align->align_vaddr = align_vaddr; + mem_align->align_paddr = align_paddr; + + return 0; +} + +static void mqm_eqm_free_page_mem(struct hinic3_hwdev *hwdev) +{ + u32 i; + struct hinic3_dma_addr_align *page_addr; + int is_use_vram = get_use_vram_flag(); + struct mqm_eqm_vram_name_s *mqm_eqm_vram_name = hwdev->mqm_eqm_vram_name; + + page_addr = hwdev->mqm_att.brm_srch_page_addr; + + for (i = 0; i < hwdev->mqm_att.page_num; i++) { + if (is_use_vram != 0) { + hi_vram_kfree(page_addr->ori_vaddr, mqm_eqm_vram_name[i].vram_name, + page_addr->real_size); + } else { + hinic3_dma_free_coherent_align(hwdev->dev_hdl, page_addr); + } + page_addr->ori_vaddr = NULL; + page_addr++; + } + + kfree(mqm_eqm_vram_name); + hwdev->mqm_eqm_vram_name = NULL; +} + static int mqm_eqm_try_alloc_mem(struct hinic3_hwdev *hwdev, u32 page_size, u32 page_num) { struct hinic3_dma_addr_align *page_addr = hwdev->mqm_att.brm_srch_page_addr; + int is_use_vram = get_use_vram_flag(); + struct mqm_eqm_vram_name_s *mqm_eqm_vram_name = NULL; u32 valid_num = 0; u32 flag = 1; u32 i = 0; int err; + u16 func_id; + + mqm_eqm_vram_name = kzalloc(sizeof(struct mqm_eqm_vram_name_s) * page_num, GFP_KERNEL); + if (mqm_eqm_vram_name == NULL) { + sdk_err(hwdev->dev_hdl, "mqm eqm alloc vram name failed.\n"); + return -ENOMEM; + } + + hwdev->mqm_eqm_vram_name = mqm_eqm_vram_name; + func_id = hinic3_global_func_id(hwdev); for (i = 0; i < page_num; i++) { - err = hinic3_dma_zalloc_coherent_align(hwdev->dev_hdl, page_size, - page_size, GFP_KERNEL, page_addr); + if (is_use_vram != 0) { + snprintf(mqm_eqm_vram_name[i].vram_name, + VRAM_NAME_MAX_LEN, "%s%u%s%u", + VRAM_CQM_GLB_FUNC_BASE, func_id, VRAM_NIC_MQM, i); + err = hi_vram_kalloc_align( + hwdev, mqm_eqm_vram_name[i].vram_name, + page_size, page_num, page_addr); + } else { + err = hinic3_dma_zalloc_coherent_align(hwdev->dev_hdl, page_size, + page_size, GFP_KERNEL, page_addr); + } if (err) { flag = 0; break; @@ -908,15 +1017,12 @@ static int mqm_eqm_try_alloc_mem(struct hinic3_hwdev *hwdev, u32 page_size, page_addr++; } + hwdev->mqm_att.page_num = valid_num; + if (flag == 1) { hwdev->mqm_att.page_size = page_size; - hwdev->mqm_att.page_num = page_num; } else { - page_addr = hwdev->mqm_att.brm_srch_page_addr; - for (i = 0; i < valid_num; i++) { - hinic3_dma_free_coherent_align(hwdev->dev_hdl, page_addr); - page_addr++; - } + mqm_eqm_free_page_mem(hwdev); return -EFAULT; } @@ -955,19 +1061,6 @@ static int mqm_eqm_alloc_page_mem(struct hinic3_hwdev *hwdev) return ret; } -static void mqm_eqm_free_page_mem(struct hinic3_hwdev *hwdev) -{ - u32 i; - struct hinic3_dma_addr_align *page_addr; - - page_addr = 
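/*
 * [Editor's note] Illustrative sketch, not part of the patch. The new
 * hi_vram_kalloc_align() above aligns a persistent-memory page the classic
 * way: take the natural allocation first, and only if its physical address is
 * not already aligned, re-allocate with "size + align" of slack and round up
 * with ALIGN(). The offset arithmetic for the aligned view is the core of it:
 */
#include <linux/kernel.h>	/* ALIGN() */
#include <linux/types.h>

/*
 * Given the start of an over-sized buffer (at least len + align bytes),
 * return the first virtual address whose physical address is align-aligned.
 */
static void *example_align_within(void *vaddr, dma_addr_t paddr, u64 align)
{
	dma_addr_t aligned = ALIGN(paddr, align);

	return (char *)vaddr + (aligned - paddr);
}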
hwdev->mqm_att.brm_srch_page_addr; - - for (i = 0; i < hwdev->mqm_att.page_num; i++) { - hinic3_dma_free_coherent_align(hwdev->dev_hdl, page_addr); - page_addr++; - } -} - static int mqm_eqm_set_cfg_2_hw(struct hinic3_hwdev *hwdev, u8 valid) { struct comm_cmd_eqm_cfg info_eqm_cfg; @@ -1098,6 +1191,7 @@ static int mqm_eqm_init(struct hinic3_hwdev *hwdev) { struct comm_cmd_get_eqm_num info_eqm_fix; int ret; + int is_in_kexec; if (hwdev->hwif->attr.func_type != TYPE_PPF) return 0; @@ -1127,10 +1221,15 @@ static int mqm_eqm_init(struct hinic3_hwdev *hwdev) goto err_page; } - ret = mqm_eqm_set_page_2_hw(hwdev); - if (ret) { - sdk_err(hwdev->dev_hdl, "Set page to hw failed\r\n"); - goto err_ecmd; + is_in_kexec = vram_get_kexec_flag(); + if (is_in_kexec == 0) { + ret = mqm_eqm_set_page_2_hw(hwdev); + if (ret) { + sdk_err(hwdev->dev_hdl, "Set page to hw failed\r\n"); + goto err_ecmd; + } + } else { + sdk_info(hwdev->dev_hdl, "Mqm db don't set to chip when os hot replace.\r\n"); } ret = mqm_eqm_set_cfg_2_hw(hwdev, 1); @@ -1568,3 +1667,15 @@ int hinic3_switch_config(void *hwdev, u8 cfg_index) return 0; } + +bool hinic3_is_optical_module_mode(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (dev->board_info.board_type == BOARD_TYPE_STRG_4X25G_COMSTORAGE || + dev->board_info.board_type == BOARD_TYPE_CAL_4X25G_COMSTORAGE || + dev->board_info.board_type == BOARD_TYPE_CAL_2X100G_TCE_BACKPLANE) + return false; + + return true; +} \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c index baa1ce0cb234588c6cf2b6e900945bc0c804a720..722fecdccf7144af1951ea6b48fe46b617a24f7c 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c @@ -252,98 +252,6 @@ static u32 get_up_timeout_val(enum hinic3_mod_type mod, u16 cmd) return 0; /* use default mbox/apichain timeout time */ } -static int api_csr_read(void *hwdev, struct msg_module *nt_msg, - void *buf_in, u32 in_size, void *buf_out, u32 *out_size) -{ - struct up_log_msg_st *up_log_msg = (struct up_log_msg_st *)buf_in; - u8 *buf_out_tmp = (u8 *)buf_out; - int ret = 0; - u32 rd_len; - u32 rd_addr; - u32 rd_cnt = 0; - u32 offset = 0; - u8 node_id; - u32 i; - - if (!buf_in || !buf_out || in_size != sizeof(*up_log_msg) || - *out_size != up_log_msg->rd_len || up_log_msg->rd_len % DW_WIDTH != 0) - return -EINVAL; - - rd_len = up_log_msg->rd_len; - rd_addr = up_log_msg->addr; - node_id = (u8)nt_msg->mpu_cmd.mod; - - rd_cnt = rd_len / DW_WIDTH; - - for (i = 0; i < rd_cnt; i++) { - ret = hinic3_api_csr_rd32(hwdev, node_id, - rd_addr + offset, - (u32 *)(buf_out_tmp + offset)); - if (ret) { - pr_err("Csr rd fail, err: %d, node_id: %u, csr addr: 0x%08x\n", - ret, node_id, rd_addr + offset); - return ret; - } - offset += DW_WIDTH; - } - *out_size = rd_len; - - return ret; -} - -static int api_csr_write(void *hwdev, struct msg_module *nt_msg, - void *buf_in, u32 in_size, void *buf_out, - u32 *out_size) -{ - struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in; - int ret = 0; - u32 rd_len; - u32 rd_addr; - u32 rd_cnt = 0; - u32 offset = 0; - u8 node_id; - u32 i; - u8 *data = NULL; - - if (!buf_in || in_size != sizeof(*csr_write_msg) || csr_write_msg->rd_len == 0 || - csr_write_msg->rd_len > API_CSR_MAX_RD_LEN || csr_write_msg->rd_len % DW_WIDTH != 0) - return -EINVAL; - - rd_len = csr_write_msg->rd_len; - rd_addr = csr_write_msg->addr; - node_id = (u8)nt_msg->mpu_cmd.mod; - - rd_cnt = rd_len / 
DW_WIDTH; - - data = kzalloc(rd_len, GFP_KERNEL); - if (!data) { - pr_err("No more memory\n"); - return -EFAULT; - } - if (copy_from_user(data, (void *)csr_write_msg->data, rd_len)) { - pr_err("Copy information from user failed\n"); - kfree(data); - return -EFAULT; - } - - for (i = 0; i < rd_cnt; i++) { - ret = hinic3_api_csr_wr32(hwdev, node_id, - rd_addr + offset, - *((u32 *)(data + offset))); - if (ret) { - pr_err("Csr wr fail, ret: %d, node_id: %u, csr addr: 0x%08x\n", - ret, rd_addr + offset, node_id); - kfree(data); - return ret; - } - offset += DW_WIDTH; - } - - *out_size = 0; - kfree(data); - return ret; -} - int send_to_mpu(void *hwdev, struct msg_module *nt_msg, void *buf_in, u32 in_size, void *buf_out, u32 *out_size) { @@ -370,10 +278,8 @@ int send_to_mpu(void *hwdev, struct msg_module *nt_msg, return ret; } } else if (nt_msg->mpu_cmd.api_type == API_TYPE_API_CHAIN_BYPASS) { - if (nt_msg->mpu_cmd.cmd == API_CSR_WRITE) - return api_csr_write(hwdev, nt_msg, buf_in, in_size, buf_out, out_size); - - ret = api_csr_read(hwdev, nt_msg, buf_in, in_size, buf_out, out_size); + pr_err("Unsupported api_type %u\n", nt_msg->mpu_cmd.api_type); + return -EINVAL; } else if (nt_msg->mpu_cmd.api_type == API_TYPE_API_CHAIN_TO_MPU) { timeout = get_up_timeout_val(mod, cmd); if (hinic3_pcie_itf_id(hwdev) != SPU_HOST_ID) @@ -440,6 +346,23 @@ static int sm_rd16(void *hwdev, u32 id, u8 instance, return ret; } +static int sm_rd16_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u16 val1; + int ret; + + ret = hinic3_sm_ctr_rd16_clear(hwdev, node, instance, id, &val1); + if (ret != 0) { + pr_err("Get sm ctr clear information (16 bits)failed!\n"); + val1 = 0xffff; + } + + buf_out->val1 = val1; + + return ret; +} + static int sm_rd32(void *hwdev, u32 id, u8 instance, u8 node, struct sm_out_st *buf_out) { @@ -559,6 +482,7 @@ const struct sm_module_handle sm_module_cmd_handle[] = { {SM_CTR_RD32, sm_rd32}, {SM_CTR_RD64_PAIR, sm_rd64_pair}, {SM_CTR_RD64, sm_rd64}, + {SM_CTR_RD16_CLEAR, sm_rd16_clear}, {SM_CTR_RD32_CLEAR, sm_rd32_clear}, {SM_CTR_RD64_PAIR_CLEAR, sm_rd64_pair_clear}, {SM_CTR_RD64_CLEAR, sm_rd64_clear} diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c index 620136acb4169dfcea71332a25864b0a3875e71f..ac80b63af323f082f7b804f9fae5faaa85f093cc 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c @@ -101,6 +101,12 @@ enum hinic3_pcie_tph { #define SLAVE_HOST_STATUS_SET(host_id, enable) (((u8)(enable) & 1U) << (host_id)) #define SLAVE_HOST_STATUS_GET(host_id, val) (!!((val) & (1U << (host_id)))) +#ifdef HAVE_HOT_REPLACE_FUNC + extern int get_partition_id(void); +#else + static int get_partition_id(void) { return 0; } +#endif + void set_slave_host_enable(void *hwdev, u8 host_id, bool enable) { u32 reg_val; @@ -207,6 +213,28 @@ static void hinic3_init_host_mode_pre(struct hinic3_hwdev *hwdev) } } +static void hinic3_init_hot_plug_status(struct hinic3_hwdev *hwdev) +{ + struct service_cap *cap = &hwdev->cfg_mgmt->svc_cap; + + if (cap->hot_plug_disable) { + hwdev->hot_plug_mode = HOT_PLUG_DISABLE; + } else { + hwdev->hot_plug_mode = HOT_PLUG_ENABLE; + } +} + +static void hinic3_init_os_hot_replace(struct hinic3_hwdev *hwdev) +{ + struct service_cap *cap = &hwdev->cfg_mgmt->svc_cap; + + if (cap->os_hot_replace) { + hwdev->hot_replace_mode = HOT_REPLACE_ENABLE; + } else { + hwdev->hot_replace_mode = HOT_REPLACE_DISABLE; + } +} + static u8 
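/*
 * [Editor's note] Illustrative sketch, not part of the patch. The
 * SM_CTR_RD16_CLEAR entry above extends a table-driven dispatcher: each
 * counter-read variant is a {command, handler} pair and the caller simply
 * walks the table. Generic shape of that pattern (names are placeholders,
 * not the driver's actual dispatch code):
 */
#include <linux/errno.h>
#include <linux/types.h>

struct example_cmd_handler {
	u32 cmd;
	int (*handle)(void *ctx, u32 id);
};

static int example_dispatch_cmd(const struct example_cmd_handler *tbl,
				size_t n, void *ctx, u32 cmd, u32 id)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].cmd == cmd)
			return tbl[i].handle(ctx, id);

	return -EOPNOTSUPP;	/* unknown command */
}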
hinic3_nic_sw_aeqe_handler(void *hwdev, u8 event, u8 *data) { struct hinic3_hwdev *dev = hwdev; @@ -270,14 +298,20 @@ static void chip_fault_show(struct hinic3_hwdev *hwdev, "fatal", "reset", "host", "flr", "general", "suggestion"}; char level_str[FAULT_SHOW_STR_LEN + 1]; u8 level; + int ret; memset(level_str, 0, FAULT_SHOW_STR_LEN + 1); level = event->event.chip.err_level; - if (level < FAULT_LEVEL_MAX) - strncpy(level_str, fault_level[level], - FAULT_SHOW_STR_LEN); - else - strncpy(level_str, "Unknown", FAULT_SHOW_STR_LEN); + if (level < FAULT_LEVEL_MAX) { + ret = strscpy(level_str, fault_level[level], + FAULT_SHOW_STR_LEN); + if (ret < 0) + return; + } else { + ret = strscpy(level_str, "Unknown", FAULT_SHOW_STR_LEN); + if (ret < 0) + return; + } if (level == FAULT_LEVEL_SERIOUS_FLR) dev_err(hwdev->dev_hdl, "err_level: %u [%s], flr func_id: %u\n", @@ -299,6 +333,7 @@ static void fault_report_show(struct hinic3_hwdev *hwdev, "reg rd timeout", "reg wr timeout", "phy fault", "tsensor fault"}; char type_str[FAULT_SHOW_STR_LEN + 1] = {0}; struct fault_event_stats *fault = NULL; + int ret; sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %u\n", hinic3_global_func_id(hwdev)); @@ -306,10 +341,14 @@ static void fault_report_show(struct hinic3_hwdev *hwdev, fault = &hwdev->hw_stats.fault_event_stats; if (event->type < FAULT_TYPE_MAX) { - strncpy(type_str, fault_type[event->type], sizeof(type_str)); + ret = strscpy(type_str, fault_type[event->type], sizeof(type_str)); + if (ret < 0) + return; atomic_inc(&fault->fault_type_stat[event->type]); } else { - strncpy(type_str, "Unknown", sizeof(type_str)); + ret = strscpy(type_str, "Unknown", sizeof(type_str)); + if (ret < 0) + return; } sdk_err(hwdev->dev_hdl, "Fault type: %u [%s]\n", event->type, type_str); @@ -1536,15 +1575,30 @@ int hinic3_init_hwdev(struct hinic3_init_para *para) hinic3_init_host_mode_pre(hwdev); + hinic3_init_hot_plug_status(hwdev); + + hinic3_init_os_hot_replace(hwdev); + err = hinic3_multi_host_mgmt_init(hwdev); if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init function mode\n"); goto init_multi_host_fail; } - err = hinic3_init_ppf_work(hwdev); - if (err != 0) - goto init_ppf_work_fail; + // hot_replace_mode is enable, run ppf function only when partition_id is 0 + // or run ppf function directly + if (hwdev->hot_replace_mode == HOT_REPLACE_ENABLE) { + if (get_partition_id() == 0) { + err = hinic3_init_ppf_work(hwdev); + if (err != 0) { + goto init_ppf_work_fail; + } + } + } else { + err = hinic3_init_ppf_work(hwdev); + if (err != 0) + goto init_ppf_work_fail; + } err = hinic3_set_comm_features(hwdev, hwdev->features, COMM_MAX_FEATURE_QWORD); if (err != 0) { @@ -1948,6 +2002,19 @@ void hinic3_link_event_stats(void *dev, u8 link) } EXPORT_SYMBOL(hinic3_link_event_stats); +int hinic3_get_link_event_stats(void *dev, int *link_state) +{ + struct hinic3_hwdev *hwdev = dev; + + if (!hwdev || !link_state) + return -EINVAL; + + *link_state = hwdev->hw_stats.link_event_stats.link_down_stats.counter; + + return 0; +} +EXPORT_SYMBOL(hinic3_get_link_event_stats); + u8 hinic3_max_pf_num(void *hwdev) { if (!hwdev) diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h index e739767ed16e175aff88b16a29059eabfdb17b95..7c2cfc256764ae33071c66b44a5ed043131b1587 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h @@ -10,6 +10,7 @@ #include "hinic3_hw.h" #include "mpu_inband_cmd_defs.h" #include 
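/*
 * [Editor's note] Illustrative sketch, not part of the patch. The fault-show
 * hunks above replace strncpy() with strscpy(), which always NUL-terminates
 * the destination and reports truncation instead of silently padding or
 * overflowing. strscpy() returns the number of characters copied, or -E2BIG
 * if the source did not fit:
 */
#include <linux/string.h>
#include <linux/errno.h>

static int example_copy_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_size);

	if (n < 0)
		return -E2BIG;	/* truncated; dst still holds a terminated prefix */

	return 0;
}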
"hinic3_profile.h" +#include "vram_common.h" struct cfg_mgmt_info; @@ -82,6 +83,22 @@ enum hinic3_host_mode_e { HINIC3_SDI_MODE_MAX, }; +enum hinic3_hot_plug_mode { + HOT_PLUG_ENABLE, + HOT_PLUG_DISABLE, +}; + +enum hinic3_os_hot_replace_mode { + HOT_REPLACE_DISABLE, + HOT_REPLACE_ENABLE, +}; + +#define UNSUPPORT_HOT_PLUG(hwdev) \ + ((hwdev)->hot_plug_mode == HOT_PLUG_DISABLE) + +#define SUPPORT_HOT_PLUG(hwdev) \ + ((hwdev)->hot_plug_mode == HOT_PLUG_ENABLE) + #define MULTI_HOST_CHIP_MODE_SHIFT 0 #define MULTI_HOST_MASTER_MBX_STS_SHIFT 17 #define MULTI_HOST_PRIV_DATA_SHIFT 0x8 @@ -100,6 +117,10 @@ enum hinic3_host_mode_e { ((val) & (~(MULTI_HOST_##member##_MASK \ << MULTI_HOST_##member##_SHIFT))) +struct mqm_eqm_vram_name_s { + char vram_name[VRAM_NAME_MAX_LEN]; +}; + struct hinic3_hwdev { void *adapter_hdl; /* pointer to hinic3_pcidev or NDIS_Adapter */ void *pcidev_hdl; /* pointer to pcidev or Handler */ @@ -138,6 +159,9 @@ struct hinic3_hwdev { u32 rsvd2; struct hinic3_multi_host_mgmt *mhost_mgmt; + char mhost_mgmt_name[VRAM_NAME_MAX_LEN]; + + struct mqm_eqm_vram_name_s *mqm_eqm_vram_name; struct mutex stateful_mutex; /* protect cqm init and deinit */ @@ -170,7 +194,10 @@ struct hinic3_hwdev { struct hinic3_devlink *devlink_dev; enum hinic3_func_mode func_mode; - u32 rsvd3; + enum hinic3_hot_plug_mode hot_plug_mode; + + enum hinic3_os_hot_replace_mode hot_replace_mode; + u32 rsvd5; DECLARE_BITMAP(func_probe_in_host, MAX_FUNCTION_NUM); DECLARE_BITMAP(netdev_setup_state, MAX_FUNCTION_NUM); @@ -179,7 +206,10 @@ struct hinic3_hwdev { u64 last_recv_aeq_cnt; u16 aeq_busy_cnt; - u64 rsvd4[8]; + u64 mbox_send_cnt; + u64 mbox_ack_cnt; + + u64 rsvd4[5]; }; #define HINIC3_DRV_FEATURE_QW0 \ diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c index 33f121035917c387c00a0bb1f15c6e75fdc1f87c..8590f70e138762883da776671871fdcf8063ebd8 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c @@ -544,6 +544,7 @@ static void free_db_area(struct hinic3_free_db_area *free_db_area) { spin_lock_deinit(&free_db_area->idx_lock); kfree(free_db_area->db_bitmap_array); + free_db_area->db_bitmap_array = NULL; } static int get_db_idx(struct hinic3_hwif *hwif, u32 *idx) @@ -841,6 +842,7 @@ void hinic3_free_hwif(struct hinic3_hwdev *hwdev) free_db_area(&hwdev->hwif->free_db_area); enable_all_msix(hwdev); kfree(hwdev->hwif); + hwdev->hwif = NULL; } u16 hinic3_global_func_id(void *hwdev) @@ -856,6 +858,44 @@ u16 hinic3_global_func_id(void *hwdev) } EXPORT_SYMBOL(hinic3_global_func_id); +/** + * get function id from register,used by sriov hot migration process + * @hwdev: the pointer to hw device + */ +u16 hinic3_global_func_id_hw(void *hwdev) +{ + u32 addr, attr0; + struct hinic3_hwdev *dev; + + dev = (struct hinic3_hwdev *)hwdev; + addr = HINIC3_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic3_hwif_read_reg(dev->hwif, addr); + + return HINIC3_AF0_GET(attr0, FUNC_GLOBAL_IDX); +} + +/** + * get function id, used by sriov hot migratition process. 
+ * @hwdev: the pointer to hw device + * @func_id: function id + */ +int hinic3_global_func_id_get(void *hwdev, u16 *func_id) +{ + struct hinic3_hwdev *dev = (struct hinic3_hwdev *)hwdev; + + if (!hwdev || !func_id) + return -EINVAL; + + /* only vf get func_id from chip reg for sriov migrate */ + if (!HINIC3_IS_VF(dev)) { + *func_id = hinic3_global_func_id(hwdev); + return 0; + } + + *func_id = hinic3_global_func_id_hw(dev); + return 0; +} + u16 hinic3_intr_num(void *hwdev) { struct hinic3_hwif *hwif = NULL; diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c index 321a3b78a41da63a0eec986b0138828e69bca440..4e2e8b0c87475d86d67aa48eb1aef0a58512e3bd 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "ossl_knl.h" #include "hinic3_mt.h" @@ -29,26 +30,41 @@ #include "hinic3_lld.h" #include "hinic3_profile.h" +#include "hinic3_hw_cfg.h" +#include "hinic3_multi_host_mgmt.h" #include "hinic3_hwdev.h" #include "hinic3_prof_adap.h" -#include "comm_msg_intf.h" +#include "hinic3_devlink.h" -static bool disable_vf_load; +#include "vram_common.h" + +enum partition_dev_type { + PARTITION_DEV_NONE = 0, + PARTITION_DEV_SHARED, + PARTITION_DEV_EXCLUSIVE, + PARTITION_DEV_BACKUP, +}; + +#ifdef HAVE_HOT_REPLACE_FUNC +extern int vpci_set_partition_attrs(struct pci_dev *dev, unsigned int dev_type, unsigned int partition_id); +extern int get_partition_id(void); +#else +static int vpci_set_partition_attrs(struct pci_dev *dev, unsigned int dev_type, unsigned int partition_id) { return 0; } +static int get_partition_id(void) { return 0; } +#endif + +static bool disable_vf_load = false; module_param(disable_vf_load, bool, 0444); MODULE_PARM_DESC(disable_vf_load, "Disable virtual functions probe or not - default is false"); +static bool g_is_pf_migrated = false; static bool disable_attach; module_param(disable_attach, bool, 0444); MODULE_PARM_DESC(disable_attach, "disable_attach or not - default is false"); #define HINIC3_WAIT_SRIOV_CFG_TIMEOUT 15000 -MODULE_AUTHOR("Huawei Technologies CO., Ltd"); -MODULE_DESCRIPTION(HINIC3_DRV_DESC); -MODULE_VERSION(HINIC3_DRV_VERSION); -MODULE_LICENSE("GPL"); - #if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)) static DEVICE_ATTR(sriov_numvfs, 0664, hinic3_sriov_numvfs_show, hinic3_sriov_numvfs_store); @@ -71,7 +87,18 @@ static const struct attribute_group hinic3_attr_group = { struct hinic3_uld_info g_uld_info[SERVICE_T_MAX] = { {0} }; #define HINIC3_EVENT_PROCESS_TIMEOUT 10000 +#define HINIC3_WAIT_EVENT_PROCESS_TIMEOUT 100 struct mutex g_uld_mutex; +#define BUS_MAX_DEV_NUM 256 +#define HINIC3_SLAVE_WORK_MAX_NUM 20 + +typedef struct vf_offset_info { + u8 valid; + u16 vf_offset_from_pf[CMD_MAX_MAX_PF_NUM]; +} VF_OFFSET_INFO_S; + +static VF_OFFSET_INFO_S g_vf_offset; +DEFINE_MUTEX(g_vf_offset_lock); void hinic3_uld_lock_init(void) { @@ -80,13 +107,23 @@ void hinic3_uld_lock_init(void) static const char *s_uld_name[SERVICE_T_MAX] = { "nic", "ovs", "roce", "toe", "ioe", - "fc", "vbs", "ipsec", "virtio", "migrate", "ppa", "custom"}; + "fc", "vbs", "ipsec", "virtio", "migrate", + "ppa", "custom", "vroce", "crypt", "vsock", "bifur"}; const char **hinic3_get_uld_names(void) { return s_uld_name; } +#ifdef CONFIG_PCI_IOV +static int hinic3_get_pf_device_id(struct pci_dev *pdev) +{ + struct pci_dev *pf_dev = pci_physfn(pdev); + + return pf_dev->device; +} +#endif + static 
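/*
 * [Editor's note] Illustrative sketch, not part of the patch. The new
 * hinic3_global_func_id_hw() above recovers the function id directly from the
 * ATTR0 CSR via the driver's HINIC3_AF0_GET() accessor, which follows the
 * usual SHIFT/MASK idiom for register fields. A generic equivalent (field
 * width and position are placeholders):
 */
#define EXAMPLE_FIELD_SHIFT	0
#define EXAMPLE_FIELD_MASK	0xFFFU

#define EXAMPLE_REG_GET(val, field) \
	(((val) >> field##_SHIFT) & field##_MASK)

/* usage: u16 func_id = EXAMPLE_REG_GET(attr0, EXAMPLE_FIELD); */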
int attach_uld(struct hinic3_pcidev *dev, enum hinic3_service_type type, const struct hinic3_uld_info *uld_info) { @@ -105,6 +142,10 @@ static int attach_uld(struct hinic3_pcidev *dev, enum hinic3_service_type type, atomic_set(&dev->uld_ref_cnt[type], 0); + if (!uld_info->probe) { + err = 0; + goto out_unlock; + } err = uld_info->probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name[type]); if (err) { sdk_err(&dev->pcidev->dev, @@ -173,6 +214,10 @@ static void detach_uld(struct hinic3_pcidev *dev, wait_uld_unused(dev, type); + if (!uld_info->remove) { + mutex_unlock(&dev->pdev_mutex); + return; + } uld_info->remove(&dev->lld_dev, dev->uld_dev[type]); dev->uld_dev[type] = NULL; @@ -190,10 +235,15 @@ static void attach_ulds(struct hinic3_pcidev *dev) enum hinic3_service_type type; struct pci_dev *pdev = dev->pcidev; - lld_hold(); + int is_in_kexec = vram_get_kexec_flag(); + /* don't need hold when driver parallel load during spu hot replace */ + if (is_in_kexec == 0) { + lld_hold(); + } + mutex_lock(&g_uld_mutex); - for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) { + for (type = SERVICE_T_OVS; type < SERVICE_T_MAX; type++) { if (g_uld_info[type].probe) { if (pdev->is_virtfn && (!hinic3_get_vf_service_load(pdev, (u16)type))) { @@ -205,7 +255,10 @@ static void attach_ulds(struct hinic3_pcidev *dev) } } mutex_unlock(&g_uld_mutex); - lld_put(); + + if (is_in_kexec == 0) { + lld_put(); + } } static void detach_ulds(struct hinic3_pcidev *dev) @@ -255,10 +308,10 @@ int hinic3_register_uld(enum hinic3_service_type type, } chip_list = get_hinic3_chip_list(); - memcpy(&g_uld_info[type], uld_info, sizeof(*uld_info)); + memcpy(&g_uld_info[type], uld_info, sizeof(struct hinic3_uld_info)); list_for_each_entry(chip_node, chip_list, node) { list_for_each_entry(dev, &chip_node->func_list, node) { - if (attach_uld(dev, type, uld_info)) { + if (attach_uld(dev, type, uld_info) != 0) { sdk_err(&dev->pcidev->dev, "Attach %s driver to pcie device failed\n", s_uld_name[type]); @@ -312,7 +365,7 @@ void hinic3_unregister_uld(enum hinic3_service_type type) } uld_info = &g_uld_info[type]; - memset(uld_info, 0, sizeof(*uld_info)); + memset(uld_info, 0, sizeof(struct hinic3_uld_info)); mutex_unlock(&g_uld_mutex); lld_put(); } @@ -366,6 +419,26 @@ void hinic3_detach_service(const struct hinic3_lld_dev *lld_dev, enum hinic3_ser } EXPORT_SYMBOL(hinic3_detach_service); +void hinic3_module_get(void *hwdev, enum hinic3_service_type type) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev || type >= SERVICE_T_MAX) + return; + __module_get(THIS_MODULE); +} +EXPORT_SYMBOL(hinic3_module_get); + +void hinic3_module_put(void *hwdev, enum hinic3_service_type type) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev || type >= SERVICE_T_MAX) + return; + module_put(THIS_MODULE); +} +EXPORT_SYMBOL(hinic3_module_put); + static void hinic3_sync_time_to_fmw(struct hinic3_pcidev *pdev_pri) { struct timeval tv = {0}; @@ -382,7 +455,8 @@ static void hinic3_sync_time_to_fmw(struct hinic3_pcidev *pdev_pri) err); } else { rtc_time_to_tm((unsigned long)(tv.tv_sec), &rt_time); - sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. UTC time %d-%02d-%02d %02d:%02d:%02d.\n", + sdk_info(&pdev_pri->pcidev->dev, + "Synchronize UTC time to firmware succeed. 
UTC time %d-%02d-%02d %02d:%02d:%02d.\n", rt_time.tm_year + HINIC3_SYNC_YEAR_OFFSET, rt_time.tm_mon + HINIC3_SYNC_MONTH_OFFSET, rt_time.tm_mday, rt_time.tm_hour, @@ -448,23 +522,434 @@ static void send_event_to_all_pf(struct hinic3_pcidev *dev, lld_put(); } +u32 hinic3_pdev_is_virtfn(struct pci_dev *pdev) +{ +#ifdef CONFIG_PCI_IOV + return pdev->is_virtfn; +#else + return 0; +#endif +} + +static int hinic3_get_function_enable(struct pci_dev *pdev, bool *en) +{ + struct pci_dev *pf_pdev = pdev->physfn; + struct hinic3_pcidev *pci_adapter = NULL; + void *pf_hwdev = NULL; + u16 global_func_id; + int err; + + /* PF in host os or function in guest os, probe sdk in default */ + if (!hinic3_pdev_is_virtfn(pdev) || !pf_pdev) { + *en = true; + return 0; + } + + pci_adapter = pci_get_drvdata(pf_pdev); + if (!pci_adapter || !pci_adapter->hwdev) { + /* vf in host and pf sdk not probed */ + return -EFAULT; + } + pf_hwdev = pci_adapter->hwdev; + + err = hinic3_get_vfid_by_vfpci(NULL, pdev, &global_func_id); + if (err) { + sdk_err(&pci_adapter->pcidev->dev, "Func hinic3_get_vfid_by_vfpci fail %d \n", err); + return err; + } + + err = hinic3_get_func_nic_enable(pf_hwdev, global_func_id, en); + if (!!err) { + sdk_info(&pdev->dev, "Failed to get function nic status, err %d.\n", err); + return err; + } + + return 0; +} + +int hinic3_set_func_probe_in_host(void *hwdev, u16 func_id, bool probe) +{ + struct hinic3_hwdev *dev = hwdev; + + if (hinic3_func_type(hwdev) != TYPE_PPF) + return -EINVAL; + + if (probe) + set_bit(func_id, dev->func_probe_in_host); + else + clear_bit(func_id, dev->func_probe_in_host); + + return 0; +} + +bool hinic3_get_func_probe_in_host(void *hwdev, u16 func_id) +{ + struct hinic3_hwdev *dev = hwdev; + struct hinic3_hwdev *ppf_dev = NULL; + bool probed = false; + + if (!hwdev) + return false; + + down(&dev->ppf_sem); + ppf_dev = hinic3_get_ppf_hwdev_by_pdev(dev->pcidev_hdl); + if (!ppf_dev || hinic3_func_type(ppf_dev) != TYPE_PPF) { + up(&dev->ppf_sem); + return false; + } + + probed = !!test_bit(func_id, ppf_dev->func_probe_in_host); + up(&dev->ppf_sem); + + return probed; +} + +void *hinic3_get_ppf_hwdev_by_pdev(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = NULL; + struct card_node *chip_node = NULL; + struct hinic3_pcidev *dev = NULL; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) + return NULL; + + chip_node = pci_adapter->chip_node; + lld_dev_hold(&pci_adapter->lld_dev); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (dev->lld_state == HINIC3_IN_REMOVE) + continue; + + if (dev->hwdev && hinic3_func_type(dev->hwdev) == TYPE_PPF) { + lld_dev_put(&pci_adapter->lld_dev); + return dev->hwdev; + } + } + lld_dev_put(&pci_adapter->lld_dev); + + return NULL; +} + +static int hinic3_set_vf_nic_used_state(void *hwdev, u16 func_id, bool opened) +{ + struct hinic3_hwdev *dev = hwdev; + struct hinic3_hwdev *ppf_dev = NULL; + + if (!dev || func_id >= MAX_FUNCTION_NUM) + return -EINVAL; + + down(&dev->ppf_sem); + ppf_dev = hinic3_get_ppf_hwdev_by_pdev(dev->pcidev_hdl); + if (!ppf_dev || hinic3_func_type(ppf_dev) != TYPE_PPF) { + up(&dev->ppf_sem); + return -EINVAL; + } + + if (opened) + set_bit(func_id, ppf_dev->netdev_setup_state); + else + clear_bit(func_id, ppf_dev->netdev_setup_state); + + up(&dev->ppf_sem); + + return 0; +} + +static void set_vf_func_in_use(struct pci_dev *pdev, bool in_use) +{ + struct pci_dev *pf_pdev = pdev->physfn; + struct hinic3_pcidev *pci_adapter = NULL; + void *pf_hwdev = NULL; + u16 
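/*
 * [Editor's note] Illustrative sketch, not part of the patch. The probe-state
 * helpers above keep one bit per global function id in a kernel bitmap on the
 * PPF device and flip it with set_bit()/clear_bit()/test_bit(). Minimal form
 * of that bookkeeping (MAX_FUNCS is a placeholder size):
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_MAX_FUNCS 4096

static DECLARE_BITMAP(example_probed, EXAMPLE_MAX_FUNCS);

static void example_mark_probed(u16 func_id, bool probed)
{
	if (probed)
		set_bit(func_id, example_probed);
	else
		clear_bit(func_id, example_probed);
}

static bool example_is_probed(u16 func_id)
{
	return test_bit(func_id, example_probed);
}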
global_func_id; + + /* only need to be set when VF is on the host */ + if (!hinic3_pdev_is_virtfn(pdev) || !pf_pdev) + return; + + pci_adapter = pci_get_drvdata(pf_pdev); + if (!pci_adapter || !pci_adapter->hwdev) + return; + + pf_hwdev = pci_adapter->hwdev; + + global_func_id = (u16)pdev->devfn + hinic3_glb_pf_vf_offset(pf_hwdev); + (void)hinic3_set_vf_nic_used_state(pf_hwdev, global_func_id, in_use); +} + +static int hinic3_pf_get_vf_offset_info(struct hinic3_pcidev *des_dev, u16 *vf_offset) +{ + int err, i; + struct hinic3_hw_pf_infos *pf_infos = NULL; + u16 pf_func_id; + struct hinic3_pcidev *pf_pci_adapter = NULL; + + pf_pci_adapter = (hinic3_pdev_is_virtfn(des_dev->pcidev)) ? pci_get_drvdata(des_dev->pcidev->physfn) : des_dev; + pf_func_id = hinic3_global_func_id(pf_pci_adapter->hwdev); + if (pf_func_id >= CMD_MAX_MAX_PF_NUM || !vf_offset) + return -EINVAL; + + mutex_lock(&g_vf_offset_lock); + if (g_vf_offset.valid == 0) { + pf_infos = kzalloc(sizeof(*pf_infos), GFP_KERNEL); + if (!pf_infos) { + sdk_err(&pf_pci_adapter->pcidev->dev, "Malloc pf_infos fail\n"); + err = -ENOMEM; + goto err_malloc; + } + + err = hinic3_get_hw_pf_infos(pf_pci_adapter->hwdev, pf_infos, HINIC3_CHANNEL_COMM); + if (err) { + sdk_warn(&pf_pci_adapter->pcidev->dev, "Hinic3_get_hw_pf_infos fail err %d\n", err); + err = -EFAULT; + goto err_out; + } + + g_vf_offset.valid = 1; + for (i = 0; i < CMD_MAX_MAX_PF_NUM; i++) { + g_vf_offset.vf_offset_from_pf[i] = pf_infos->infos[i].vf_offset; + } + + kfree(pf_infos); + } + + *vf_offset = g_vf_offset.vf_offset_from_pf[pf_func_id]; + + mutex_unlock(&g_vf_offset_lock); + + return 0; + +err_out: + kfree(pf_infos); +err_malloc: + mutex_unlock(&g_vf_offset_lock); + return err; +} + +static struct pci_dev *get_vf_pdev_by_pf(struct hinic3_pcidev *des_dev, + u16 func_id) +{ + int err; + u16 bus_num; + u16 vf_start, vf_end; + u16 des_fn, pf_func_id, vf_offset; + + vf_start = hinic3_glb_pf_vf_offset(des_dev->hwdev); + vf_end = vf_start + hinic3_func_max_vf(des_dev->hwdev); + pf_func_id = hinic3_global_func_id(des_dev->hwdev); + if (func_id <= vf_start || func_id > vf_end || pf_func_id >= CMD_MAX_MAX_PF_NUM) + return NULL; + + err = hinic3_pf_get_vf_offset_info(des_dev, &vf_offset); + if (err) { + sdk_warn(&des_dev->pcidev->dev, "Hinic3_pf_get_vf_offset_info fail\n"); + return NULL; + } + + des_fn = ((func_id - vf_start) - 1) + pf_func_id + vf_offset; + bus_num = des_dev->pcidev->bus->number + des_fn / BUS_MAX_DEV_NUM; + + return pci_get_domain_bus_and_slot(0, bus_num, (des_fn % BUS_MAX_DEV_NUM)); +} + +static struct hinic3_pcidev *get_des_pci_adapter(struct hinic3_pcidev *des_dev, + u16 func_id) +{ + struct pci_dev *des_pdev = NULL; + u16 vf_start, vf_end; + bool probe_in_host = false; + + if (hinic3_global_func_id(des_dev->hwdev) == func_id) + return des_dev; + + vf_start = hinic3_glb_pf_vf_offset(des_dev->hwdev); + vf_end = vf_start + hinic3_func_max_vf(des_dev->hwdev); + if (func_id <= vf_start || func_id > vf_end) + return NULL; + + des_pdev = get_vf_pdev_by_pf(des_dev, func_id); + if (!des_pdev) + return NULL; + + pci_dev_put(des_pdev); + + probe_in_host = hinic3_get_func_probe_in_host(des_dev->hwdev, func_id); + if (!probe_in_host) + return NULL; + + return pci_get_drvdata(des_pdev); +} + +int __set_vroce_func_state(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + u16 func_id; + int err; + u8 enable_vroce = false; + + func_id = hinic3_global_func_id(pci_adapter->hwdev); + + err = hinic3_get_func_vroce_enable(pci_adapter->hwdev, func_id, 
&enable_vroce); + if (0 != err) { + sdk_err(&pdev->dev, "Failed to get vroce state.\n"); + return err; + } + + mutex_lock(&g_uld_mutex); + + if (!!enable_vroce) { + if (!g_uld_info[SERVICE_T_ROCE].probe) { + sdk_info(&pdev->dev, "Uld(roce_info) has not been registered!\n"); + mutex_unlock(&g_uld_mutex); + return 0; + } + + err = attach_uld(pci_adapter, SERVICE_T_ROCE, &g_uld_info[SERVICE_T_ROCE]); + if (0 != err) { + sdk_err(&pdev->dev, "Failed to initialize VROCE.\n"); + mutex_unlock(&g_uld_mutex); + return err; + } + } else { + sdk_info(&pdev->dev, "Func %hu vroce state: disable.\n", func_id); + if (g_uld_info[SERVICE_T_ROCE].remove) + detach_uld(pci_adapter, SERVICE_T_ROCE); + } + + mutex_unlock(&g_uld_mutex); + + return 0; +} + +void slave_host_mgmt_vroce_work(struct work_struct *work) +{ + struct hinic3_pcidev *pci_adapter = + container_of(work, struct hinic3_pcidev, slave_vroce_work); + + __set_vroce_func_state(pci_adapter); +} + +void *hinic3_get_roce_uld_by_pdev(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = NULL; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) + return NULL; + + return pci_adapter->uld_dev[SERVICE_T_ROCE]; +} + +static int __func_service_state_process(struct hinic3_pcidev *event_dev, + struct hinic3_pcidev *des_dev, + struct hinic3_mhost_nic_func_state *state, u16 cmd) +{ + int err = 0; + struct hinic3_hwdev *dev = (struct hinic3_hwdev *)event_dev->hwdev; + + switch (cmd) { + case HINIC3_MHOST_GET_VROCE_STATE: + state->enable = hinic3_get_roce_uld_by_pdev(des_dev->pcidev) ? 1 : 0; + break; + case HINIC3_MHOST_NIC_STATE_CHANGE: + sdk_info(&des_dev->pcidev->dev, "Receive nic[%u] state changed event, state: %u\n", + state->func_idx, state->enable); + if (event_dev->multi_host_mgmt_workq) { + queue_work(event_dev->multi_host_mgmt_workq, &des_dev->slave_nic_work); + } else { + sdk_err(&des_dev->pcidev->dev, "Can not schedule slave nic work\n"); + err = -EFAULT; + } + break; + case HINIC3_MHOST_VROCE_STATE_CHANGE: + sdk_info(&des_dev->pcidev->dev, "Receive vroce[%u] state changed event, state: %u\n", + state->func_idx, state->enable); + queue_work_on(hisdk3_get_work_cpu_affinity(dev, WORK_TYPE_MBOX), + event_dev->multi_host_mgmt_workq, + &des_dev->slave_vroce_work); + break; + default: + sdk_warn(&des_dev->pcidev->dev, "Service state process with unknown cmd: %u\n", cmd); + err = -EFAULT; + break; + } + + return err; +} + +static void __multi_host_mgmt(struct hinic3_pcidev *dev, + struct hinic3_multi_host_mgmt_event *mhost_mgmt) +{ + struct hinic3_pcidev *cur_dev = NULL; + struct hinic3_pcidev *des_dev = NULL; + struct hinic3_mhost_nic_func_state *nic_state = NULL; + u16 sub_cmd = mhost_mgmt->sub_cmd; + + switch (sub_cmd) { + case HINIC3_MHOST_GET_VROCE_STATE: + case HINIC3_MHOST_VROCE_STATE_CHANGE: + case HINIC3_MHOST_NIC_STATE_CHANGE: + nic_state = mhost_mgmt->data; + nic_state->status = 0; + if (!dev->hwdev) + return; + + if (!IS_BMGW_SLAVE_HOST((struct hinic3_hwdev *)dev->hwdev)) + return; + + /* find func_idx pci_adapter and disable or enable nic */ + lld_dev_hold(&dev->lld_dev); + list_for_each_entry(cur_dev, &dev->chip_node->func_list, node) { + if (cur_dev->lld_state == HINIC3_IN_REMOVE || hinic3_pdev_is_virtfn(cur_dev->pcidev)) + continue; + + des_dev = get_des_pci_adapter(cur_dev, nic_state->func_idx); + if (!des_dev) + continue; + + if (__func_service_state_process(dev, des_dev, nic_state, sub_cmd)) + nic_state->status = 1; + break; + } + lld_dev_put(&dev->lld_dev); + break; + default: + 
sdk_warn(&dev->pcidev->dev, "Received unknown multi-host mgmt event: %u\n", + mhost_mgmt->sub_cmd); + break; + } +} + static void hinic3_event_process(void *adapter, struct hinic3_event_info *event) { struct hinic3_pcidev *dev = adapter; struct hinic3_fault_event *fault = (void *)event->event_data; + struct hinic3_multi_host_mgmt_event *mhost_event = (void *)event->event_data; u16 func_id; - if ((event->service == EVENT_SRV_COMM && event->type == EVENT_COMM_FAULT) && - fault->fault_level == FAULT_LEVEL_SERIOUS_FLR && - fault->event.chip.func_id < hinic3_max_pf_num(dev->hwdev)) { - func_id = fault->event.chip.func_id; - return send_event_to_dst_pf(adapter, func_id, event); - } - - if (event->type == EVENT_COMM_MGMT_WATCHDOG) + switch (HINIC3_SRV_EVENT_TYPE(event->service, event->type)) { + case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_COMM, EVENT_COMM_MULTI_HOST_MGMT): + __multi_host_mgmt(dev, mhost_event); + break; + case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_COMM, EVENT_COMM_FAULT): + if (fault->fault_level == FAULT_LEVEL_SERIOUS_FLR && + fault->event.chip.func_id < hinic3_max_pf_num(dev->hwdev)) { + func_id = fault->event.chip.func_id; + return send_event_to_dst_pf(adapter, func_id, event); + } + break; + case HINIC3_SRV_EVENT_TYPE(EVENT_SRV_COMM, EVENT_COMM_MGMT_WATCHDOG): send_event_to_all_pf(adapter, event); - else + break; + default: send_uld_dev_event(adapter, event); + break; + } } static void uld_def_init(struct hinic3_pcidev *pci_adapter) @@ -631,36 +1116,75 @@ static void hinic3_pci_deinit(struct pci_dev *pdev) kfree(pci_adapter); } -#ifdef CONFIG_X86 -/** - * cfg_order_reg - when cpu model is haswell or broadwell, should configure dma - * order register to zero - * @pci_adapter: pci_adapter - **/ -/*lint -save -e40 */ -static void cfg_order_reg(struct hinic3_pcidev *pci_adapter) +static void set_vf_load_state(struct pci_dev *pdev, struct hinic3_pcidev *pci_adapter) { - u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56}; - struct cpuinfo_x86 *cpuinfo = NULL; - u32 i; - - if (hinic3_func_type(pci_adapter->hwdev) == TYPE_VF) - return; + /* In bm mode, slave host will load vfs in default */ + if (IS_BMGW_SLAVE_HOST(((struct hinic3_hwdev *)pci_adapter->hwdev)) && + hinic3_func_type(pci_adapter->hwdev) != TYPE_VF) + hinic3_set_vf_load_state(pdev, false); - cpuinfo = &cpu_data(0); - for (i = 0; i < sizeof(cpu_model); i++) { - if (cpu_model[i] == cpuinfo->x86_model) - hinic3_set_pcie_order_cfg(pci_adapter->hwdev); + if (!disable_attach) { + if ((hinic3_func_type(pci_adapter->hwdev) != TYPE_VF) && + hinic3_is_bm_slave_host(pci_adapter->hwdev)) { + if (hinic3_func_max_vf(pci_adapter->hwdev) == 0) { + sdk_warn(&pdev->dev, "The sriov enabling process is skipped, vfs_num: 0.\n"); + return; + } + hinic3_pci_sriov_enable(pdev, hinic3_func_max_vf(pci_adapter->hwdev)); + } } } -/*lint -restore*/ -#endif - -static int hinic3_func_init(struct pci_dev *pdev, struct hinic3_pcidev *pci_adapter) +static void hinic3_init_ppf_hwdev(struct hinic3_hwdev *hwdev) { - struct hinic3_init_para init_para = {0}; - bool cqm_init_en = false; + if (!hwdev) { + pr_err("[%s:%d] null hwdev pointer\n", __FILE__, __LINE__); + return; + } + + hwdev->ppf_hwdev = hinic3_get_ppf_hwdev_by_pdev(hwdev->pcidev_hdl); + return; +} + +static int set_nic_func_state(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + u16 func_id; + int err; + bool enable_nic = false; + + func_id = hinic3_global_func_id(pci_adapter->hwdev); + + err = hinic3_get_func_nic_enable(pci_adapter->hwdev, func_id, 
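/*
 * hinic3_event_process() above folds (event->service, event->type) into a
 * single key so one switch can route faults, watchdog resets and multi-host
 * management events. A self-contained sketch of that dispatch style follows;
 * the 16/16 bit split, the demo_* names and the enum values are assumptions,
 * since the real HINIC3_SRV_EVENT_TYPE() layout is defined elsewhere.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SRV_EVENT_TYPE(srv, type) (((uint32_t)(srv) << 16) | (uint32_t)(type))

enum demo_service { DEMO_SRV_COMM = 0 };
enum demo_comm_event { DEMO_COMM_FAULT = 1, DEMO_COMM_WATCHDOG = 2, DEMO_COMM_MHOST_MGMT = 3 };

static void demo_dispatch(uint16_t service, uint16_t type)
{
	switch (DEMO_SRV_EVENT_TYPE(service, type)) {
	case DEMO_SRV_EVENT_TYPE(DEMO_SRV_COMM, DEMO_COMM_FAULT):
		printf("forward fault to the destination PF\n");
		break;
	case DEMO_SRV_EVENT_TYPE(DEMO_SRV_COMM, DEMO_COMM_WATCHDOG):
		printf("broadcast watchdog event to all PFs\n");
		break;
	case DEMO_SRV_EVENT_TYPE(DEMO_SRV_COMM, DEMO_COMM_MHOST_MGMT):
		printf("handle multi-host management event\n");
		break;
	default:
		printf("pass event to upper-layer (ULD) drivers\n");
		break;
	}
}

int main(void)
{
	demo_dispatch(DEMO_SRV_COMM, DEMO_COMM_MHOST_MGMT);
	return 0;
}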
&enable_nic); + if (0 != err) { + sdk_err(&pdev->dev, "Failed to get nic state.\n"); + return err; + } + + if (!enable_nic) { + sdk_info(&pdev->dev, "Func %hu nic state: disable.\n", func_id); + detach_uld(pci_adapter, SERVICE_T_NIC); + return 0; + } + + if (IS_BMGW_SLAVE_HOST((struct hinic3_hwdev *)pci_adapter->hwdev)) + (void)hinic3_init_vf_dev_cap(pci_adapter->hwdev); + + if (g_uld_info[SERVICE_T_NIC].probe) { + err = attach_uld(pci_adapter, SERVICE_T_NIC, &g_uld_info[SERVICE_T_NIC]); + if (0 != err) { + sdk_err(&pdev->dev, "Initialize NIC failed\n"); + return err; + } + } + + return 0; +} + +static int hinic3_func_init(struct pci_dev *pdev, struct hinic3_pcidev *pci_adapter) +{ + struct hinic3_init_para init_para = {0}; + bool cqm_init_en = false; int err; init_para.adapter_hdl = pci_adapter; @@ -715,7 +1239,16 @@ static int hinic3_func_init(struct pci_dev *pdev, struct hinic3_pcidev *pci_adap list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list); lld_unlock_chip_node(); + hinic3_init_ppf_hwdev((struct hinic3_hwdev *)pci_adapter->hwdev); + + set_vf_load_state(pdev, pci_adapter); + if (!disable_attach) { + /* NIC is base driver, probe firstly */ + err = set_nic_func_state(pci_adapter); + if (err) + goto set_nic_func_state_err; + attach_ulds(pci_adapter); if (hinic3_func_type(pci_adapter->hwdev) != TYPE_VF) { @@ -726,10 +1259,6 @@ static int hinic3_func_init(struct pci_dev *pdev, struct hinic3_pcidev *pci_adap goto create_sysfs_err; } } - -#ifdef CONFIG_X86 - cfg_order_reg(pci_adapter); -#endif } return 0; @@ -737,6 +1266,7 @@ static int hinic3_func_init(struct pci_dev *pdev, struct hinic3_pcidev *pci_adap create_sysfs_err: detach_ulds(pci_adapter); +set_nic_func_state_err: lld_lock_chip_node(); list_del(&pci_adapter->node); lld_unlock_chip_node(); @@ -785,6 +1315,7 @@ static void hinic3_func_deinit(struct pci_dev *pdev) hinic3_free_stateful(pci_adapter->hwdev); hinic3_free_hwdev(pci_adapter->hwdev); + pci_adapter->hwdev = NULL; } static void wait_sriov_cfg_complete(struct hinic3_pcidev *pci_adapter) @@ -806,6 +1337,49 @@ static void wait_sriov_cfg_complete(struct hinic3_pcidev *pci_adapter) } while (time_before(jiffies, end)); } +static bool hinic3_get_vf_nic_en_status(struct pci_dev *pdev) +{ + bool nic_en = false; + u16 global_func_id; + struct pci_dev *pf_pdev = NULL; + struct hinic3_pcidev *pci_adapter = NULL; + + if (!pdev) { + pr_err("pdev is null.\n"); + return false; + } + + if (pdev->is_virtfn) + pf_pdev = pdev->physfn; + else + return false; + + pci_adapter = pci_get_drvdata(pf_pdev); + if (!pci_adapter) { + sdk_err(&pdev->dev, "pci_adapter is null.\n"); + return false; + } + + if (!IS_BMGW_SLAVE_HOST((struct hinic3_hwdev *)pci_adapter->hwdev)) + return false; + + if (hinic3_get_vfid_by_vfpci(NULL, pdev, &global_func_id)) { + sdk_err(&pdev->dev, "Get vf id by vfpci failed\n"); + return false; + } + + if (hinic3_get_mhost_func_nic_enable(pci_adapter->hwdev, + global_func_id, &nic_en)) { + sdk_err(&pdev->dev, "Get function nic status failed\n"); + return false; + } + + sdk_info(&pdev->dev, "Func %hu %s default probe in host\n", + global_func_id, (nic_en) ? 
"enable" : "disable"); + + return nic_en; +} + bool hinic3_get_vf_load_state(struct pci_dev *pdev) { struct hinic3_pcidev *pci_adapter = NULL; @@ -860,6 +1434,8 @@ int hinic3_set_vf_load_state(struct pci_dev *pdev, bool vf_load_state) } EXPORT_SYMBOL(hinic3_set_vf_load_state); + + bool hinic3_get_vf_service_load(struct pci_dev *pdev, u16 service) { struct hinic3_pcidev *pci_adapter = NULL; @@ -923,6 +1499,33 @@ int hinic3_set_vf_service_load(struct pci_dev *pdev, u16 service, } EXPORT_SYMBOL(hinic3_set_vf_service_load); +static bool hinic3_is_host_vmsec_enable(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = NULL; + struct pci_dev *pf_pdev = NULL; + + if (pdev->is_virtfn) { + pf_pdev = pdev->physfn; + } else { + pf_pdev = pdev; + } + + pci_adapter = pci_get_drvdata(pf_pdev); + if (!pci_adapter) { + pr_err("Pci_adapter is null.\n"); + return false; + } + + /* pf/vf used in host */ + if (IS_VM_SLAVE_HOST((struct hinic3_hwdev *)pci_adapter->hwdev) && + (hinic3_func_type(pci_adapter->hwdev) == TYPE_PF) && + IS_RDMA_TYPE((struct hinic3_hwdev *)pci_adapter->hwdev)) { + return true; + } + + return false; +} + static int hinic3_remove_func(struct hinic3_pcidev *pci_adapter) { struct pci_dev *pdev = pci_adapter->pcidev; @@ -936,6 +1539,13 @@ static int hinic3_remove_func(struct hinic3_pcidev *pci_adapter) pci_adapter->lld_state = HINIC3_IN_REMOVE; mutex_unlock(&pci_adapter->pdev_mutex); + if (!(pdev->is_virtfn) && (hinic3_is_host_vmsec_enable(pdev) == true) && + (hinic3_func_type((struct hinic3_hwdev *)pci_adapter->hwdev) == TYPE_PF)) { + cancel_delayed_work_sync(&pci_adapter->migration_probe_dwork); + flush_workqueue(pci_adapter->migration_probe_workq); + destroy_workqueue(pci_adapter->migration_probe_workq); + } + hinic3_detect_hw_present(pci_adapter->hwdev); hisdk3_remove_pre_process(pci_adapter->hwdev); @@ -960,23 +1570,129 @@ static int hinic3_remove_func(struct hinic3_pcidev *pci_adapter) sdk_info(&pdev->dev, "Pcie device removed function\n"); + set_vf_func_in_use(pdev, false); + + return 0; +} + +int hinic3_get_vfid_by_vfpci(void *hwdev, struct pci_dev *pdev, u16 *global_func_id) +{ + struct pci_dev *pf_pdev = NULL; + struct hinic3_pcidev *pci_adapter = NULL; + u16 pf_bus, vf_bus, vf_offset; + int err; + + if (!pdev || !global_func_id || !hinic3_pdev_is_virtfn(pdev)) + return -EINVAL; + (void)hwdev; + pf_pdev = pdev->physfn; + + vf_bus = pdev->bus->number; + pf_bus = pf_pdev->bus->number; + + if (pdev->vendor == HINIC3_VIRTIO_VNEDER_ID) { + return -EPERM; + } + + pci_adapter = pci_get_drvdata(pf_pdev); + if (!pci_adapter) { + sdk_err(&pdev->dev, "pci_adapter is null.\n"); + return -EINVAL; + } + + err = hinic3_pf_get_vf_offset_info(pci_adapter, &vf_offset); + if (err) { + sdk_err(&pdev->dev, "Func hinic3_pf_get_vf_offset_info fail\n"); + return -EFAULT; + } + + *global_func_id = (u16)((vf_bus - pf_bus) * BUS_MAX_DEV_NUM) + (u16)pdev->devfn + + (u16)(CMD_MAX_MAX_PF_NUM - g_vf_offset.vf_offset_from_pf[0]); + return 0; } +EXPORT_SYMBOL(hinic3_get_vfid_by_vfpci); + +static void hinic3_set_vf_status_in_host(struct pci_dev *pdev, bool status) +{ + struct pci_dev *pf_pdev = pdev->physfn; + struct hinic3_pcidev *pci_adapter = NULL; + void *pf_hwdev = NULL; + void *ppf_hwdev = NULL; + u16 global_func_id; + int ret; + + if (!pf_pdev) + return; + + if (!hinic3_pdev_is_virtfn(pdev)) + return; + + pci_adapter = pci_get_drvdata(pf_pdev); + pf_hwdev = pci_adapter->hwdev; + ppf_hwdev = hinic3_get_ppf_hwdev_by_pdev(pf_pdev); + if (!pf_hwdev || !ppf_hwdev) + return; + + ret = 
hinic3_get_vfid_by_vfpci(NULL, pdev, &global_func_id); + if (ret) { + sdk_err(&pci_adapter->pcidev->dev, "Func hinic3_get_vfid_by_vfpci fail %d \n", ret); + return; + } + + ret = hinic3_set_func_probe_in_host(ppf_hwdev, global_func_id, status); + if (ret) + sdk_err(&pci_adapter->pcidev->dev, "Set the function probe status in host failed\n"); +} +#ifdef CONFIG_PCI_IOV +static bool check_pdev_type_and_state(struct pci_dev *pdev) +{ + if (!(pdev->is_virtfn)) { + return false; + } + + if ((hinic3_get_pf_device_id(pdev) != HINIC3_DEV_ID_SDI_5_1_PF) && + (hinic3_get_pf_device_id(pdev) != HINIC3_DEV_ID_SDI_5_0_PF)) { + return false; + } + + if (!hinic3_get_vf_load_state(pdev)) { + return false; + } + + return true; +} +#endif static void hinic3_remove(struct pci_dev *pdev) { struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + sdk_info(&pdev->dev, "Pcie device remove begin\n"); + if (!pci_adapter) - return; + goto out; +#ifdef CONFIG_PCI_IOV + if (check_pdev_type_and_state(pdev)) { + goto out; + } +#endif - sdk_info(&pdev->dev, "Pcie device remove begin\n"); + cancel_work_sync(&pci_adapter->slave_nic_work); + cancel_work_sync(&pci_adapter->slave_vroce_work); hinic3_remove_func(pci_adapter); + if (!pci_adapter->pcidev->is_virtfn && + pci_adapter->multi_host_mgmt_workq) + destroy_workqueue(pci_adapter->multi_host_mgmt_workq); + hinic3_pci_deinit(pdev); hinic3_probe_pre_unprocess(pdev); +out: + hinic3_set_vf_status_in_host(pdev, false); + sdk_info(&pdev->dev, "Pcie device removed\n"); } @@ -995,7 +1711,7 @@ static int probe_func_param_init(struct hinic3_pcidev *pci_adapter) if (pci_adapter->lld_state >= HINIC3_PROBE_START) { sdk_warn(&pdev->dev, "Don not probe repeat\n"); mutex_unlock(&pci_adapter->pdev_mutex); - return 0; + return -EEXIST; } pci_adapter->lld_state = HINIC3_PROBE_START; mutex_unlock(&pci_adapter->pdev_mutex); @@ -1003,15 +1719,28 @@ static int probe_func_param_init(struct hinic3_pcidev *pci_adapter) return 0; } +static void hinic3_probe_success_process(struct hinic3_pcidev *pci_adapter) +{ + hinic3_probe_success(pci_adapter->hwdev); + + mutex_lock(&pci_adapter->pdev_mutex); + pci_adapter->lld_state = HINIC3_PROBE_OK; + mutex_unlock(&pci_adapter->pdev_mutex); +} + static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter) { struct pci_dev *pdev = pci_adapter->pcidev; int err; err = probe_func_param_init(pci_adapter); - if (err) + if (err == -EEXIST) + return 0; + else if (err) return err; + set_vf_func_in_use(pdev, true); + err = mapping_bar(pdev, pci_adapter); if (err) { sdk_err(&pdev->dev, "Failed to map bar\n"); @@ -1043,11 +1772,7 @@ static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter) } } - hinic3_probe_success(pci_adapter->hwdev); - - mutex_lock(&pci_adapter->pdev_mutex); - pci_adapter->lld_state = HINIC3_PROBE_OK; - mutex_unlock(&pci_adapter->pdev_mutex); + hinic3_probe_success_process(pci_adapter); return 0; @@ -1063,18 +1788,299 @@ static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter) unmapping_bar(pci_adapter); map_bar_failed: + set_vf_func_in_use(pdev, false); sdk_err(&pdev->dev, "Pcie device probe function failed\n"); return err; } +void hinic3_set_func_state(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + int err; + bool enable_func = false; + + err = hinic3_get_function_enable(pdev, &enable_func); + if (err) { + sdk_info(&pdev->dev, "Get function enable failed\n"); + return; + } + + sdk_info(&pdev->dev, "%s function resource start\n", + enable_func ? 
"Initialize" : "Free"); + if (enable_func) { + err = hinic3_probe_func(pci_adapter); + if (err) + sdk_info(&pdev->dev, "Function probe failed\n"); + } else { + hinic3_remove_func(pci_adapter); + } + if (err == 0) + sdk_info(&pdev->dev, "%s function resource end\n", + enable_func ? "Initialize" : "Free"); +} + +void slave_host_mgmt_work(struct work_struct *work) +{ + struct hinic3_pcidev *pci_adapter = + container_of(work, struct hinic3_pcidev, slave_nic_work); + + if (hinic3_pdev_is_virtfn(pci_adapter->pcidev)) + hinic3_set_func_state(pci_adapter); + else + set_nic_func_state(pci_adapter); +} + +static int pci_adapter_assign_val(struct hinic3_pcidev **ppci_adapter, + struct pci_dev *pdev, const struct pci_device_id *id) +{ + *ppci_adapter = pci_get_drvdata(pdev); + (*ppci_adapter)->disable_vf_load = disable_vf_load; + (*ppci_adapter)->id = *id; + (*ppci_adapter)->lld_state = HINIC3_NOT_PROBE; + (*ppci_adapter)->probe_fault_level = FAULT_LEVEL_SERIOUS_FLR; + lld_dev_cnt_init(*ppci_adapter); + + (*ppci_adapter)->multi_host_mgmt_workq = + alloc_workqueue("hinic_mhost_mgmt", WQ_UNBOUND, + HINIC3_SLAVE_WORK_MAX_NUM); + if (!(*ppci_adapter)->multi_host_mgmt_workq) { + hinic3_pci_deinit(pdev); + sdk_err(&pdev->dev, "Alloc multi host mgmt workqueue failed\n"); + return -ENOMEM; + } + + INIT_WORK(&(*ppci_adapter)->slave_nic_work, slave_host_mgmt_work); + INIT_WORK(&(*ppci_adapter)->slave_vroce_work, + slave_host_mgmt_vroce_work); + + return 0; +} + +static void slave_host_vfio_probe_delay_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct hinic3_pcidev *pci_adapter = container_of(delay, struct hinic3_pcidev, migration_probe_dwork); + struct pci_dev *pdev = pci_adapter->pcidev; + int (*dev_migration_probe)(struct pci_dev *); + int rc; + + if (hinic3_func_type((struct hinic3_hwdev *)pci_adapter->hwdev) != TYPE_PF) { + return; + } + + dev_migration_probe = __symbol_get("migration_dev_migration_probe"); + if (!(dev_migration_probe)) { + sdk_err(&pdev->dev, + "Failed to find: migration_dev_migration_probe"); + queue_delayed_work(pci_adapter->migration_probe_workq, + &pci_adapter->migration_probe_dwork, WAIT_TIME * HZ); + } else { + rc = dev_migration_probe(pdev); + __symbol_put("migration_dev_migration_probe"); + if (rc) { + sdk_err(&pdev->dev, + "Failed to __dev_migration_probe, rc:0x%x, pf migrated(%d).\n", + rc, g_is_pf_migrated); + } else { + g_is_pf_migrated = true; + sdk_info(&pdev->dev, + "Successed in __dev_migration_probe, pf migrated(%d).\n", + g_is_pf_migrated); + } + } + + return; +} + +struct vf_add_delaywork { + struct pci_dev *vf_pdev; + struct delayed_work migration_vf_add_dwork; +}; + +static void slave_host_migration_vf_add_delay_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct vf_add_delaywork *vf_add = container_of(delay, struct vf_add_delaywork, migration_vf_add_dwork); + struct pci_dev *vf_pdev = vf_add->vf_pdev; + struct pci_dev *pf_pdev = NULL; + int (*migration_dev_add_vf)(struct pci_dev *); + int ret; + struct hinic3_pcidev *pci_adapter = NULL; + + if (!vf_pdev) { + pr_err("vf pdev is null.\n"); + goto err1; + } + if (!vf_pdev->is_virtfn) { + sdk_err(&vf_pdev->dev, "Pdev is not virtfn.\n"); + goto err1; + } + + pf_pdev = vf_pdev->physfn; + if (!pf_pdev) { + sdk_err(&vf_pdev->dev, "pf_pdev is null.\n"); + goto err1; + } + + pci_adapter = pci_get_drvdata(pf_pdev); + if (!pci_adapter) { + sdk_err(&vf_pdev->dev, "Pci_adapter is null.\n"); + goto err1; + } + + if (!g_is_pf_migrated) { + 
sdk_info(&vf_pdev->dev, "pf is not migrated yet, so vf continues to try again.\n"); + goto delay_work; + } + + migration_dev_add_vf = __symbol_get("migration_dev_add_vf"); + if (migration_dev_add_vf) { + ret = migration_dev_add_vf(vf_pdev); + __symbol_put("migration_dev_add_vf"); + if (ret) { + sdk_err(&vf_pdev->dev, + "vf get migration symbol successed, but dev add vf failed, ret:%d.\n", + ret); + } else { + sdk_info(&vf_pdev->dev, + "vf get migration symbol successed, and dev add vf success.\n"); + } + goto err1; + } + sdk_info(&vf_pdev->dev, "pf is migrated, but vf get migration symbol failed.\n"); + +delay_work: + queue_delayed_work(pci_adapter->migration_probe_workq, + &vf_add->migration_vf_add_dwork, WAIT_TIME * HZ); + return; + +err1: + kfree(vf_add); + return; +} + +static void hinic3_probe_vf_add_dwork(struct pci_dev *pdev) +{ + struct pci_dev *pf_pdev = NULL; + struct hinic3_pcidev *pci_adapter = NULL; + + if (!hinic3_is_host_vmsec_enable(pdev)) { + return; + } + +#if defined(CONFIG_SP_VID_DID) + if ((pdev->vendor == PCI_VENDOR_ID_SPNIC) && (pdev->device == HINIC3_DEV_SDI_5_1_ID_VF)) { +#elif defined(CONFIG_NF_VID_DID) + if ((pdev->vendor == PCI_VENDOR_ID_NF) && (pdev->device == NFNIC_DEV_ID_VF)) { +#else + if ((pdev->vendor == PCI_VENDOR_ID_HUAWEI) && (pdev->device == HINIC3_DEV_SDI_5_0_ID_VF)) { +#endif + struct vf_add_delaywork *vf_add = kmalloc(sizeof(struct vf_add_delaywork), GFP_ATOMIC); + if (!vf_add) { + sdk_info(&pdev->dev, "vf_add is null.\n"); + return; + } + vf_add->vf_pdev = pdev; + + pf_pdev = pdev->physfn; + + if (!pf_pdev) { + sdk_info(&pdev->dev, "Vf-pf_pdev is null.\n"); + kfree(vf_add); + return; + } + + pci_adapter = pci_get_drvdata(pf_pdev); + if (!pci_adapter) { + sdk_info(&pdev->dev, "Pci_adapter is null.\n"); + kfree(vf_add); + return; + } + + INIT_DELAYED_WORK(&vf_add->migration_vf_add_dwork, + slave_host_migration_vf_add_delay_work); + + queue_delayed_work(pci_adapter->migration_probe_workq, + &vf_add->migration_vf_add_dwork, + WAIT_TIME * HZ); + } + + return; +} + +static int hinic3_probe_migration_dwork(struct pci_dev *pdev, struct hinic3_pcidev *pci_adapter) +{ + if (!hinic3_is_host_vmsec_enable(pdev)) { + sdk_info(&pdev->dev, "Probe_migration : hinic3_is_host_vmsec_enable is (0).\n"); + return 0; + } + + if (IS_VM_SLAVE_HOST((struct hinic3_hwdev *)pci_adapter->hwdev) && + hinic3_func_type((struct hinic3_hwdev *)pci_adapter->hwdev) == TYPE_PF) { + pci_adapter->migration_probe_workq = + create_singlethread_workqueue("hinic3_migration_probe_delay"); + if (!pci_adapter->migration_probe_workq) { + sdk_err(&pdev->dev, "Failed to create work queue:%s\n", + "hinic3_migration_probe_delay"); + return -EINVAL; + } + + INIT_DELAYED_WORK(&pci_adapter->migration_probe_dwork, + slave_host_vfio_probe_delay_work); + + queue_delayed_work(pci_adapter->migration_probe_workq, + &pci_adapter->migration_probe_dwork, WAIT_TIME * HZ); + } + + return 0; +} + +static bool hinic3_os_hot_replace_allow(struct hinic3_pcidev *pci_adapter) +{ + struct hinic3_hwdev *hwdev = (struct hinic3_hwdev *)pci_adapter->hwdev; + // check service enable and dev is not VF + if (hinic3_func_type(hwdev) == TYPE_VF || hwdev->hot_replace_mode == HOT_REPLACE_DISABLE) + return false; + + return true; +} + +static bool hinic3_os_hot_replace_process(struct hinic3_pcidev *pci_adapter) +{ + struct hinic3_board_info *board_info; + u16 cur_pf_id = hinic3_global_func_id(pci_adapter->hwdev); + u8 cur_partion_id; + board_info = &((struct hinic3_hwdev *)(pci_adapter->hwdev))->board_info; + // probe to os + 
vpci_set_partition_attrs(pci_adapter->pcidev, PARTITION_DEV_EXCLUSIVE, + get_function_partition(cur_pf_id, board_info->port_num)); + + // check pf_id is in the right partition_id + cur_partion_id = get_partition_id(); + if (get_function_partition(cur_pf_id, board_info->port_num) == cur_partion_id) { + return true; + } + + pci_adapter->probe_fault_level = FAULT_LEVEL_SUGGESTION; + return false; +} + static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hinic3_pcidev *pci_adapter = NULL; u16 probe_fault_level = FAULT_LEVEL_SERIOUS_FLR; + u32 device_id, function_id; int err; sdk_info(&pdev->dev, "Pcie device probe begin\n"); - +#ifdef CONFIG_PCI_IOV + hinic3_set_vf_status_in_host(pdev, true); + if (check_pdev_type_and_state(pdev)) { + sdk_info(&pdev->dev, "VFs are not binded to hinic\n"); + hinic3_probe_vf_add_dwork(pdev); + return -EINVAL; + } +#endif err = hinic3_probe_pre_process(pdev); if (err != 0 && err != HINIC3_NOT_PROBE) goto out; @@ -1082,33 +2088,53 @@ static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err == HINIC3_NOT_PROBE) return 0; - err = hinic3_pci_init(pdev); - if (err) + if (hinic3_pci_init(pdev)) goto pci_init_err; - pci_adapter = pci_get_drvdata(pdev); - pci_adapter->disable_vf_load = disable_vf_load; - pci_adapter->id = *id; - pci_adapter->lld_state = HINIC3_NOT_PROBE; - pci_adapter->probe_fault_level = probe_fault_level; - lld_dev_cnt_init(pci_adapter); + if (pci_adapter_assign_val(&pci_adapter, pdev, id)) + goto allco_queue_err; - if (pdev->is_virtfn && (!hinic3_get_vf_load_state(pdev))) { + if (pdev->is_virtfn && (!hinic3_get_vf_load_state(pdev)) && + (!hinic3_get_vf_nic_en_status(pdev))) { sdk_info(&pdev->dev, "VF device disable load in host\n"); return 0; } - err = hinic3_probe_func(pci_adapter); - if (err) + if (hinic3_probe_func(pci_adapter)) + goto hinic3_probe_func_fail; + + if (hinic3_os_hot_replace_allow(pci_adapter)) { + if (!hinic3_os_hot_replace_process(pci_adapter)) { + device_id = PCI_SLOT(pdev->devfn); + function_id = PCI_FUNC(pdev->devfn); + sdk_info(&pdev->dev, + "os hot replace: skip function %d:%d for partition %d", + device_id, function_id, get_partition_id()); + goto os_hot_repalce_not_allow; + } + } + + if (hinic3_probe_migration_dwork(pdev, pci_adapter)) goto hinic3_probe_func_fail; sdk_info(&pdev->dev, "Pcie device probed\n"); return 0; +os_hot_repalce_not_allow: + hinic3_func_deinit(pdev); + lld_lock_chip_node(); + free_chip_node(pci_adapter); + lld_unlock_chip_node(); + unmapping_bar(pci_adapter); + set_vf_func_in_use(pdev, false); + hinic3_probe_func_fail: + destroy_workqueue(pci_adapter->multi_host_mgmt_workq); + cancel_work_sync(&pci_adapter->slave_nic_work); + cancel_work_sync(&pci_adapter->slave_vroce_work); +allco_queue_err: probe_fault_level = pci_adapter->probe_fault_level; hinic3_pci_deinit(pdev); - pci_init_err: hinic3_probe_pre_unprocess(pdev); @@ -1131,6 +2157,10 @@ static int hinic3_get_pf_info(struct pci_dev *pdev, u16 service, } *pf_infos = kzalloc(sizeof(struct hinic3_hw_pf_infos), GFP_KERNEL); + if (*pf_infos == NULL) { + sdk_err(&pdev->dev, "pf_infos kzalloc failed\n"); + return -EFAULT; + } err = hinic3_get_hw_pf_infos(dev->hwdev, *pf_infos, HINIC3_CHANNEL_COMM); if (err) { kfree(*pf_infos); @@ -1146,6 +2176,7 @@ static int hinic3_set_func_en(struct pci_dev *des_pdev, struct hinic3_pcidev *ds { int err; + mutex_lock(&dst_dev->pdev_mutex); /* unload invalid vf func id */ if (!en && vf_func_id != hinic3_global_func_id(dst_dev->hwdev) && 
!strcmp(des_pdev->driver->name, HINIC3_DRV_NAME)) { @@ -1163,6 +2194,8 @@ static int hinic3_set_func_en(struct pci_dev *des_pdev, struct hinic3_pcidev *ds err = hinic3_probe_func(dst_dev); if (err) return -EFAULT; + } else { + mutex_unlock(&dst_dev->pdev_mutex); } return 0; @@ -1187,7 +2220,6 @@ static int get_vf_service_state_param(struct pci_dev *pdev, struct hinic3_pcidev return 0; } -#define BUS_MAX_DEV_NUM 256 static int hinic3_dst_pdev_valid(struct hinic3_pcidev *dst_dev, struct pci_dev **des_pdev_ptr, u16 vf_devfn, bool en) { @@ -1245,7 +2277,7 @@ int hinic3_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, u16 servic lld_hold(); list_for_each_entry(dst_dev, &dev->chip_node->func_list, node) { - if (paramerter_is_unexpected(dst_dev, &func_id, &vf_start, &vf_end, vf_func_id)) + if (paramerter_is_unexpected(dst_dev, &func_id, &vf_start, &vf_end, vf_func_id) != 0) continue; vf_devfn = pf_infos->infos[func_id].vf_offset + (vf_func_id - vf_start) + @@ -1269,7 +2301,6 @@ int hinic3_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, u16 servic if (en) pci_dev_put(des_pdev); - mutex_lock(&dst_dev->pdev_mutex); find_dst_dev = true; break; } @@ -1289,19 +2320,18 @@ int hinic3_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, u16 servic } EXPORT_SYMBOL(hinic3_set_vf_service_state); -/*lint -save -e133 -e10*/ static const struct pci_device_id hinic3_pci_table[] = { + {PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_SPU), 0}, {PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_STANDARD), 0}, - {PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_DPU_PF), 0}, - {PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_SDI_5_0_PF), 0}, {PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_SDI_5_1_PF), 0}, + {PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_SDI_5_0_PF), 0}, + {PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_DPU_PF), 0}, + {PCI_VDEVICE(HUAWEI, HINIC3_DEV_SDI_5_1_ID_VF), 0}, {PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_VF), 0}, {0, 0} }; -/*lint -restore*/ - MODULE_DEVICE_TABLE(pci, hinic3_pci_table); /** @@ -1333,14 +2363,27 @@ static pci_ers_result_t hinic3_io_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_CAN_RECOVER; } +static void hinic3_timer_disable(void *hwdev) +{ + if (!hwdev) + return; + + if (hinic3_get_stateful_enable(hwdev) && hinic3_get_timer_enable(hwdev)) + (void)hinic3_func_tmr_bitmap_set(hwdev, hinic3_global_func_id(hwdev), false); + + return; +} + static void hinic3_shutdown(struct pci_dev *pdev) { struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); sdk_info(&pdev->dev, "Shutdown device\n"); - if (pci_adapter) + if (pci_adapter) { + hinic3_timer_disable(pci_adapter->hwdev); hinic3_shutdown_hwdev(pci_adapter->hwdev); + } pci_disable_device(pdev); @@ -1367,6 +2410,9 @@ static struct pci_driver hinic3_driver = { .probe = hinic3_probe, .remove = hinic3_remove, .shutdown = hinic3_shutdown, +#ifdef CONFIG_PARTITION_DEVICE + .driver.probe_concurrency = true, +#endif #if defined(HAVE_SRIOV_CONFIGURE) .sriov_configure = hinic3_pci_sriov_configure, #elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) @@ -1388,16 +2434,21 @@ int hinic3_lld_init(void) err = hinic3_module_pre_init(); if (err) { pr_err("Init custom failed\n"); - return err; + goto module_pre_init_err; } err = pci_register_driver(&hinic3_driver); if (err) { - hinic3_module_post_exit(); - return err; + pr_err("sdk3 pci register driver failed\n"); + goto register_pci_driver_err; } return 0; + +register_pci_driver_err: + hinic3_module_post_exit(); +module_pre_init_err: + return err; } void hinic3_lld_exit(void) diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c 
b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c index d2a7dd7c8678a09212b398cf1aceca84f94c09ba..5398a34bde9aa1af8065975a24c1d8623cfa0d29 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c @@ -584,6 +584,7 @@ static void free_recv_mbox(struct hinic3_recv_mbox *recv_msg) kfree(recv_msg->resp_buff); kfree(recv_msg->msg); kfree(recv_msg); + recv_msg = NULL; } static void recv_func_mbox_work_handler(struct work_struct *work) @@ -1233,6 +1234,7 @@ int hinic3_mbox_to_func(struct hinic3_mbox *func_to_func, u8 mod, u16 cmd, set_mbox_to_func_event(func_to_func, EVENT_FAIL); goto send_err; } + func_to_func->hwdev->mbox_send_cnt++; if (wait_mbox_msg_completion(func_to_func, timeout) != 0) { sdk_err(func_to_func->hwdev->dev_hdl, @@ -1241,6 +1243,7 @@ int hinic3_mbox_to_func(struct hinic3_mbox *func_to_func, u8 mod, u16 cmd, err = -ETIMEDOUT; goto send_err; } + func_to_func->hwdev->mbox_ack_cnt++; if (mod != msg_desc->mod || cmd != msg_desc->cmd) { sdk_err(func_to_func->hwdev->dev_hdl, diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c index 3ad9a77e389ee4bb66a4718b545479797be33dcd..0d75177779fa66516a1c0ae63b02e6d2ef41ea4e 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c @@ -348,7 +348,7 @@ int hinic3_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) return -EPERM; - if (!buf_in || in_size == 0) + if ((buf_in == NULL) || (in_size == 0)) return -EINVAL; ret = msg_to_mgmt_pre(mod, buf_in, in_size); @@ -864,6 +864,7 @@ static int alloc_recv_msg(struct hinic3_recv_msg *recv_msg) static void free_recv_msg(struct hinic3_recv_msg *recv_msg) { kfree(recv_msg->msg); + recv_msg->msg = NULL; } /** @@ -935,6 +936,9 @@ static void free_msg_buf(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt) free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + pf_to_mgmt->mgmt_ack_buf = NULL; + pf_to_mgmt->sync_msg_buf = NULL; + pf_to_mgmt->async_msg_buf = NULL; } /** diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.c index 3548410804491ee48a424d360795073d71166f93..b619800d3c27e7d18bebbef74d0836eacaa8a1a1 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.c @@ -676,6 +676,11 @@ static int hinic3_register_slave_ppf(struct hinic3_hwdev *hwdev, bool registered if (!IS_SLAVE_HOST(hwdev)) return -EINVAL; + /* if unsupport hot plug, return true. */ + if (UNSUPPORT_HOT_PLUG((struct hinic3_hwdev *)hwdev)) { + return 0; + } + host_info = kcalloc(1, sizeof(struct register_slave_host), GFP_KERNEL); if (!host_info) return -ENOMEM; @@ -1074,8 +1079,9 @@ int hinic3_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en) if (!hwdev || !en) return -EINVAL; - /* if single host, return true. */ - if (!IS_MULTI_HOST((struct hinic3_hwdev *)hwdev)) { + /* if single host or unsupport hot plug, return true. 
*/ + if (!IS_MULTI_HOST((struct hinic3_hwdev *)hwdev) || + UNSUPPORT_HOT_PLUG((struct hinic3_hwdev *)hwdev)) { *en = true; return 0; } @@ -1145,11 +1151,18 @@ int hinic3_multi_host_mgmt_init(struct hinic3_hwdev *hwdev) { int err; struct service_cap *cap = &hwdev->cfg_mgmt->svc_cap; + int is_use_vram, is_in_kexec; if (!IS_MULTI_HOST(hwdev) || !HINIC3_IS_PPF(hwdev)) return 0; - hwdev->mhost_mgmt = kcalloc(1, sizeof(*hwdev->mhost_mgmt), GFP_KERNEL); + is_use_vram = get_use_vram_flag(); + if (is_use_vram != 0) { + snprintf(hwdev->mhost_mgmt_name, VRAM_NAME_MAX_LEN, "%s", VRAM_NIC_MHOST_MGMT); + hwdev->mhost_mgmt = hi_vram_kalloc(hwdev->mhost_mgmt_name, sizeof(*hwdev->mhost_mgmt)); + } else { + hwdev->mhost_mgmt = kcalloc(1, sizeof(*hwdev->mhost_mgmt), GFP_KERNEL); + } if (!hwdev->mhost_mgmt) return -ENOMEM; @@ -1165,8 +1178,11 @@ int hinic3_multi_host_mgmt_init(struct hinic3_hwdev *hwdev) hinic3_register_ppf_mbox_cb(hwdev, HINIC3_MOD_HILINK, hwdev, hilink_ppf_mbox_handler); hinic3_register_ppf_mbox_cb(hwdev, HINIC3_MOD_SW_FUNC, hwdev, sw_func_ppf_mbox_handler); - bitmap_zero(hwdev->mhost_mgmt->func_nic_en, HINIC3_MAX_MGMT_FUNCTIONS); - bitmap_zero(hwdev->mhost_mgmt->func_vroce_en, HINIC3_MAX_MGMT_FUNCTIONS); + is_in_kexec = vram_get_kexec_flag(); + if (is_in_kexec == 0) { + bitmap_zero(hwdev->mhost_mgmt->func_nic_en, HINIC3_MAX_MGMT_FUNCTIONS); + bitmap_zero(hwdev->mhost_mgmt->func_vroce_en, HINIC3_MAX_MGMT_FUNCTIONS); + } /* Slave host: * register slave host ppf functions @@ -1179,7 +1195,13 @@ int hinic3_multi_host_mgmt_init(struct hinic3_hwdev *hwdev) return 0; out_free_mhost_mgmt: - kfree(hwdev->mhost_mgmt); + if (is_use_vram != 0) { + hi_vram_kfree((void *)hwdev->mhost_mgmt, + hwdev->mhost_mgmt_name, + sizeof(*hwdev->mhost_mgmt)); + } else { + kfree(hwdev->mhost_mgmt); + } hwdev->mhost_mgmt = NULL; return err; @@ -1187,6 +1209,7 @@ int hinic3_multi_host_mgmt_init(struct hinic3_hwdev *hwdev) int hinic3_multi_host_mgmt_free(struct hinic3_hwdev *hwdev) { + int is_use_vram; if (!IS_MULTI_HOST(hwdev) || !HINIC3_IS_PPF(hwdev)) return 0; @@ -1203,7 +1226,14 @@ int hinic3_multi_host_mgmt_free(struct hinic3_hwdev *hwdev) hinic3_unregister_ppf_mbox_cb(hwdev, HINIC3_MOD_HILINK); hinic3_unregister_ppf_mbox_cb(hwdev, HINIC3_MOD_SW_FUNC); - kfree(hwdev->mhost_mgmt); + is_use_vram = get_use_vram_flag(); + if (is_use_vram != 0) { + hi_vram_kfree((void *)hwdev->mhost_mgmt, + hwdev->mhost_mgmt_name, + sizeof(*hwdev->mhost_mgmt)); + } else { + kfree(hwdev->mhost_mgmt); + } hwdev->mhost_mgmt = NULL; return 0; diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c index f7d350cb59668962fd909d010f16ad1ee022604c..ee7afef7ca7869c604b74e2b4eddff6d0667f4d2 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c @@ -14,6 +14,7 @@ #include "hinic3_crm.h" #include "hinic3_hw.h" #include "hinic3_hw_cfg.h" +#include "hinic3_dev_mgmt.h" #include "hinic3_hwdev.h" #include "hinic3_lld.h" #include "hinic3_hw_mt.h" @@ -22,9 +23,7 @@ static int g_nictool_ref_cnt; static dev_t g_dev_id = {0}; -/*lint -save -e104 -e808*/ static struct class *g_nictool_class; -/*lint -restore*/ static struct cdev g_nictool_cdev; #define HINIC3_MAX_BUF_SIZE (2048 * 1024) @@ -92,6 +91,21 @@ static int get_all_chip_id_cmd(struct hinic3_lld_dev *lld_dev, const void *buf_i return 0; } +static int get_os_hot_replace_info(struct hinic3_lld_dev *lld_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 
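/*
 * hinic3_multi_host_mgmt_init()/..._free() above choose the allocator from
 * get_use_vram_flag(): a named, persistent region (hi_vram_kalloc) when the
 * OS may be hot-replaced via kexec, plain kcalloc() otherwise, and the
 * matching free must be used on teardown. The point of the named region is
 * that the same block can be found again after the replace, so state such as
 * func_nic_en survives. A small standalone sketch of that idea; the demo_*
 * registry is an illustrative stand-in, not the driver's VRAM API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_mgmt {
	char name[32];
	int persistent;
	unsigned int nic_en_bitmap;	/* stands in for func_nic_en */
};

#define DEMO_REG_SLOTS 8
static struct demo_mgmt *demo_registry[DEMO_REG_SLOTS];

static struct demo_mgmt *demo_mgmt_alloc(int persistent, const char *name)
{
	struct demo_mgmt *m;
	int i;

	if (persistent) {
		/* reuse the block already registered under this name */
		for (i = 0; i < DEMO_REG_SLOTS; i++) {
			if (demo_registry[i] &&
			    strcmp(demo_registry[i]->name, name) == 0)
				return demo_registry[i];
		}
	}

	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;
	m->persistent = persistent;
	snprintf(m->name, sizeof(m->name), "%s", name);

	if (persistent) {
		for (i = 0; i < DEMO_REG_SLOTS; i++) {
			if (!demo_registry[i]) {
				demo_registry[i] = m;
				break;
			}
		}
	}
	return m;
}

int main(void)
{
	struct demo_mgmt *a = demo_mgmt_alloc(1, "nic_mhost_mgmt");
	struct demo_mgmt *b = demo_mgmt_alloc(1, "nic_mhost_mgmt");

	a->nic_en_bitmap = 0x3;
	/* b is the same block, so state written before a "replace" is kept */
	printf("same block: %d, bitmap 0x%x\n", a == b, b->nic_en_bitmap);
	return 0;
}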
*out_size) +{ + if (*out_size != sizeof(struct os_hot_replace_info) || !buf_out) { + pr_err("Invalid parameter: out_buf_size %u, expect %lu\n", + *out_size, sizeof(struct os_hot_replace_info)); + return -EFAULT; + } + + hinic3_get_os_hot_replace_info(buf_out); + + return 0; +} + static int get_card_usr_api_chain_mem(int card_idx) { unsigned char *tmp = NULL; @@ -100,8 +114,8 @@ static int get_card_usr_api_chain_mem(int card_idx) card_id = card_idx; if (!g_card_vir_addr[card_idx]) { g_card_vir_addr[card_idx] = - (void *)__get_free_pages(GFP_KERNEL, - DBGTOOL_PAGE_ORDER); + (void *)ossl_get_free_pages(GFP_KERNEL, + DBGTOOL_PAGE_ORDER); if (!g_card_vir_addr[card_idx]) { pr_err("Alloc api chain memory fail for card %d!\n", card_idx); return -EFAULT; @@ -166,7 +180,7 @@ static int get_pf_dev_info(struct hinic3_lld_dev *lld_dev, const void *buf_in, u if (!buf_out || *out_size != sizeof(struct pf_dev_info) * PF_DEV_INFO_NUM) { pr_err("Invalid parameter: out_buf_size %u, expect %lu\n", - *out_size, sizeof(dev_info) * PF_DEV_INFO_NUM); + *out_size, sizeof(*dev_info) * PF_DEV_INFO_NUM); return -EFAULT; } @@ -193,13 +207,18 @@ static int get_pf_dev_info(struct hinic3_lld_dev *lld_dev, const void *buf_in, u return 0; } -static long dbgtool_knl_free_mem(int id) +static void dbgtool_knl_free_mem(int id) { unsigned char *tmp = NULL; int i; + if (id < 0 || id >= MAX_CARD_NUM) { + pr_err("Invalid card id\n"); + return; + } + if (!g_card_vir_addr[id]) - return 0; + return; tmp = g_card_vir_addr[id]; for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { @@ -210,8 +229,6 @@ static long dbgtool_knl_free_mem(int id) free_pages((unsigned long)g_card_vir_addr[id], DBGTOOL_PAGE_ORDER); g_card_vir_addr[id] = NULL; g_card_phy_addr[id] = 0; - - return 0; } static int free_knl_mem(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, @@ -236,7 +253,8 @@ static int free_knl_mem(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 return 0; } -static int card_info_param_valid(char *dev_name, const void *buf_out, u32 buf_out_size, int *id) +static int card_info_param_valid(const char *dev_name, const void *buf_out, + u32 buf_out_size, int *id) { int err; @@ -351,7 +369,7 @@ static int get_hw_drv_version(struct hinic3_lld_dev *lld_dev, const void *buf_in } err = snprintf(ver_info->ver, sizeof(ver_info->ver), "%s %s", HINIC3_DRV_VERSION, - "2023-05-17_19:56:38"); + "2025-05-01_00:00:03"); if (err < 0) return -EINVAL; @@ -386,22 +404,38 @@ static int get_pf_id(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_ return 0; } +static int get_mbox_cnt(struct hinic3_lld_dev *lld_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + if (buf_out == NULL || *out_size != sizeof(struct card_mbox_cnt_info)) { + pr_err("buf_out is NULL, or out_size != %lu\n", + sizeof(struct card_info)); + return -EINVAL; + } + + hinic3_get_mbox_cnt(hinic3_get_sdk_hwdev_by_lld(lld_dev), buf_out); + + return 0; +} + struct hw_drv_module_handle hw_driv_module_cmd_handle[] = { - {FUNC_TYPE, get_func_type}, - {GET_FUNC_IDX, get_func_id}, - {GET_HW_STATS, (hw_driv_module)get_hw_driver_stats}, - {CLEAR_HW_STATS, clear_hw_driver_stats}, - {GET_SELF_TEST_RES, get_self_test_result}, - {GET_CHIP_FAULT_STATS, (hw_driv_module)get_chip_faults_stats}, - {GET_SINGLE_CARD_INFO, (hw_driv_module)get_single_card_info}, - {IS_DRV_IN_VM, is_driver_in_vm}, - {GET_CHIP_ID, get_all_chip_id_cmd}, - {GET_PF_DEV_INFO, get_pf_dev_info}, - {CMD_FREE_MEM, free_knl_mem}, - {GET_CHIP_INFO, get_card_func_info}, - {GET_FUNC_CAP, get_pf_cap_info}, 
- {GET_DRV_VERSION, get_hw_drv_version}, - {GET_PF_ID, get_pf_id}, + {FUNC_TYPE, get_func_type}, + {GET_FUNC_IDX, get_func_id}, + {GET_HW_STATS, (hw_driv_module)get_hw_driver_stats}, + {CLEAR_HW_STATS, clear_hw_driver_stats}, + {GET_SELF_TEST_RES, get_self_test_result}, + {GET_CHIP_FAULT_STATS, (hw_driv_module)get_chip_faults_stats}, + {GET_SINGLE_CARD_INFO, (hw_driv_module)get_single_card_info}, + {IS_DRV_IN_VM, is_driver_in_vm}, + {GET_CHIP_ID, get_all_chip_id_cmd}, + {GET_PF_DEV_INFO, get_pf_dev_info}, + {CMD_FREE_MEM, free_knl_mem}, + {GET_CHIP_INFO, get_card_func_info}, + {GET_FUNC_CAP, get_pf_cap_info}, + {GET_DRV_VERSION, get_hw_drv_version}, + {GET_PF_ID, get_pf_id}, + {GET_OS_HOT_REPLACE_INFO, get_os_hot_replace_info}, + {GET_MBOX_CNT, (hw_driv_module)get_mbox_cnt}, }; static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size, @@ -439,8 +473,8 @@ static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg, static int send_to_hw_driver(struct hinic3_lld_dev *lld_dev, struct msg_module *nt_msg, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size) { - int index, num_cmds = sizeof(hw_driv_module_cmd_handle) / - sizeof(hw_driv_module_cmd_handle[0]); + int index, num_cmds = (int)(sizeof(hw_driv_module_cmd_handle) / + sizeof(hw_driv_module_cmd_handle[0])); enum driver_cmd_type cmd_type = (enum driver_cmd_type)(nt_msg->msg_formate); int err = 0; @@ -482,8 +516,8 @@ static int send_to_service_driver(struct hinic3_lld_dev *lld_dev, struct msg_mod if (nt_msg->msg_formate == GET_DRV_VERSION) return 0; - pr_err("Can not get the uld dev correctly: %s, %s driver may be not register\n", - nt_msg->device_name, service_name[type]); + pr_err("Can not get the uld dev correctly: %s driver may be not register\n", + service_name[type]); return -EINVAL; } @@ -550,26 +584,29 @@ static struct hinic3_lld_dev *get_lld_dev_by_nt_msg(struct msg_module *nt_msg) { struct hinic3_lld_dev *lld_dev = NULL; - if (nt_msg->module >= SEND_TO_SRV_DRV_BASE && nt_msg->module < SEND_TO_DRIVER_MAX && - nt_msg->module != SEND_TO_HW_DRIVER && nt_msg->msg_formate != GET_DRV_VERSION) { + if (nt_msg->module == SEND_TO_NIC_DRIVER && + (nt_msg->msg_formate == GET_XSFP_INFO || + nt_msg->msg_formate == GET_XSFP_PRESENT || + nt_msg->msg_formate == GET_XSFP_INFO_COMP_CMIS)) { + lld_dev = hinic3_get_lld_dev_by_chip_and_port(nt_msg->device_name, nt_msg->port_id); + } else if (nt_msg->module == SEND_TO_CUSTOM_DRIVER && + nt_msg->msg_formate == CMD_CUSTOM_BOND_GET_CHIP_NAME) { + lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name, + SERVICE_T_MAX); + } else if (nt_msg->module == SEND_TO_VBS_DRIVER || + nt_msg->module == SEND_TO_BIFUR_DRIVER) { + lld_dev = hinic3_get_lld_dev_by_chip_name(nt_msg->device_name); + } else if (nt_msg->module >= SEND_TO_SRV_DRV_BASE && + nt_msg->module < SEND_TO_DRIVER_MAX && + nt_msg->msg_formate != GET_DRV_VERSION) { lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name, nt_msg->module - SEND_TO_SRV_DRV_BASE); } else { lld_dev = hinic3_get_lld_dev_by_chip_name(nt_msg->device_name); if (!lld_dev) - lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name, - SERVICE_T_MAX); + lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name, SERVICE_T_MAX); } - if (nt_msg->module == SEND_TO_NIC_DRIVER && (nt_msg->msg_formate == GET_XSFP_INFO || - nt_msg->msg_formate == GET_XSFP_PRESENT)) - lld_dev = hinic3_get_lld_dev_by_chip_and_port(nt_msg->device_name, - nt_msg->port_id); - - if (nt_msg->module == SEND_TO_CUSTOM_DRIVER && - nt_msg->msg_formate == 
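/*
 * send_to_hw_driver() above walks hw_driv_module_cmd_handle[], an array of
 * {command, handler} pairs, to route nictool requests such as GET_MBOX_CNT
 * and GET_OS_HOT_REPLACE_INFO. A self-contained sketch of that table-driven
 * dispatch; the demo_* names and commands are illustrative only.
 */
#include <stddef.h>
#include <stdio.h>

typedef int (*demo_handler_t)(const void *buf_in, void *buf_out);

enum demo_cmd { DEMO_GET_FUNC_TYPE, DEMO_GET_DRV_VERSION, DEMO_GET_MBOX_CNT };

static int demo_get_func_type(const void *in, void *out) { (void)in; (void)out; return 0; }
static int demo_get_drv_version(const void *in, void *out) { (void)in; (void)out; return 0; }
static int demo_get_mbox_cnt(const void *in, void *out) { (void)in; (void)out; return 0; }

static const struct {
	enum demo_cmd cmd;
	demo_handler_t handler;
} demo_cmd_table[] = {
	{ DEMO_GET_FUNC_TYPE,   demo_get_func_type },
	{ DEMO_GET_DRV_VERSION, demo_get_drv_version },
	{ DEMO_GET_MBOX_CNT,    demo_get_mbox_cnt },
};

static int demo_dispatch(enum demo_cmd cmd, const void *in, void *out)
{
	size_t i, n = sizeof(demo_cmd_table) / sizeof(demo_cmd_table[0]);

	for (i = 0; i < n; i++) {
		if (demo_cmd_table[i].cmd == cmd)
			return demo_cmd_table[i].handler(in, out);
	}
	return -1;	/* unknown command, like -EINVAL in the driver */
}

int main(void)
{
	return demo_dispatch(DEMO_GET_MBOX_CNT, NULL, NULL);
}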
CMD_CUSTOM_BOND_GET_CHIP_NAME) - lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name, SERVICE_T_MAX); - return lld_dev; } @@ -591,14 +628,16 @@ static long hinicadm_k_unlocked_ioctl(struct file *pfile, unsigned long arg) lld_dev = get_lld_dev_by_nt_msg(&nt_msg); if (!lld_dev) { if (nt_msg.msg_formate != DEV_NAME_TEST) - pr_err("Can not find device %s for module %d\n", + pr_err("Can not find device %s for module %u\n", nt_msg.device_name, nt_msg.module); return -ENODEV; } - if (nt_msg.msg_formate == DEV_NAME_TEST) + if (nt_msg.msg_formate == DEV_NAME_TEST) { + lld_dev_put(lld_dev); return 0; + } ret = alloc_tmp_buf(hinic3_get_sdk_hwdev_by_lld(lld_dev), &nt_msg, in_size, &buf_in, out_size_expect, &buf_out); @@ -642,6 +681,9 @@ static long hinicadm_k_unlocked_ioctl(struct file *pfile, unsigned long arg) static long dbgtool_knl_ffm_info_rd(struct dbgtool_param *para, struct dbgtool_k_glb_info *dbgtool_info) { + if (!para->param.ffm_rd || !dbgtool_info->ffm) + return -EINVAL; + /* Copy the ffm_info to user mode */ if (copy_to_user(para->param.ffm_rd, dbgtool_info->ffm, (unsigned int)sizeof(struct ffm_record_info))) { @@ -674,13 +716,14 @@ static long dbgtool_k_unlocked_ioctl(struct file *pfile, card_info = (struct card_node *)g_card_node_array[i]; if (!card_info) continue; - if (!strncmp(param.chip_name, card_info->chip_name, IFNAMSIZ)) + if (memcmp(param.chip_name, card_info->chip_name, + strlen(card_info->chip_name) + 1) == 0) break; } if (i == MAX_CARD_NUM || !card_info) { lld_put(); - pr_err("Can't find this card %s\n", param.chip_name); + pr_err("Can't find this card.\n"); return -EFAULT; } @@ -701,6 +744,7 @@ static long dbgtool_k_unlocked_ioctl(struct file *pfile, default: pr_err("Dbgtool cmd(0x%x) not support now\n", real_cmd); ret = -EFAULT; + break; } up(&dbgtool_info->dbgtool_sem); @@ -746,9 +790,11 @@ static long nictool_k_unlocked_ioctl(struct file *pfile, static int hinic3_mem_mmap(struct file *filp, struct vm_area_struct *vma) { + pgprot_t vm_page_prot; unsigned long vmsize = vma->vm_end - vma->vm_start; phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; phys_addr_t phy_addr; + int err = 0; if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) { pr_err("Map size = %lu is bigger than alloc\n", vmsize); @@ -757,14 +803,18 @@ static int hinic3_mem_mmap(struct file *filp, struct vm_area_struct *vma) /* old version of tool set vma->vm_pgoff to 0 */ phy_addr = offset ? 
offset : g_card_phy_addr[card_id]; - - if (!phy_addr) { - pr_err("Card_id = %d physical address is 0\n", card_id); - return -EAGAIN; + /* check phy_addr valid */ + if (phy_addr != g_card_phy_addr[card_id]) { + err = hinic3_bar_mmap_param_valid(phy_addr, vmsize); + if (err != 0) { + pr_err("mmap param invalid, err: %d\n", err); + return err; + } } /* Disable cache and write buffer in the mapping area */ - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_page_prot = vm_page_prot; if (remap_pfn_range(vma, vma->vm_start, (phy_addr >> PAGE_SHIFT), vmsize, vma->vm_page_prot)) { pr_err("Remap pfn range failed.\n"); @@ -787,7 +837,6 @@ static const struct file_operations fifo_operations = { static void free_dbgtool_info(void *hwdev, struct card_node *chip_info) { struct dbgtool_k_glb_info *dbgtool_info = NULL; - int err, id; if (hinic3_func_type(hwdev) != TYPE_VF) chip_info->func_handle_array[hinic3_global_func_id(hwdev)] = NULL; @@ -795,23 +844,23 @@ static void free_dbgtool_info(void *hwdev, struct card_node *chip_info) if (--chip_info->func_num) return; - err = sscanf(chip_info->chip_name, HINIC3_CHIP_NAME "%d", &id); - if (err < 0) - pr_err("Failed to get card id\n"); - - if (id < MAX_CARD_NUM) - g_card_node_array[id] = NULL; + if (chip_info->chip_id >= 0 && chip_info->chip_id < MAX_CARD_NUM) + g_card_node_array[chip_info->chip_id] = NULL; dbgtool_info = chip_info->dbgtool_info; /* FFM deinit */ - kfree(dbgtool_info->ffm); - dbgtool_info->ffm = NULL; + if (dbgtool_info && dbgtool_info->ffm) { + kfree(dbgtool_info->ffm); + dbgtool_info->ffm = NULL; + } + + if (dbgtool_info) + kfree(dbgtool_info); - kfree(dbgtool_info); chip_info->dbgtool_info = NULL; - if (id < MAX_CARD_NUM) - (void)dbgtool_knl_free_mem(id); + if (chip_info->chip_id >= 0 && chip_info->chip_id < MAX_CARD_NUM) + dbgtool_knl_free_mem(chip_info->chip_id); } static int alloc_dbgtool_info(void *hwdev, struct card_node *chip_info) @@ -872,7 +921,6 @@ static int alloc_dbgtool_info(void *hwdev, struct card_node *chip_info) * nictool_k_init - initialize the hw interface **/ /* temp for dbgtool_info */ -/*lint -e438*/ int nictool_k_init(void *hwdev, void *chip_node) { struct card_node *chip_info = (struct card_node *)chip_node; @@ -895,9 +943,7 @@ int nictool_k_init(void *hwdev, void *chip_node) } /* Create equipment */ - /*lint -save -e160*/ g_nictool_class = class_create(THIS_MODULE, HIADM3_DEV_CLASS); - /*lint -restore*/ if (IS_ERR(g_nictool_class)) { pr_err("Create nictool_class fail\n"); err = -EFAULT; @@ -944,7 +990,7 @@ int nictool_k_init(void *hwdev, void *chip_node) free_dbgtool_info(hwdev, chip_info); return err; -} /*lint +e438*/ +} void nictool_k_uninit(void *hwdev, void *chip_node) { diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h index f368133e341e9d6f625846ac66ad8555a0750504..c943dfccc6f8f77a4c23c827fe9ba8ba29e1cdfa 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h @@ -18,6 +18,8 @@ int nictool_k_init(void *hwdev, void *chip_node); void nictool_k_uninit(void *hwdev, void *chip_node); +void hinic3_get_os_hot_replace_info(void *oshr_info); + void hinic3_get_all_chip_id(void *id_info); void hinic3_get_card_func_info_by_card_name @@ -29,6 +31,8 @@ bool hinic3_is_in_host(void); int hinic3_get_pf_id(struct card_node *chip_node, u32 port_id, u32 *pf_id, u32 *isvalid); +void hinic3_get_mbox_cnt(const void 
*hwdev, void *bufin); + extern struct hinic3_uld_info g_uld_info[SERVICE_T_MAX]; #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h index 2d5423e53fbc7c4ed00db1794d29635c6203a104..6f145a0da609a5c5af6603db96b11c77092bc0cf 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h @@ -19,6 +19,20 @@ #define HINIC3_DEV_ID_SPU 0xAC00 #define HINIC3_DEV_SDI_5_1_SSDID_VF 0x1000 #define HINIC3_DEV_SDI_V100_SSDID_MASK (3 << 12) +#elif defined(CONFIG_NF_VID_DID) +#define PCI_VENDOR_ID_NF 0x2036 +#define NFNIC_DEV_ID_STANDARD 0x1618 +#define NFNIC_DEV_ID_SDI_5_1_PF 0x0226 +#define NFNIC_DEV_ID_SDI_5_0_PF 0x0225 +#define NFNIC_DEV_ID_DPU_PF 0x0224 +#define NFNIC_DEV_ID_VF 0x1619 +#define NFNIC_DEV_ID_VF_HV 0x379F +#define NFNIC_DEV_SDI_5_1_ID_VF 0x375F +#define NFNIC_DEV_SDI_5_0_ID_VF 0x375F +#define NFNIC_DEV_SDI_5_1_ID_VF_HV 0x379F +#define NFNIC_DEV_ID_SPU 0xAC00 +#define NFNIC_DEV_SDI_5_1_SSDID_VF 0x1000 +#define NFNIC_DEV_SDI_V100_SSDID_MASK (3 << 12) #else #define PCI_VENDOR_ID_HUAWEI 0x19e5 #define HINIC3_DEV_ID_STANDARD 0x0222 @@ -28,16 +42,33 @@ #define HINIC3_DEV_ID_VF 0x375F #define HINIC3_DEV_ID_VF_HV 0x379F #define HINIC3_DEV_SDI_5_1_ID_VF 0x375F +#define HINIC3_DEV_SDI_5_0_ID_VF 0x375F #define HINIC3_DEV_SDI_5_1_ID_VF_HV 0x379F #define HINIC3_DEV_ID_SPU 0xAC00 #define HINIC3_DEV_SDI_5_1_SSDID_VF 0x1000 #define HINIC3_DEV_SDI_V100_SSDID_MASK (3 << 12) #endif +#define NFNIC_DEV_SSID_2X25G_NF 0x0860 +#define NFNIC_DEV_SSID_4X25G_NF 0x0861 +#define NFNIC_DEV_SSID_2x100G_NF 0x0862 +#define NFNIC_DEV_SSID_2x200G_NF 0x0863 + +#define HINIC3_DEV_SSID_2X10G 0x0035 #define HINIC3_DEV_SSID_2X25G 0x0051 #define HINIC3_DEV_SSID_4X25G 0x0052 +#define HINIC3_DEV_SSID_4X25G_BD 0x0252 +#define HINIC3_DEV_SSID_4X25G_SMARTNIC 0x0152 +#define HINIC3_DEV_SSID_6X25G_VL 0x0356 #define HINIC3_DEV_SSID_2X100G 0x00A1 +#define HINIC3_DEV_SSID_2X100G_SMARTNIC 0x01A1 +#define HINIC3_DEV_SSID_2X200G 0x04B1 #define HINIC3_DEV_SSID_2X100G_VF 0x1000 +#define HINIC3_DEV_SSID_HPC_4_HOST_NIC 0x005A +#define HINIC3_DEV_SSID_2X200G_VL 0x00B1 +#define HINIC3_DEV_SSID_1X100G 0x02A4 + +#define BIFUR_RESOURCE_PF_SSID 0x05a1 #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.c index b23b69f3dbe7dcccc5cad0e277aab0a62000b06d..c8258ffa193647787821aa984b62ff2f1406b27e 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_sriov.c @@ -68,7 +68,6 @@ ssize_t hinic3_sriov_numvfs_show(struct device *dev, return sprintf(buf, "%d\n", pci_num_vf(pdev)); } -/*lint -save -e713*/ ssize_t hinic3_sriov_numvfs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -116,7 +115,6 @@ ssize_t hinic3_sriov_numvfs_store(struct device *dev, return count; } -/*lint -restore*/ #endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */ int hinic3_pci_sriov_disable(struct pci_dev *dev) @@ -174,24 +172,15 @@ int hinic3_pci_sriov_disable(struct pci_dev *dev) return 0; } -int hinic3_pci_sriov_enable(struct pci_dev *dev, int num_vfs) -{ #ifdef CONFIG_PCI_IOV - struct hinic3_sriov_info *sriov_info = NULL; - struct hinic3_event_info event = {0}; - void *hwdev = NULL; - int pre_existing_vfs = 0; - int err = 0; - - sriov_info = hinic3_get_sriov_info_by_pcidev(dev); - hwdev = hinic3_get_hwdev_by_pcidev(dev); - if (!hwdev) { - 
sdk_err(&dev->dev, "SR-IOV enable is not permitted, please wait...\n"); - return -EPERM; - } +int hinic3_pci_sriov_check(struct hinic3_sriov_info *sriov_info, struct pci_dev *dev, int num_vfs) +{ + int pre_existing_vfs; + int err; if (test_and_set_bit(HINIC3_SRIOV_ENABLE, &sriov_info->state)) { - sdk_err(&dev->dev, "SR-IOV enable in process, please wait, num_vfs %d\n", + sdk_err(&dev->dev, + "SR-IOV enable in process, please wait, num_vfs %d\n", num_vfs); return -EPERM; } @@ -202,6 +191,7 @@ int hinic3_pci_sriov_enable(struct pci_dev *dev, int num_vfs) clear_bit(HINIC3_SRIOV_ENABLE, &sriov_info->state); return -ERANGE; } + if (pre_existing_vfs && pre_existing_vfs != num_vfs) { err = hinic3_pci_sriov_disable(dev); if (err) { @@ -213,6 +203,31 @@ int hinic3_pci_sriov_enable(struct pci_dev *dev, int num_vfs) return num_vfs; } + return 0; +} +#endif + + +int hinic3_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct hinic3_sriov_info *sriov_info = NULL; + struct hinic3_event_info event = {0}; + void *hwdev = NULL; + int err = 0; + + sriov_info = hinic3_get_sriov_info_by_pcidev(dev); + hwdev = hinic3_get_hwdev_by_pcidev(dev); + if (!hwdev) { + sdk_err(&dev->dev, "SR-IOV enable is not permitted, please wait...\n"); + return -EPERM; + } + + err = hinic3_pci_sriov_check(sriov_info, dev, num_vfs); + if (err != 0) { + return err; + } + err = hinic3_init_vf_hw(hwdev, 1, (u16)num_vfs); if (err) { sdk_err(&dev->dev, "Failed to init vf in hardware before enable sriov, error %d\n", @@ -262,6 +277,3 @@ int hinic3_pci_sriov_configure(struct pci_dev *dev, int num_vfs) else return hinic3_pci_sriov_enable(dev, num_vfs); } - -/*lint -restore*/ - diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_wq.c index 2f5e0984e429ec2369d63397d5ee056a043c0984..4f8acd63644d35b357b017cd0a0bc99c18a20701 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_wq.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_wq.c @@ -128,7 +128,7 @@ int hinic3_wq_create(void *hwdev, struct hinic3_wq *wq, u32 q_depth, wq_page_size = ALIGN(dev->wq_page_size, PAGE_SIZE); - memset(wq, 0, sizeof(*wq)); + memset(wq, 0, sizeof(struct hinic3_wq)); wq->dev_hdl = dev->dev_hdl; wq->q_depth = q_depth; wq->idx_mask = (u16)(q_depth - 1); diff --git a/drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h b/drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h index 9e8fc750c0012a3859955dc4e87c3e57751c2a4f..bfb44993824c2f1992166316ec55cbe7ddf21a68 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h +++ b/drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h @@ -10,6 +10,10 @@ #define OVS_PORT_NUM_MAX BOND_PORT_MAX_NUM #define DEFAULT_ROCE_BOND_FUNC 0xFFFFFFFF +#define BOND_ID_IS_VALID(_id) \ + (((_id) >= BOND_FIRST_ID) && ((_id) <= BOND_MAX_ID)) +#define BOND_ID_IS_INVALID(_id) (!(BOND_ID_IS_VALID(_id))) + enum bond_group_id { BOND_FIRST_ID = 1, BOND_MAX_ID = 4, diff --git a/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h index f56df083a059c51951d995684ef6180f6dbadd4b..f9737ea23e8a3dd4a61c6363bd43f60d66469973 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h +++ b/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h @@ -21,6 +21,7 @@ enum servic_bit_define { SERVICE_BIT_PPA = 11, SERVICE_BIT_MIGRATE = 12, SERVICE_BIT_VROCE = 13, 
+ SERVICE_BIT_BIFUR = 14, SERVICE_BIT_MAX }; @@ -38,6 +39,7 @@ enum servic_bit_define { #define CFG_SERVICE_MASK_PPA (0x1 << SERVICE_BIT_PPA) #define CFG_SERVICE_MASK_MIGRATE (0x1 << SERVICE_BIT_MIGRATE) #define CFG_SERVICE_MASK_VROCE (0x1 << SERVICE_BIT_VROCE) +#define CFG_SERVICE_MASK_BIFUR (0x1 << SERVICE_BIT_BIFUR) /* Definition of the scenario ID in the cfg_data, which is used for SML memory allocation. */ enum scenes_id_define { @@ -50,6 +52,8 @@ enum scenes_id_define { SCENES_ID_STORAGE_ROCE = 6, SCENES_ID_COMPUTE_ROCE = 7, SCENES_ID_STORAGE_TOE = 8, + SCENES_ID_COMPUTE_DPU = 100, + SCENES_ID_COMPUTE_SMART_NIC = 101, SCENES_ID_MAX }; @@ -120,13 +124,18 @@ struct cfg_cmd_dev_cap { u8 srv_multi_host_mode; u8 virtio_vq_size; - u32 rsvd_func3[5]; + u8 hot_plug_disable; + u8 bond_create_mode; + u8 lro_enable; + u8 os_hot_replace; + + u32 rsvd_func3[4]; /* l2nic */ u16 nic_max_sq_id; u16 nic_max_rq_id; u16 nic_default_num_queues; - u16 rsvd1_nic; + u16 outband_vlan_cfg_en; u32 rsvd2_nic[2]; /* RoCE */ diff --git a/drivers/net/ethernet/huawei/hinic3/include/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/include/hinic3_common.h index d9f67e26a7513a0b1a2d572f662692460b683f56..6c5b995cc8a89b2846ce8da0cf6a2751e7fd338c 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/hinic3_common.h +++ b/drivers/net/ethernet/huawei/hinic3/include/hinic3_common.h @@ -37,7 +37,7 @@ static inline void hinic3_cpu_to_be32(void *data, int len) { int i, chunk_sz = sizeof(u32); int data_len = len; - u32 *mem = data; + u32 *mem = (u32 *)data; if (!data) return; @@ -59,7 +59,7 @@ static inline void hinic3_be32_to_cpu(void *data, int len) { int i, chunk_sz = sizeof(u32); int data_len = len; - u32 *mem = data; + u32 *mem = (u32 *)data; if (!data) return; @@ -86,52 +86,16 @@ static inline void hinic3_set_sge(struct hinic3_sge *sge, dma_addr_t addr, sge->len = (u32)len; } -#ifdef HW_CONVERT_ENDIAN #define hinic3_hw_be32(val) (val) #define hinic3_hw_cpu32(val) (val) #define hinic3_hw_cpu16(val) (val) -#else -#define hinic3_hw_be32(val) cpu_to_be32(val) -#define hinic3_hw_cpu32(val) be32_to_cpu(val) -#define hinic3_hw_cpu16(val) be16_to_cpu(val) -#endif static inline void hinic3_hw_be32_len(void *data, int len) { -#ifndef HW_CONVERT_ENDIAN - int i, chunk_sz = sizeof(u32); - int data_len = len; - u32 *mem = data; - - if (!data) - return; - - data_len = data_len / chunk_sz; - - for (i = 0; i < data_len; i++) { - *mem = hinic3_hw_be32(*mem); - mem++; - } -#endif } static inline void hinic3_hw_cpu32_len(void *data, int len) { -#ifndef HW_CONVERT_ENDIAN - int i, chunk_sz = sizeof(u32); - int data_len = len; - u32 *mem = data; - - if (!data) - return; - - data_len = data_len / chunk_sz; - - for (i = 0; i < data_len; i++) { - *mem = hinic3_hw_cpu32(*mem); - mem++; - } -#endif } int hinic3_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, @@ -149,6 +113,8 @@ int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler, /* func_attr.glb_func_idx, global function index */ u16 hinic3_global_func_id(void *hwdev); +int hinic3_global_func_id_get(void *hwdev, u16 *func_id); + /* func_attr.p2p_idx, belongs to which pf */ u8 hinic3_pf_id_of_vf(void *hwdev); diff --git a/drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h index 9a9bfe280df8181ea5e7a064d2f7153da9f5a6fe..e36ba1d90f28b25988eaf12f47886e7d37318a1b 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h +++ b/drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h @@ -219,5 
+219,7 @@ void hinic3_detach_nic(const struct hinic3_lld_dev *lld_dev); int hinic3_attach_service(const struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); void hinic3_detach_service(const struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); const char **hinic3_get_uld_names(void); +int hinic3_lld_init(void); +void hinic3_lld_exit(void); #endif #endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h index 97d34f0c21f4ae0f088b6b61cfafc086ba43545f..199f17a6b526d7838c1d6f698191c43a1541bb3b 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h @@ -43,6 +43,10 @@ enum mag_cmd { MAG_CMD_GET_XSFP_PRESENT = 62, /* get xsfp present status @see mag_cmd_get_xsfp_present */ MAG_CMD_SET_XSFP_RW = 63, /* sfp/qsfp single byte read/write, @see mag_cmd_set_xsfp_rw */ MAG_CMD_CFG_XSFP_TEMPERATURE = 64, /* get xsfp temp @see mag_cmd_sfp_temp_out_info */ + /**< set xsfp tlv info @see struct mag_cmd_set_xsfp_tlv_req */ + MAG_CMD_SET_XSFP_TLV_INFO = 65, + /**< get xsfp tlv info @see struct drv_mag_cmd_get_xsfp_tlv_rsp */ + MAG_CMD_GET_XSFP_TLV_INFO = 66, /* Event reported 100-149 */ MAG_CMD_WIRE_EVENT = 100, diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_board_defs.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_board_defs.h index caaba5dfb652b0532585543478435db979cd8eae..88a9c0d541fc1e81e392b78deb61ce52f854a269 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_board_defs.h +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_board_defs.h @@ -16,32 +16,43 @@ #define BOARD_TYPE_RSVD_RANGE_END 255 enum board_type_define_e { - BOARD_TYPE_MPU_DEFAULT = 0, /* Default config */ - BOARD_TYPE_TEST_EVB_4X25G = 1, /* EVB Board */ - BOARD_TYPE_TEST_CEM_2X100G = 2, /* 2X100G CEM Card */ - BOARD_TYPE_STRG_SMARTIO_4X32G_FC = 30, /* 4X32G SmartIO FC Card */ - BOARD_TYPE_STRG_SMARTIO_4X25G_TIOE = 31, /* 4X25GE SmartIO TIOE Card */ - BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE = 32, /* 4X25GE SmartIO ROCE Card */ - BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE_AA = 33, /* 4X25GE SmartIO ROCE_AA Card */ - BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV = 34, /* 4X25GE SmartIO container Card */ - BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV_SW = 35, /* 4X25GE SmartIO container switch Card */ - BOARD_TYPE_STRG_4X25G_COMSTORAGE = 36, /* 4X25GE compute storage Onboard Card */ - BOARD_TYPE_STRG_2X100G_TIOE = 40, /* 2X100G SmartIO TIOE Card */ - BOARD_TYPE_STRG_2X100G_ROCE = 41, /* 2X100G SmartIO ROCE Card */ - BOARD_TYPE_STRG_2X100G_ROCE_AA = 42, /* 2X100G SmartIO ROCE_AA Card */ - BOARD_TYPE_CAL_2X25G_NIC_75MPPS = 100, /* 2X25G ETH Standard card 75MPPS */ - BOARD_TYPE_CAL_2X25G_NIC_40MPPS = 101, /* 2X25G ETH Standard card 40MPPS */ - BOARD_TYPE_CAL_2X100G_DPU = 102, /* 2x100G DPU Card */ - BOARD_TYPE_CAL_4X25G_NIC_120MPPS = 105, /* 4X25G ETH Standard card 120MPPS */ - BOARD_TYPE_CAL_4X25G_COMSTORAGE = 106, /* 4X25GE compute storage Onboard Card */ - BOARD_TYPE_CAL_2X32G_FC_HBA = 110, /* 2X32G FC HBA card */ - BOARD_TYPE_CAL_2X16G_FC_HBA = 111, /* 2X16G FC HBA card */ - BOARD_TYPE_CAL_2X100G_NIC_120MPPS = 115, /* 2X100G ETH Standard card 120MPPS */ - BOARD_TYPE_CAL_2X25G_DPU = 116, /* 2x25G DPU Card */ - BOARD_TYPE_CAL_4X25G_DPU = 118, /* 4x25G DPU Card */ - BOARD_TYPE_CLD_2X100G_SDI5_1 = 170, /* 2X100G SDI 5.1 Card */ - BOARD_TYPE_CLD_2X25G_SDI5_0_LITE = 171, /* 2x25G SDI5.0 Lite Card */ - BOARD_TYPE_CLD_2X100G_SDI5_0 = 172, /* 2x100G 
SDI5.0 Card */ + BOARD_TYPE_MPU_DEFAULT = 0, + BOARD_TYPE_TEST_EVB_4X25G = 1, + BOARD_TYPE_TEST_CEM_2X100G = 2, + BOARD_TYPE_STRG_SMARTIO_4X32G_FC = 30, + BOARD_TYPE_STRG_SMARTIO_4X25G_TIOE = 31, + BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE = 32, + BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE_AA = 33, + BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV = 34, + BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV_SW = 35, + BOARD_TYPE_STRG_4X25G_COMSTORAGE = 36, + BOARD_TYPE_STRG_2X100G_TIOE = 40, + BOARD_TYPE_STRG_2X100G_ROCE = 41, + BOARD_TYPE_STRG_2X100G_ROCE_AA = 42, + BOARD_TYPE_CAL_2X25G_NIC_75MPPS = 100, + BOARD_TYPE_CAL_2X25G_NIC_40MPPS = 101, + BOARD_TYPE_CAL_2X100G_DPU_VL = 102, + BOARD_TYPE_CAL_4X25G_NIC_120MPPS = 105, + BOARD_TYPE_CAL_4X25G_COMSTORAGE = 106, + BOARD_TYPE_CAL_2X32G_FC_HBA = 110, + BOARD_TYPE_CAL_2X16G_FC_HBA = 111, + BOARD_TYPE_CAL_2X100G_NIC_120MPPS = 115, + BOARD_TYPE_CAL_2X25G_DPU_BD = 116, + BOARD_TYPE_CAL_2X100G_TCE_BACKPLANE = 117, + BOARD_TYPE_CAL_4X25G_DPU_VL = 118, + BOARD_TYPE_CAL_4X25G_SMARTNIC_120MPPS = 119, + BOARD_TYPE_CAL_2X100G_SMARTNIC_120MPPS = 120, + BOARD_TYPE_CAL_6X25G_DPU_VL = 121, + BOARD_TYPE_CAL_4X25G_DPU_BD = 122, + BOARD_TYPE_CAL_2X25G_NIC_4HOST = 123, + BOARD_TYPE_CAL_2X10G_LOW_POWER = 125, + BOARD_TYPE_CAL_2X200G_NIC_INTERNET = 127, + BOARD_TYPE_CAL_1X100GR2_OCP = 129, + BOARD_TYPE_CAL_2X200G_DPU_VL = 130, + BOARD_TYPE_CLD_2X100G_SDI5_1 = 170, + BOARD_TYPE_CLD_2X25G_SDI5_0_LITE = 171, + BOARD_TYPE_CLD_2X100G_SDI5_0 = 172, + BOARD_TYPE_CLD_4X25G_SDI5_0_C = 175, BOARD_TYPE_MAX_INDEX = 0xFF }; @@ -49,23 +60,76 @@ static inline u32 spu_board_type_valid(u32 board_type) { return ((board_type) == BOARD_TYPE_CLD_2X25G_SDI5_0_LITE) || ((board_type) == BOARD_TYPE_CLD_2X100G_SDI5_0) || - ((board_type) == BOARD_TYPE_CAL_2X25G_DPU) || - ((board_type) == BOARD_TYPE_CAL_2X100G_DPU) || - ((board_type) == BOARD_TYPE_CAL_4X25G_DPU); + ((board_type) == BOARD_TYPE_CLD_4X25G_SDI5_0_C) || + ((board_type) == BOARD_TYPE_CAL_2X25G_DPU_BD) || + ((board_type) == BOARD_TYPE_CAL_2X100G_DPU_VL) || + ((board_type) == BOARD_TYPE_CAL_4X25G_DPU_VL) || + ((board_type) == BOARD_TYPE_CAL_4X25G_DPU_BD) || + ((board_type) == BOARD_TYPE_CAL_2X200G_DPU_VL); +} + +static inline int board_type_is_sdi_50(u32 board_type) +{ + return ((board_type) == BOARD_TYPE_CLD_2X25G_SDI5_0_LITE) || + ((board_type) == BOARD_TYPE_CLD_2X100G_SDI5_0) || + ((board_type) == BOARD_TYPE_CLD_4X25G_SDI5_0_C); } static inline int board_type_is_sdi(u32 board_type) { return ((board_type) == BOARD_TYPE_CLD_2X100G_SDI5_1) || ((board_type) == BOARD_TYPE_CLD_2X25G_SDI5_0_LITE) || - ((board_type) == BOARD_TYPE_CLD_2X100G_SDI5_0); + ((board_type) == BOARD_TYPE_CLD_2X100G_SDI5_0) || + ((board_type) == BOARD_TYPE_CLD_4X25G_SDI5_0_C); +} + +static inline int board_type_is_dpu_spu(u32 board_type) +{ + return ((board_type) == BOARD_TYPE_CAL_2X25G_DPU_BD) || + ((board_type) == BOARD_TYPE_CAL_2X100G_DPU_VL) || + ((board_type) == BOARD_TYPE_CAL_4X25G_DPU_VL) || + ((board_type) == BOARD_TYPE_CAL_4X25G_DPU_BD) || + ((board_type) == BOARD_TYPE_CAL_2X200G_DPU_VL); } static inline int board_type_is_dpu(u32 board_type) { - return ((board_type) == BOARD_TYPE_CAL_2X25G_DPU) || - ((board_type) == BOARD_TYPE_CAL_2X100G_DPU) || - ((board_type) == BOARD_TYPE_CAL_4X25G_DPU); + return ((board_type) == BOARD_TYPE_CAL_2X25G_DPU_BD) || + ((board_type) == BOARD_TYPE_CAL_2X100G_DPU_VL) || + ((board_type) == BOARD_TYPE_CAL_4X25G_DPU_VL) || + ((board_type) == BOARD_TYPE_CAL_4X25G_DPU_BD) || + ((board_type) == BOARD_TYPE_CAL_6X25G_DPU_VL) || + ((board_type) == 
BOARD_TYPE_CAL_2X200G_DPU_VL); +} + +static inline int board_type_is_smartnic(u32 board_type) +{ + return ((board_type) == BOARD_TYPE_CAL_4X25G_SMARTNIC_120MPPS) || + ((board_type) == BOARD_TYPE_CAL_2X100G_SMARTNIC_120MPPS); +} + +/* Check whether the board is a distributed-storage standard card or a compute standard card (with the RoCE feature), + * used only when handling the conflicting LLDP TX command word. + */ +static inline int board_type_is_compute(u32 board_type) +{ + return ((board_type) == BOARD_TYPE_CAL_2X25G_NIC_75MPPS) || + ((board_type) == BOARD_TYPE_CAL_2X25G_NIC_40MPPS) || + ((board_type) == BOARD_TYPE_CAL_4X25G_NIC_120MPPS) || + ((board_type) == BOARD_TYPE_CAL_4X25G_COMSTORAGE) || + ((board_type) == BOARD_TYPE_CAL_2X100G_NIC_120MPPS) || + ((board_type) == BOARD_TYPE_CAL_2X10G_LOW_POWER) || + ((board_type) == BOARD_TYPE_CAL_2X200G_NIC_INTERNET) || + ((board_type) == BOARD_TYPE_CAL_1X100GR2_OCP) || + ((board_type) == BOARD_TYPE_CAL_4X25G_SMARTNIC_120MPPS) || + ((board_type) == BOARD_TYPE_CAL_2X25G_NIC_4HOST) || + ((board_type) == BOARD_TYPE_CAL_2X100G_SMARTNIC_120MPPS); +} + +/* Check whether the NIC needs to be reset when the server issues a reboot */ +static inline int board_type_is_multi_socket(u32 board_type) +{ + return ((board_type) == BOARD_TYPE_CAL_1X100GR2_OCP); } #endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h index 89d5cc42cfd6760375bedf542535f1c4fa268f5a..e65c206bc627d17f9ecf257e48aac5693a86c20b 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h @@ -1,11 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2023. All rights reserved. + * Description : common definitions + */ #ifndef COMM_DEFS_H #define COMM_DEFS_H -#include "mgmt_msg_base.h" - /** MPU CMD MODULE TYPE */ enum hinic3_mod_type { HINIC3_MOD_COMM = 0, /* HW communication module */ @@ -33,16 +33,16 @@ enum hinic3_mod_type { HINIC3_MOD_MAX, }; -/* Func reset flag, Specifies the resource to be cleaned.*/ -enum func_reset_flag_e { +/* func reset flag, used to indicate which resources to clean */ +enum func_reset_flag_e { RES_TYPE_FLUSH_BIT = 0, RES_TYPE_MQM, RES_TYPE_SMF, RES_TYPE_PF_BW_CFG, RES_TYPE_COMM = 10, - RES_TYPE_COMM_MGMT_CH, /* clear mbox and aeq, The RES_TYPE_COMM bit must be set */ - RES_TYPE_COMM_CMD_CH, /* clear cmdq and ceq, The RES_TYPE_COMM bit must be set */ + RES_TYPE_COMM_MGMT_CH, /* clear mbox and aeq, The RES_TYPE_COMM bit must be set */ + RES_TYPE_COMM_CMD_CH, /* clear cmdq and ceq, The RES_TYPE_COMM bit must be set */ RES_TYPE_NIC, RES_TYPE_OVS, RES_TYPE_VBS, @@ -53,33 +53,48 @@ enum func_reset_flag_e { RES_TYPE_MAX, }; -#define HINIC3_COMM_RES \ - ((1 << RES_TYPE_COMM) | (1 << RES_TYPE_COMM_CMD_CH) | \ - (1 << RES_TYPE_FLUSH_BIT) | (1 << RES_TYPE_MQM) | \ - (1 << RES_TYPE_SMF) | (1 << RES_TYPE_PF_BW_CFG)) +#define HINIC3_COMM_RES \ + ((1 << RES_TYPE_COMM) | (1 << RES_TYPE_COMM_CMD_CH) | \ + (1 << RES_TYPE_FLUSH_BIT) | (1 << RES_TYPE_MQM) | \ + (1 << RES_TYPE_SMF) | (1 << RES_TYPE_PF_BW_CFG)) -#define HINIC3_NIC_RES BIT(RES_TYPE_NIC) -#define HINIC3_OVS_RES BIT(RES_TYPE_OVS) -#define HINIC3_VBS_RES BIT(RES_TYPE_VBS) -#define HINIC3_ROCE_RES BIT(RES_TYPE_ROCE)
+#define HINIC3_FC_RES (1 << RES_TYPE_FC) +#define HINIC3_TOE_RES (1 << RES_TYPE_TOE) +#define HINIC3_IPSEC_RES (1 << RES_TYPE_IPSEC) /* MODE OVS、NIC、UNKNOWN */ #define HINIC3_WORK_MODE_OVS 0 #define HINIC3_WORK_MODE_UNKNOWN 1 #define HINIC3_WORK_MODE_NIC 2 -#define DEVICE_TYPE_L2NIC 0 -#define DEVICE_TYPE_NVME 1 -#define DEVICE_TYPE_VIRTIO_NET 2 -#define DEVICE_TYPE_VIRTIO_BLK 3 -#define DEVICE_TYPE_VIRTIO_VSOCK 4 -#define DEVICE_TYPE_VIRTIO_NET_TRANSITION 5 -#define DEVICE_TYPE_VIRTIO_BLK_TRANSITION 6 -#define DEVICE_TYPE_VIRTIO_SCSI_TRANSITION 7 -#define DEVICE_TYPE_VIRTIO_HPC 8 +#define DEVICE_TYPE_L2NIC 0 +#define DEVICE_TYPE_NVME 1 +#define DEVICE_TYPE_VIRTIO_NET 2 +#define DEVICE_TYPE_VIRTIO_BLK 3 +#define DEVICE_TYPE_VIRTIO_VSOCK 4 +#define DEVICE_TYPE_VIRTIO_NET_TRANSITION 5 +#define DEVICE_TYPE_VIRTIO_BLK_TRANSITION 6 +#define DEVICE_TYPE_VIRTIO_SCSI_TRANSITION 7 +#define DEVICE_TYPE_VIRTIO_HPC 8 +#define DEVICE_TYPE_VIRTIO_FS 9 + +#define IS_STORAGE_DEVICE_TYPE(dev_type) \ + ((dev_type) == DEVICE_TYPE_VIRTIO_BLK || \ + (dev_type) == DEVICE_TYPE_VIRTIO_BLK_TRANSITION || \ + (dev_type) == DEVICE_TYPE_VIRTIO_SCSI_TRANSITION || \ + (dev_type) == DEVICE_TYPE_VIRTIO_FS) + +#define MGMT_MSG_CMD_OP_SET 1 +#define MGMT_MSG_CMD_OP_GET 0 + +#define MGMT_MSG_CMD_OP_START 1 +#define MGMT_MSG_CMD_OP_STOP 0 + +#define HOT_REPLACE_PARTITION_NUM 2 enum hinic3_svc_type { SVC_T_COMM = 0, @@ -98,8 +113,17 @@ }; /** - * Common header control information of the COMM message interaction command word - * between the driver and PF. + * Common header control information of the COMM message interaction command word between the driver and PF + * struct mgmt_msg_head and struct comm_info_head are the same structure + */ +struct mgmt_msg_head { + u8 status; + u8 version; + u8 rsvd0[6]; +}; + +/** + * Common header control information of the COMM message interaction command word between the driver and PF */ struct comm_info_head { /** response status code, 0: success, others: error code */ @@ -113,4 +137,29 @@ u8 rsvd[5]; }; + +static inline u32 get_function_partition(u32 function_id, u32 port_num) +{ + return (function_id / port_num) % HOT_REPLACE_PARTITION_NUM; +} + +static inline u32 is_primary_function(u32 function_id, u32 port_num) +{ + return (function_id / port_num) % HOT_REPLACE_PARTITION_NUM == 0; +} + +static inline u32 mpu_nic_get_primary_function(u32 function_id, u32 port_num) +{ + return ((function_id / port_num) % HOT_REPLACE_PARTITION_NUM == 0) ? + function_id : (function_id - port_num); +} + +/* When func_id is in partition 0/1, return its counterpart func_id in partition 1/0 */ +static inline u32 mpu_nic_get_backup_function(u32 function_id, u32 port_num) +{ + return ((function_id / port_num) % HOT_REPLACE_PARTITION_NUM == 0) ?
+ (function_id + port_num) : (function_id - port_num); +} + + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h index b24e72942c679bbaee4f1397a615092ce0dde0c6..fd0401fcc148ec2cd832b7743ce5b4fd45118eb5 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h @@ -89,6 +89,10 @@ enum hinic3_mgmt_cmd { * @see comm_read_ucode_sm_resp */ COMM_MGMT_CMD_CLEAR_LOG, /**< clear log @see comm_cmd_clear_log_s */ + COMM_MGMT_CMD_UCODE_SM_COUNTER_PER, + /**< get ucode sm counter @see struct comm_read_ucode_sm_per_req + * @see struct comm_read_ucode_sm_per_resp + */ COMM_MGMT_CMD_CHECK_IF_SWITCH_WORKMODE = 140, /* check if switch workmode reserved * @see comm_cmd_check_if_switch_workmode @@ -101,6 +105,7 @@ enum hinic3_mgmt_cmd { COMM_MGMT_CMD_CPI_TCAM_DBG, /* enable or disable the scheduled cpi tcam task, * set task interval time @see comm_cmd_cpi_tcam_dbg_s */ + COMM_MGMT_CMD_LLDP_TX_FUNC_SET, COMM_MGMT_CMD_SECTION_RSVD_0 = 160, /**< rsvd0 section */ COMM_MGMT_CMD_SECTION_RSVD_1 = 170, /**< rsvd1 section */ diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h index f535777123454c2cb3a973901ced9abc0fbae4c0..fd3a7dd0b204b093f23726dced7a0699f38d5636 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h @@ -313,7 +313,8 @@ struct comm_cmd_get_fw_version { struct mgmt_msg_head head; u16 fw_type; /**< firmware type @see enum hinic3_fw_ver_type */ - u16 rsvd1; + u16 fw_dfx_vld : 1; /**< 0: release, 1: debug */ + u16 rsvd1 : 15; u8 ver[HINIC3_FW_VERSION_LEN]; /**< firmware version */ u8 time[HINIC3_FW_COMPILE_TIME_LEN]; /**< firmware compile time */ }; @@ -558,6 +559,7 @@ struct nic_log_info_request { #define MPU_TEMP_OP_GET 0 #define MPU_TEMP_THRESHOLD_OP_CFG 1 +#define MPU_TEMP_MCTP_DFX_INFO_GET 2 struct comm_temp_in_info { struct comm_info_head head; u8 opt_type; /**< operation type 0:read operation 1:cfg operation */ @@ -660,6 +662,28 @@ struct comm_read_ucode_sm_resp { u64 val2; }; +#define PER_REQ_MAX_DATA_LEN 0x600 + +struct comm_read_ucode_sm_per_req { + struct mgmt_msg_head msg_head; + + u32 tbl_type; + u32 count_id; +}; + +struct comm_read_ucode_sm_per_resp { + struct mgmt_msg_head msg_head; + + u8 data[PER_REQ_MAX_DATA_LEN]; +}; + +struct ucode_sm_counter_get_info { + u32 width_type; + u32 tbl_type; + unsigned int base_count; + unsigned int count_num; +}; + enum log_type { MPU_LOG_CLEAR = 0, SMU_LOG_CLEAR = 1, @@ -1057,7 +1081,9 @@ struct comm_cmd_ncsi_settings { u8 lldp_over_ncsi_enable; u8 lldp_over_mctp_enable; u32 magicwd; - u8 rsvd[8]; + u8 lldp_tx_enable; + u8 rsvd[3]; + u32 crc; struct tag_ncsi_chan_info ncsi_chan_info; }; diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_ncsi_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_ncsi_cmd_defs.h index fde8617e24958b635782faa89dcc386ff78a18bb..767f8864710d1cc4d7c6a64111b8807019b2ca1d 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_ncsi_cmd_defs.h +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_ncsi_cmd_defs.h @@ -114,6 +114,14 @@ struct tag_ncsi_client_info { #define AEN_CTRL_CONFIG_REQ_SHIFT 1 #define AEN_CTRL_DRV_CHANGE_SHIFT 2 +/* AEN Type */ +enum aen_type_e{ + 
AEN_LINK_STATUS_CHANGE_TYPE = 0x0, + AEN_CONFIG_REQUIRED_TYPE = 0x1, + OEM_AEN_CONFIG_REQUEST_TYPE = 0x80, + AEN_TYPE_MAX = 0x100 +} ; + /* get link status 0x0A */ #define GET_LINK_STATUS_REQ_LEN 0 #define GET_LINK_STATUS_RSP_LEN 16 diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h index fe663e1fb0810dd220674b5addbd3784b350ce91..83b75f984753649ca3265db459089fcfc11e14b1 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h @@ -1,5 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ +/* + * Copyright (C), 2001-2021, Huawei Tech. Co., Ltd. + * File Name : nic_cfg_comm.h + * Version : Initial Draft + * Description : nic config common header file + * Function List : + * History : + * Modification: Created file + */ #ifndef NIC_CFG_COMM_H #define NIC_CFG_COMM_H @@ -26,7 +34,7 @@ enum nic_rss_hash_type { }; #define NIC_RSS_INDIR_SIZE 256 -#define NIC_RSS_KEY_SIZE 40 +#define NIC_RSS_KEY_SIZE 40 /* * * Definition of the NIC receiving mode @@ -36,6 +44,7 @@ enum nic_rss_hash_type { #define NIC_RX_MODE_BC 0x04 #define NIC_RX_MODE_MC_ALL 0x08 #define NIC_RX_MODE_PROMISC 0x10 +#define NIC_RX_DB_COS_MAX 0x4 /* IEEE 802.1Qaz std */ #define NIC_DCB_COS_MAX 0x8 @@ -52,4 +61,7 @@ enum nic_rss_hash_type { #define NIC_DCB_PRIO_STRICT 0x1 #define NIC_DCB_MAX_PFC_NUM 0x4 + +#define NIC_ETS_PERCENT_WEIGHT 100 + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/vram_common.h b/drivers/net/ethernet/huawei/hinic3/include/vram_common.h index 801aeed183104a95e9d54c39be8cd4f2f331ab47..9f93f7e192a87ea5dfe288ffa840f4df25241e3a 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/vram_common.h +++ b/drivers/net/ethernet/huawei/hinic3/include/vram_common.h @@ -23,7 +23,11 @@ #define VRAM_CQM_BITMAP_BASE "B" #define VRAM_NIC_DCB "DCB" +#define VRAM_NIC_MHOST_MGMT "MHOST_MGMT" #define VRAM_NIC_VRAM "NIC_VRAM" +#define VRAM_NIC_IRQ_VRAM "NIC_IRQ" + +#define VRAM_NIC_MQM "NM" #define VRAM_VBS_BASE_IOCB "BASE_IOCB" #define VRAM_VBS_EX_IOCB "EX_IOCB" @@ -55,11 +59,16 @@ enum KUP_HOOK_POINT { PRE_FREEZE, FREEZE_TO_KILL, PRE_UPDATE_KERNEL, - FLUSH_DURING_KUP, POST_UPDATE_KERNEL, UNFREEZE_TO_RUN, POST_RUN, KUP_HOOK_MAX, }; +#define hi_vram_kalloc(name, size) 0 +#define hi_vram_kfree(vaddr, name, size) +#define get_use_vram_flag(void) 0 +#define vram_get_kexec_flag(void) 0 +#define hi_vram_get_gfp_vram(void) 0 + #endif /* VRAM_COMMON_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/mag_cmd.h b/drivers/net/ethernet/huawei/hinic3/mag_mpu_cmd_defs.h similarity index 79% rename from drivers/net/ethernet/huawei/hinic3/mag_cmd.h rename to drivers/net/ethernet/huawei/hinic3/mag_mpu_cmd_defs.h index 964950361972bc98f26ea1a860ace14d857b393b..e77d7d57482ac0a0f830825d7bc0eaeb9a208c27 100644 --- a/drivers/net/ethernet/huawei/hinic3/mag_cmd.h +++ b/drivers/net/ethernet/huawei/hinic3/mag_mpu_cmd_defs.h @@ -9,6 +9,7 @@ /* serdes cmd struct define */ #define CMD_ARRAY_BUF_SIZE 64 #define SERDES_CMD_DATA_BUF_SIZE 512 +#define RATE_MBPS_TO_GBPS 1000 struct serdes_in_info { u32 chip_id : 16; u32 macro_id : 16; @@ -127,6 +128,7 @@ enum mag_cmd_cnt_type { MAG_RX_PCS_E_BLK_CNT = 5, MAG_RX_PCS_DEC_ERR_BLK_CNT = 6, MAG_RX_PCS_LANE_BIP_ERR_CNT = 7, + MAG_RX_RSFEC_ERR_CW_CNT = 8, MAG_CNT_NUM }; @@ -208,7 +210,9 @@ struct mag_cmd_get_port_info { u32 supported_mode; u32 advertised_mode; - u8 rsvd2[8]; + u32 
supported_fec_mode; + u16 bond_speed; + u8 rsvd2[2]; }; #define MAG_CMD_OPCODE_GET 0 @@ -330,8 +334,8 @@ struct mag_cmd_cfg_fec_mode { u8 port_id; u8 opcode; /* 0:get fec mode 1:set fec mode */ - u8 fec; - u8 rsvd0; + u8 advertised_fec; + u8 supported_fec; }; /* speed */ @@ -641,7 +645,7 @@ struct mag_cmd_event_port_info { u32 cable_length; /* 1/3/5m */ u8 cable_temp; /* temp */ u8 max_speed; /* Maximum rate of an optical module */ - u8 sfp_type; /* sfp/qsfp */ + u8 sfp_type; /* sfp/qsfp/dsfp */ u8 rsvd1; u32 power[4]; /* Optical Power */ @@ -678,6 +682,10 @@ struct mag_cmd_event_port_info { u8 rsvd3[360]; }; +struct mag_cmd_rsfec_stats { + u32 rx_err_lane_phy; +}; + struct mag_cmd_port_stats { u64 mac_tx_fragment_pkt_num; u64 mac_tx_undersize_pkt_num; @@ -913,10 +921,223 @@ struct mag_cmd_sfp_temp_in_info { }; struct mag_cmd_sfp_temp_out_info { - struct mgmt_msg_head head; /* 8B */ - s16 sfp_temp_data[MAG_SFP_PORT_NUM]; /* Temperature read */ - s32 max_temp; /* Chip optical module threshold */ - s32 min_temp; /* Chip optical module threshold */ + struct mgmt_msg_head head; /* 8B */ + s16 sfp_temp_data[MAG_SFP_PORT_NUM]; /* Temperature read */ + s32 max_temp; /* Chip optical module threshold */ + s32 min_temp; /* Chip optical module threshold */ +}; + +#define XSFP_CMIS_PARSE_PAGE_NUM 6 +#define XSFP_CMIS_INFO_MAX_SIZE 1536 +#define QSFP_CMIS_PAGE_SIZE 128 +#define QSFP_CMIS_MAX_CHANNEL_NUM 0x8 + +/* Lower: Control and Essentials, Upper: Administrative Information */ +#define QSFP_CMIS_PAGE_00H 0x00 +/* Advertising */ +#define QSFP_CMIS_PAGE_01H 0x01 +/* Module and lane Thresholds */ +#define QSFP_CMIS_PAGE_02H 0x02 +/* User EEPROM */ +#define QSFP_CMIS_PAGE_03H 0x03 +/* Laser Capabilities Advertising (Page 04h, Optional) */ +#define QSFP_CMIS_PAGE_04H 0x04 +#define QSFP_CMIS_PAGE_05H 0x05 +/* Lane and Data Path Control */ +#define QSFP_CMIS_PAGE_10H 0x10 +/* Lane Status */ +#define QSFP_CMIS_PAGE_11H 0x11 +#define QSFP_CMIS_PAGE_12H 0x12 + +#define MGMT_TLV_U8_SIZE 1 +#define MGMT_TLV_U16_SIZE 2 +#define MGMT_TLV_U32_SIZE 4 + +#define MGMT_TLV_GET_U8(addr) (*((u8 *)(void *)(addr))) +#define MGMT_TLV_SET_U8(addr, value) \ + ((*((u8 *)(void *)(addr))) = ((u8)(value))) + +#define MGMT_TLV_GET_U16(addr) (*((u16 *)(void *)(addr))) +#define MGMT_TLV_SET_U16(addr, value) \ + ((*((u16 *)(void *)(addr))) = ((u16)(value))) + +#define MGMT_TLV_GET_U32(addr) (*((u32 *)(void *)(addr))) +#define MGMT_TLV_SET_U32(addr, value) \ + ((*((u32 *)(void *)(addr))) = ((u32)(value))) + +#define MGMT_TLV_TYPE_END 0xFFFF + +enum mag_xsfp_type { + MAG_XSFP_TYPE_PAGE = 0x01, + MAG_XSFP_TYPE_WIRE_TYPE = 0x02, + MAG_XSFP_TYPE_END = MGMT_TLV_TYPE_END +}; + +struct qsfp_cmis_lower_page_00_s { + u8 resv0[14]; + u8 temperature_msb; + u8 temperature_lsb; + u8 volt_supply[2]; + u8 resv1[67]; + u8 media_type; + u8 electrical_interface_id; + u8 media_interface_id; + u8 lane_count; + u8 resv2[39]; +}; + +struct qsfp_cmis_upper_page_00_s { + u8 identifier; + u8 vendor_name[16]; + u8 vendor_oui[3]; + u8 vendor_pn[16]; + u8 vendor_rev[2]; + u8 vendor_sn[16]; + u8 date_code[8]; + u8 clei_code[10]; + u8 power_character[2]; + u8 cable_len; + u8 connector; + u8 copper_cable_attenuation[6]; + u8 near_end_implementation; + u8 far_end_config; + u8 media_technology; + u8 resv0[43]; +}; + +struct qsfp_cmis_upper_page_01_s { + u8 firmware_rev[2]; + u8 hardware_rev[2]; + u8 smf_len_km; + u8 om5_len; + u8 om4_len; + u8 om3_len; + u8 om2_len; + u8 resv0; + u8 wavelength[2]; + u8 wavelength_tolerance[2]; + u8 pages_implement; + u8 resv1[16]; + 
u8 monitor_implement[2]; + u8 resv2[95]; +}; + +struct qsfp_cmis_upper_page_02_s { + u8 temperature_high_alarm[2]; + u8 temperature_low_alarm[2]; + u8 temperature_high_warn[2]; + u8 temperature_low_warn[2]; + u8 volt_high_alarm[2]; + u8 volt_low_alarm[2]; + u8 volt_high_warn[2]; + u8 volt_low_warn[2]; + u8 resv0[32]; + u8 tx_power_high_alarm[2]; + u8 tx_power_low_alarm[2]; + u8 tx_power_high_warn[2]; + u8 tx_power_low_warn[2]; + u8 tx_bias_high_alarm[2]; + u8 tx_bias_low_alarm[2]; + u8 tx_bias_high_warn[2]; + u8 tx_bias_low_warn[2]; + u8 rx_power_high_alarm[2]; + u8 rx_power_low_alarm[2]; + u8 rx_power_high_warn[2]; + u8 rx_power_low_warn[2]; + u8 resv1[56]; +}; + +struct qsfp_cmis_upper_page_03_s { + u8 resv0[QSFP_CMIS_PAGE_SIZE]; /* Reg 128-255: Upper Memory: Page 03H */ +}; + +struct qsfp_cmis_upper_page_10_s { + u8 resv0[2]; /* Reg 128-129: Upper Memory: Page 10H */ + u8 tx_disable; /* Reg 130: Tx disable, 0b=enabled, 1b=disabled */ + u8 resv1[125]; /* Reg 131-255 */ +}; + +struct qsfp_cmis_upper_page_11_s { + u8 resv0[7]; + u8 tx_fault; + u8 tx_los; + u8 resv1[10]; + u8 rx_los; + u8 resv2[6]; + u8 tx_power[16]; + u8 tx_bias[16]; + u8 rx_power[16]; + u8 resv3[54]; +}; + +struct qsfp_cmis_info_s { + struct qsfp_cmis_lower_page_00_s lower_page_00; + struct qsfp_cmis_upper_page_00_s upper_page_00; + struct qsfp_cmis_upper_page_01_s upper_page_01; + struct qsfp_cmis_upper_page_02_s upper_page_02; + struct qsfp_cmis_upper_page_10_s upper_page_10; + struct qsfp_cmis_upper_page_11_s upper_page_11; +}; + +struct qsfp_cmis_comm_power_s { + u32 chl_power[QSFP_CMIS_MAX_CHANNEL_NUM]; +}; + +struct qsfp_cmis_wire_info_s { + struct qsfp_cmis_comm_power_s rx_power; + u8 rx_los; + u8 resv0[3]; +}; + +struct mgmt_tlv_info { + u16 type; + u16 length; + u8 value[]; +}; + +struct mag_cmd_set_xsfp_tlv_req { + struct mgmt_msg_head head; + + u8 tlv_buf[]; +}; + +struct mag_cmd_set_xsfp_tlv_rsp { + struct mgmt_msg_head head; +}; + +struct mag_cmd_get_xsfp_tlv_req { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd; + u16 rsp_buf_len; +}; + +struct mag_cmd_get_xsfp_tlv_rsp { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd[3]; + + u8 tlv_buf[]; +}; + + +struct parse_tlv_info { + u8 tlv_page_info[XSFP_CMIS_INFO_MAX_SIZE + 1]; + u32 tlv_page_info_len; + u32 tlv_page_num[XSFP_CMIS_PARSE_PAGE_NUM]; + u32 wire_type; + u8 id; +}; + +struct drv_mag_cmd_get_xsfp_tlv_rsp { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd[3]; + + u8 tlv_buf[XSFP_CMIS_INFO_MAX_SIZE]; }; #endif diff --git a/drivers/net/ethernet/huawei/hinic3/mgmt_msg_base.h b/drivers/net/ethernet/huawei/hinic3/mgmt_msg_base.h deleted file mode 100644 index 257bf6761df0009c4217138f8c91c68ce9e28a40..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic3/mgmt_msg_base.h +++ /dev/null @@ -1,27 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved. 
- * File Name : mgmt_msg_base.h - * Version : Initial Draft - * Created : 2021/6/28 - * Last Modified : - * Description : COMM Command interfaces between Driver and MPU - * Function List : - */ - -#ifndef MGMT_MSG_BASE_H -#define MGMT_MSG_BASE_H - -#define MGMT_MSG_CMD_OP_SET 1 -#define MGMT_MSG_CMD_OP_GET 0 - -#define MGMT_MSG_CMD_OP_START 1 -#define MGMT_MSG_CMD_OP_STOP 0 - -struct mgmt_msg_head { - u8 status; - u8 version; - u8 rsvd0[6]; -}; - -#endif diff --git a/drivers/net/ethernet/huawei/hinic3/nic_cfg_comm.h b/drivers/net/ethernet/huawei/hinic3/nic_cfg_comm.h deleted file mode 100644 index 9fb4232716da4b676ee168b242e6e0cff4f4511d..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic3/nic_cfg_comm.h +++ /dev/null @@ -1,63 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C), 2001-2021, Huawei Tech. Co., Ltd. - * File Name : nic_cfg_comm.h - * Version : Initial Draft - * Description : nic config common header file - * Function List : - * History : - * Modification: Created file - */ - -#ifndef NIC_CFG_COMM_H -#define NIC_CFG_COMM_H - -/* rss */ -#define HINIC3_RSS_TYPE_VALID_SHIFT 23 -#define HINIC3_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 -#define HINIC3_RSS_TYPE_IPV6_EXT_SHIFT 25 -#define HINIC3_RSS_TYPE_TCP_IPV6_SHIFT 26 -#define HINIC3_RSS_TYPE_IPV6_SHIFT 27 -#define HINIC3_RSS_TYPE_TCP_IPV4_SHIFT 28 -#define HINIC3_RSS_TYPE_IPV4_SHIFT 29 -#define HINIC3_RSS_TYPE_UDP_IPV6_SHIFT 30 -#define HINIC3_RSS_TYPE_UDP_IPV4_SHIFT 31 - -#define HINIC3_RSS_TYPE_SET(val, member) (((u32)(val) & 0x1) << HINIC3_RSS_TYPE_##member##_SHIFT) -#define HINIC3_RSS_TYPE_GET(val, member) (((u32)(val) >> HINIC3_RSS_TYPE_##member##_SHIFT) & 0x1) - -enum nic_rss_hash_type { - NIC_RSS_HASH_TYPE_XOR = 0, - NIC_RSS_HASH_TYPE_TOEP, - - NIC_RSS_HASH_TYPE_MAX /* MUST BE THE LAST ONE */ -}; - -#define NIC_RSS_INDIR_SIZE 256 -#define NIC_RSS_KEY_SIZE 40 - -/* * - * Definition of the NIC receiving mode - */ -#define NIC_RX_MODE_UC 0x01 -#define NIC_RX_MODE_MC 0x02 -#define NIC_RX_MODE_BC 0x04 -#define NIC_RX_MODE_MC_ALL 0x08 -#define NIC_RX_MODE_PROMISC 0x10 - -/* IEEE 802.1Qaz std */ -#define NIC_DCB_COS_MAX 0x8 -#define NIC_DCB_UP_MAX 0x8 -#define NIC_DCB_TC_MAX 0x8 -#define NIC_DCB_PG_MAX 0x8 -#define NIC_DCB_TSA_SP 0x0 -#define NIC_DCB_TSA_CBS 0x1 /* hi1822 do NOT support */ -#define NIC_DCB_TSA_ETS 0x2 -#define NIC_DCB_DSCP_NUM 0x8 -#define NIC_DCB_IP_PRI_MAX 0x40 - -#define NIC_DCB_PRIO_DWRR 0x0 -#define NIC_DCB_PRIO_STRICT 0x1 - -#define NIC_DCB_MAX_PFC_NUM 0x4 -#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmd.h b/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd.h similarity index 77% rename from drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmd.h rename to drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd.h index 31e224ab10957564695a2dd24426f04160f7ebca..8e0fa891ac54ef6aeacd6da4a2fe9817eedc7740 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmd.h +++ b/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C), 2001-2011, Huawei Tech. Co., Ltd. 
- * File Name : hinic3_comm_cmd.h + * File Name : nic_mpu_cmd.h * Version : Initial Draft * Created : 2019/4/25 * Last Modified : @@ -9,8 +9,8 @@ * Function List : */ -#ifndef HINIC3_NIC_CMD_H -#define HINIC3_NIC_CMD_H +#ifndef NIC_MPU_CMD_H +#define NIC_MPU_CMD_H /* Commands between NIC to MPU */ @@ -33,8 +33,9 @@ enum hinic3_nic_cmd { HINIC3_NIC_CMD_CFG_LOCAL_LRO_STATE, HINIC3_NIC_CMD_CACHE_OUT_QP_RES, + HINIC3_NIC_CMD_SET_FUNC_ER_FWD_ID, - /* MAC & VLAN CFG */ + /** MAC & VLAN CFG & VXLAN CFG */ HINIC3_NIC_CMD_GET_MAC = 20, HINIC3_NIC_CMD_SET_MAC, HINIC3_NIC_CMD_DEL_MAC, @@ -45,6 +46,11 @@ enum hinic3_nic_cmd { HINIC3_NIC_CMD_SET_VLAN_FILTER_EN, HINIC3_NIC_CMD_SET_RX_VLAN_OFFLOAD, HINIC3_NIC_CMD_SMAC_CHECK_STATE, + HINIC3_NIC_CMD_OUTBAND_SET_FUNC_VLAN, + + HINIC3_NIC_CMD_CFG_VXLAN_PORT, + HINIC3_NIC_CMD_RX_RATE_CFG, + HINIC3_NIC_CMD_WR_ORDERING_CFG, /* SR-IOV */ HINIC3_NIC_CMD_CFG_VF_VLAN = 40, @@ -61,6 +67,7 @@ enum hinic3_nic_cmd { HINIC3_NIC_CMD_SET_RSS_CTX_TBL_INTO_FUNC, /* IP checksum error packets, enable rss quadruple hash */ HINIC3_NIC_CMD_IPCS_ERR_RSS_ENABLE_OP = 66, + HINIC3_NIC_CMD_GTP_INNER_PARSE_STATUS, /* PPA/FDIR */ HINIC3_NIC_CMD_ADD_TC_FLOW = 80, @@ -76,6 +83,10 @@ enum hinic3_nic_cmd { HINIC3_NIC_CMD_CFG_PPA_FLUSH, HINIC3_NIC_CMD_SET_FDIR_STATUS, HINIC3_NIC_CMD_GET_PPA_COUNTER, + HINIC3_NIC_CMD_SET_FUNC_FLOW_BIFUR_ENABLE, + HINIC3_NIC_CMD_SET_BOND_MASK, + HINIC3_NIC_CMD_GET_BLOCK_TC_FLOWS, + HINIC3_NIC_CMD_GET_BOND_MASK, /* PORT CFG */ HINIC3_NIC_CMD_SET_PORT_ENABLE = 100, @@ -94,6 +105,8 @@ enum hinic3_nic_cmd { HINIC3_NIC_CMD_QOS_PORT_CFG, HINIC3_NIC_CMD_QOS_MAP_CFG, HINIC3_NIC_CMD_FORCE_PKT_DROP, + HINIC3_NIC_CMD_CFG_TX_PROMISC_SKIP = 114, + HINIC3_NIC_CMD_SET_PORT_FLOW_BIFUR_ENABLE = 117, HINIC3_NIC_CMD_TX_PAUSE_EXCP_NOTICE = 118, HINIC3_NIC_CMD_INQUIRT_PAUSE_CFG = 119, @@ -116,6 +129,10 @@ enum hinic3_nic_cmd { HINIC3_NIC_CMD_SET_UCAPTURE_OPT = 160, /* TODO: move to roce */ HINIC3_NIC_CMD_SET_VHD_CFG, + /* OUT OF BAND */ + HINIC3_NIC_CMD_GET_OUTBAND_CFG = 170, + HINIC3_NIC_CMD_OUTBAND_CFG_NOTICE, + /* TODO: move to HILINK */ HINIC3_NIC_CMD_GET_PORT_STAT = 200, HINIC3_NIC_CMD_CLEAN_PORT_STAT, @@ -136,24 +153,22 @@ enum hinic3_nic_cmd { HINIC3_NIC_CMD_LINK_ERR_EVENT, HINIC3_NIC_CMD_SET_LED_STATUS, - HINIC3_NIC_CMD_MAX = 256, -}; + /* mig */ + HINIC3_NIC_CMD_MIG_SET_CEQ_CTRL = 230, + HINIC3_NIC_CMD_MIG_CFG_MSIX_INFO, + HINIC3_NIC_CMD_MIG_CFG_FUNC_VAT_TBL, + HINIC3_NIC_CMD_MIG_GET_VF_INFO, + HINIC3_NIC_CMD_MIG_CHK_MBX_EMPTY, + HINIC3_NIC_CMD_MIG_SET_VPORT_ENABLE, + HINIC3_NIC_CMD_MIG_CFG_SQ_CI, + HINIC3_NIC_CMD_MIG_CFG_RSS_TBL, + HINIC3_NIC_CMD_MIG_CFG_MAC_TBL, + HINIC3_NIC_CMD_MIG_TMP_SET_CMDQ_CTX, + + HINIC3_OSHR_CMD_ACTIVE_FUNCTION = 240, + HINIC3_NIC_CMD_GET_RQ_INFO = 241, -/* NIC CMDQ MODE */ -enum hinic3_ucode_cmd { - HINIC3_UCODE_CMD_MODIFY_QUEUE_CTX = 0, - HINIC3_UCODE_CMD_CLEAN_QUEUE_CONTEXT, - HINIC3_UCODE_CMD_ARM_SQ, - HINIC3_UCODE_CMD_ARM_RQ, - HINIC3_UCODE_CMD_SET_RSS_INDIR_TABLE, - HINIC3_UCODE_CMD_SET_RSS_CONTEXT_TABLE, - HINIC3_UCODE_CMD_GET_RSS_INDIR_TABLE, - HINIC3_UCODE_CMD_GET_RSS_CONTEXT_TABLE, - HINIC3_UCODE_CMD_SET_IQ_ENABLE, - HINIC3_UCODE_CMD_SET_RQ_FLUSH = 10, - HINIC3_UCODE_CMD_MODIFY_VLAN_CTX, - HINIC3_UCODE_CMD_PPA_HASH_TABLE, - HINIC3_UCODE_CMD_RXQ_INFO_GET = 13, + HINIC3_NIC_CMD_MAX = 256, }; -#endif /* HINIC3_NIC_CMD_H */ +#endif /* NIC_MPU_CMD_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd_defs.h similarity index 84% rename from 
drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h rename to drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd_defs.h index 522518df64223d36c29e0fb316d21b716982f1fd..ee6bf2070ba8d734671f266c23cc2f7fb9fddd4b 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h +++ b/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd_defs.h @@ -20,6 +20,10 @@ #define NIC_TCAM_BLOCK_LARGE_NUM 256 #define NIC_TCAM_BLOCK_LARGE_SIZE 16 +#define TRAFFIC_BIFUR_MODEL_TYPE 2 + +#define NIC_TCAM_FLOW_BIFUR_FLAG (1 << 0) + #ifndef BIT #define BIT(n) (1UL << (n)) #endif @@ -103,6 +107,7 @@ struct hinic3_port_state { #define HINIC3_SET_PORT_CAR_PROFILE 0 #define HINIC3_SET_PORT_CAR_STATE 1 +#define HINIC3_GET_PORT_CAR_LIMIT_SPEED 2 struct hinic3_port_car_info { u32 cir; /* unit: kbps, range:[1,400*1000*1000], i.e. 1Kbps~400Gbps(400M*kbps) */ @@ -117,7 +122,7 @@ struct hinic3_cmd_set_port_car { u8 port_id; u8 opcode; /* 0--set car profile, 1--set car state */ u8 state; /* 0--disable, 1--enable */ - u8 rsvd; + u8 level; struct hinic3_port_car_info car; }; @@ -350,6 +355,14 @@ struct hinic3_cmd_local_lro_state { u8 state; /* 0: disable, 1: enable */ }; +struct hinic3_cmd_gtp_inner_parse_status { + struct hinic3_mgmt_msg_head msg_head; + + u16 func_id; + u8 opcode; /* 0: get state, 1: set state */ + u8 status; /* 0: disable, 1: enable */ +}; + struct hinic3_cmd_vf_vlan_config { struct hinic3_mgmt_msg_head msg_head; @@ -373,7 +386,8 @@ struct hinic3_cmd_tx_rate_cfg { struct hinic3_mgmt_msg_head msg_head; u16 func_id; - u16 rsvd1; + u8 rsvd1; + u8 direct; u32 min_rate; u32 max_rate; u8 rsvd2[8]; @@ -416,8 +430,22 @@ struct hinic3_cmd_vlan_config { u16 func_id; u8 opcode; - u8 rsvd1; + u8 outband_defvid_flag; u16 vlan_id; + u8 blacklist_flag; + u8 rsvd2; +}; + +#define VLAN_BLACKLIST_ENABLE 1 +#define VLAN_BLACKLIST_DISABLE 0 + +struct hinic3_cmd_vxlan_port_info { + struct hinic3_mgmt_msg_head msg_head; + + u16 func_id; + u8 opcode; + u8 cfg_mode; + u16 vxlan_port; u16 rsvd2; }; @@ -561,11 +589,13 @@ struct hinic3_up_ets_cfg { /* delet */ u8 tc_prio[NIC_DCB_TC_MAX]; }; -#define CMD_QOS_ETS_COS_TC BIT(0) -#define CMD_QOS_ETS_TC_BW BIT(1) -#define CMD_QOS_ETS_COS_PRIO BIT(2) -#define CMD_QOS_ETS_COS_BW BIT(3) -#define CMD_QOS_ETS_TC_PRIO BIT(4) +#define CMD_QOS_ETS_COS_TC BIT(0) +#define CMD_QOS_ETS_TC_BW BIT(1) +#define CMD_QOS_ETS_COS_PRIO BIT(2) +#define CMD_QOS_ETS_COS_BW BIT(3) +#define CMD_QOS_ETS_TC_PRIO BIT(4) +#define CMD_QOS_ETS_TC_RATELIMIT BIT(5) + struct hinic3_cmd_ets_cfg { struct hinic3_mgmt_msg_head head; @@ -580,6 +610,7 @@ struct hinic3_cmd_ets_cfg { u8 cos_prio[NIC_DCB_COS_MAX]; /* 0 - DWRR, 1 - STRICT */ u8 cos_bw[NIC_DCB_COS_MAX]; u8 tc_prio[NIC_DCB_TC_MAX]; /* 0 - DWRR, 1 - STRICT */ + u8 rate_limit[NIC_DCB_TC_MAX]; }; struct hinic3_cmd_set_dcb_state { @@ -666,6 +697,8 @@ struct nic_cmd_pause_inquiry_cfg { u32 type; /* 1: set, 2: get */ + u32 cos_id; + u32 rx_inquiry_pause_drop_pkts_en; u32 rx_inquiry_pause_period_ms; u32 rx_inquiry_pause_times; @@ -678,7 +711,7 @@ struct nic_cmd_pause_inquiry_cfg { u32 tx_inquiry_pause_times; /* tx pause Default Times Period 5 */ u32 tx_inquiry_pause_frame_thd; /* tx pause Detection Threshold */ u32 tx_inquiry_rx_total_pkts; - u32 rsvd[4]; + u32 rsvd[3]; }; /* pfc/pause Storm TX exception reporting */ @@ -903,7 +936,9 @@ struct nic_cmd_fdir_get_block_rules { }; struct hinic3_tcam_key_ipv4_mem { - u32 rsvd1 : 4; + u32 rsvd1 : 1; + u32 bifur_flag : 2; + u32 model : 1; u32 tunnel_type : 4; u32 ip_proto : 8; u32 rsvd0 : 16; @@ -935,14 +970,16 @@ union 
hinic3_tag_tcam_ext_info { u32 id : 16; /* id */ u32 type : 4; /* type: 0-func, 1-vmdq, 2-port, 3-rsvd, 4-trunk, 5-dp, 6-mc */ u32 host_id : 3; - u32 rsv : 8; + u32 rss_q_num : 8; /* rss queue num */ u32 ext : 1; } bs; u32 value; }; struct hinic3_tcam_key_ipv6_mem { - u32 rsvd1 : 4; + u32 bifur_flag : 2; + u32 vlan_flag : 1; + u32 outer_ip_type : 1; u32 tunnel_type : 4; u32 ip_proto : 8; u32 rsvd0 : 16; @@ -1044,7 +1081,7 @@ struct hinic3_ppa_cfg_ppa_en_cmd { u16 func_id; u8 ppa_en; - u8 rsvd; + u8 ppa_miss_drop_en; }; struct hinic3_func_flow_bifur_en_cmd { @@ -1058,7 +1095,8 @@ struct hinic3_port_flow_bifur_en_cmd { struct hinic3_mgmt_msg_head msg_head; u16 port_id; u8 flow_bifur_en; - u8 rsvd[5]; + u8 flow_bifur_type; /* 0->vf bifur, 2->traffic bifur */ + u8 rsvd[4]; }; struct hinic3_bond_mask_cmd { @@ -1070,9 +1108,18 @@ struct hinic3_bond_mask_cmd { u8 rsvd[3]; }; +struct hinic3_func_er_value_cmd { + struct hinic3_mgmt_msg_head msg_head; + u16 vf_id; + u16 er_fwd_id; +}; + #define HINIC3_TX_SET_PROMISC_SKIP 0 #define HINIC3_TX_GET_PROMISC_SKIP 1 +#define HINIC3_GET_TRAFFIC_BIFUR_STATE 0 +#define HINIC3_SET_TRAFFIC_BIFUR_STATE 1 + struct hinic3_tx_promisc_cfg { struct hinic3_mgmt_msg_head msg_head; u8 port_id; @@ -1116,53 +1163,88 @@ enum { NIC_NVM_DATA_VLAN_PRI = BIT(3), NIC_NVM_DATA_VLAN_ID = BIT(4), NIC_NVM_DATA_WORK_MODE = BIT(5), - NIC_NVM_DATA_PF_SPEED_LIMIT = BIT(6), + NIC_NVM_DATA_PF_TX_SPEED_LIMIT = BIT(6), NIC_NVM_DATA_GE_MODE = BIT(7), NIC_NVM_DATA_AUTO_NEG = BIT(8), NIC_NVM_DATA_LINK_FEC = BIT(9), NIC_NVM_DATA_PF_ADAPTIVE_LINK = BIT(10), NIC_NVM_DATA_SRIOV_CONTROL = BIT(11), NIC_NVM_DATA_EXTEND_MODE = BIT(12), + NIC_NVM_DATA_LEGACY_VLAN = BIT(13), + NIC_NVM_DATA_LEGACY_VLAN_PRI = BIT(14), + NIC_NVM_DATA_LEGACY_VLAN_ID = BIT(15), NIC_NVM_DATA_RESET = BIT(31), }; -#define BIOS_CFG_SIGNATURE 0x1923E518 -#define BIOS_OP_CFG_ALL(op_code_val) ((((op_code_val) >> 1) & (0xFFFFFFFF)) != 0) -#define BIOS_OP_CFG_WRITE(op_code_val) ((((op_code_val) & NIC_NVM_DATA_SET)) != 0) -#define BIOS_OP_CFG_PXE_EN(op_code_val) (((op_code_val) & NIC_NVM_DATA_PXE) != 0) -#define BIOS_OP_CFG_VLAN_EN(op_code_val) (((op_code_val) & NIC_NVM_DATA_VLAN) != 0) -#define BIOS_OP_CFG_VLAN_PRI(op_code_val) (((op_code_val) & NIC_NVM_DATA_VLAN_PRI) != 0) -#define BIOS_OP_CFG_VLAN_ID(op_code_val) (((op_code_val) & NIC_NVM_DATA_VLAN_ID) != 0) -#define BIOS_OP_CFG_WORK_MODE(op_code_val) (((op_code_val) & NIC_NVM_DATA_WORK_MODE) != 0) -#define BIOS_OP_CFG_PF_BW(op_code_val) (((op_code_val) & NIC_NVM_DATA_PF_SPEED_LIMIT) != 0) -#define BIOS_OP_CFG_GE_SPEED(op_code_val) (((op_code_val) & NIC_NVM_DATA_GE_MODE) != 0) -#define BIOS_OP_CFG_AUTO_NEG(op_code_val) (((op_code_val) & NIC_NVM_DATA_AUTO_NEG) != 0) -#define BIOS_OP_CFG_LINK_FEC(op_code_val) (((op_code_val) & NIC_NVM_DATA_LINK_FEC) != 0) -#define BIOS_OP_CFG_AUTO_ADPAT(op_code_val) (((op_code_val) & NIC_NVM_DATA_PF_ADAPTIVE_LINK) != 0) -#define BIOS_OP_CFG_SRIOV_ENABLE(op_code_val) (((op_code_val) & NIC_NVM_DATA_SRIOV_CONTROL) != 0) -#define BIOS_OP_CFG_EXTEND_MODE(op_code_val) (((op_code_val) & NIC_NVM_DATA_EXTEND_MODE) != 0) -#define BIOS_OP_CFG_RST_DEF_SET(op_code_val) (((op_code_val) & (u32)NIC_NVM_DATA_RESET) != 0) +#define BIOS_CFG_SIGNATURE 0x1923E518 +#define BIOS_OP_CFG_ALL(op_code_val) \ + ((((op_code_val) >> 1) & (0xFFFFFFFF)) != 0) +#define BIOS_OP_CFG_WRITE(op_code_val) \ + ((((op_code_val) & NIC_NVM_DATA_SET)) != 0) +#define BIOS_OP_CFG_PXE_EN(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_PXE) != 0) +#define BIOS_OP_CFG_VLAN_EN(op_code_val) \ + 
(((op_code_val) & NIC_NVM_DATA_VLAN) != 0) +#define BIOS_OP_CFG_VLAN_PRI(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_VLAN_PRI) != 0) +#define BIOS_OP_CFG_VLAN_ID(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_VLAN_ID) != 0) +#define BIOS_OP_CFG_WORK_MODE(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_WORK_MODE) != 0) +#define BIOS_OP_CFG_PF_BW(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_PF_TX_SPEED_LIMIT) != 0) +#define BIOS_OP_CFG_GE_SPEED(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_GE_MODE) != 0) +#define BIOS_OP_CFG_AUTO_NEG(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_AUTO_NEG) != 0) +#define BIOS_OP_CFG_LINK_FEC(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_LINK_FEC) != 0) +#define BIOS_OP_CFG_AUTO_ADPAT(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_PF_ADAPTIVE_LINK) != 0) +#define BIOS_OP_CFG_SRIOV_ENABLE(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_SRIOV_CONTROL) != 0) +#define BIOS_OP_CFG_EXTEND_MODE(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_EXTEND_MODE) != 0) +#define BIOS_OP_CFG_LEGACY_VLAN_EN(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_LEGACY_VLAN) != 0) +#define BIOS_OP_CFG_LEGACY_VLAN_PRI(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_LEGACY_VLAN_PRI) != 0) +#define BIOS_OP_CFG_LEGACY_VLAN_ID(op_code_val) \ + (((op_code_val) & NIC_NVM_DATA_LEGACY_VLAN_ID) != 0) +#define BIOS_OP_CFG_RST_DEF_SET(op_code_val) \ + (((op_code_val) & (u32)NIC_NVM_DATA_RESET) != 0) + #define NIC_BIOS_CFG_MAX_PF_BW 100 + +struct nic_legacy_vlan_cfg { + /* Legacy mode PXE VLAN enable: 0 - disable 1 - enable */ + u8 pxe_vlan_en : 1; + /* Legacy mode PXE VLAN priority: 0-7 */ + u8 pxe_vlan_pri : 3; + /* Legacy mode PXE VLAN ID 1-4094 */ + u16 pxe_vlan_id : 12; +}; + /* Note: This structure must be 4-byte aligned. */ struct nic_bios_cfg { u32 signature; - u8 pxe_en; /* PXE enable: 0 - disable 1 - enable */ + u8 pxe_en; u8 extend_mode; - u8 rsvd0[2]; - u8 pxe_vlan_en; /* PXE VLAN enable: 0 - disable 1 - enable */ - u8 pxe_vlan_pri; /* PXE VLAN priority: 0-7 */ - u16 pxe_vlan_id; /* PXE VLAN ID 1-4094 */ - u32 service_mode; /* @See CHIPIF_SERVICE_MODE_x */ - u32 pf_bw; /* PF rate, in percentage. The value ranges from 0 to 100. */ - u8 speed; /* enum of port speed */ - u8 auto_neg; /* Auto-Negotiation Switch 0 - Invalid Field 1 - On 2 - Off */ - u8 lanes; /* lane num */ - u8 fec; /* FEC mode, @See enum mag_cmd_port_fec */ - u8 auto_adapt; /* Adaptive Mode Configuration 0 - Invalid Configuration 1 - On 2 - Off */ - u8 func_valid; /* Whether func_id is valid; 0: invalid; other: valid */ - u8 func_id; /* This member is valid only when func_valid is not set to 0. 
*/ - u8 sriov_en; /* SRIOV-EN: 0 - Invalid configuration, 1 - On, 2 - Off */ + struct nic_legacy_vlan_cfg nlvc; + u8 pxe_vlan_en; + u8 pxe_vlan_pri; + u16 pxe_vlan_id; + u32 service_mode; + u32 pf_tx_bw; + u8 speed; + u8 auto_neg; + u8 lanes; + u8 fec; + u8 auto_adapt; + u8 func_valid; + u8 func_id; + u8 sriov_en; }; struct nic_cmd_bios_cfg { @@ -1171,6 +1253,14 @@ struct nic_cmd_bios_cfg { struct nic_bios_cfg bios_cfg; }; +struct nic_rx_rate_bios_cfg { + struct mgmt_msg_head msg_head; + + u32 op_code; /* Operation Code:[0:read 1:write] */ + u8 rx_rate_limit; + u8 func_id; +}; + struct nic_cmd_vhd_config { struct hinic3_mgmt_msg_head head; @@ -1192,7 +1282,8 @@ struct hinic3_create_bond_info { u32 active_port_max_num; /* Maximum number of active bond member interfaces */ u32 active_port_min_num; /* Minimum number of active bond member interfaces */ u32 xmit_hash_policy; - u32 rsvd[2]; + u32 default_param_flag; + u32 rsvd; }; struct hinic3_cmd_create_bond { @@ -1287,7 +1378,8 @@ struct hinic3_smac_check_state { struct hinic3_mgmt_msg_head head; u8 smac_check_en; /* 1: enable 0: disable */ u8 op_code; /* 1: set 0: get */ - u8 rsvd[2]; + u8 flash_en; /* 1: enable 0: disable */ + u8 rsvd; }; struct hinic3_clear_log_state { @@ -1295,4 +1387,34 @@ struct hinic3_clear_log_state { u32 type; }; +struct hinic3_outband_cfg_info { + struct hinic3_mgmt_msg_head msg_head; + + u16 outband_default_vid; + u16 func_id; +}; + +struct hinic3_wr_ordering { + struct hinic3_mgmt_msg_head head; + u8 op_code; /* 1: set 0: get */ + u8 wr_pkt_so_ro; + u8 rd_pkt_so_ro; + u8 rsvd; +}; + +struct hinic3_function_active_info { + struct hinic3_mgmt_msg_head head; + u16 func_id; + u16 rsvd1; +}; + +struct hinic3_rq_info { + struct hinic3_mgmt_msg_head head; + u16 func_id; + u16 rq_depth; + u16 rq_num; + u16 pf_num; + u16 port_num; +}; + #endif /* HINIC_MGMT_INTERFACE_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/nic_npu_cmd.h b/drivers/net/ethernet/huawei/hinic3/nic_npu_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..97eda4353cd79700eb7d818cda8bd446ae0ea4a8 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/nic_npu_cmd.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C), 2001-2011, Huawei Tech. Co., Ltd. 
+ * File Name : nic_npu_cmd.h + * Version : Initial Draft + * Created : 2019/4/25 + * Last Modified : + * Description : NIC Commands between Driver and NPU + * Function List : + */ + +#ifndef NIC_NPU_CMD_H +#define NIC_NPU_CMD_H + +/* NIC CMDQ MODE */ +enum hinic3_ucode_cmd { + HINIC3_UCODE_CMD_MODIFY_QUEUE_CTX = 0, + HINIC3_UCODE_CMD_CLEAN_QUEUE_CONTEXT, + HINIC3_UCODE_CMD_ARM_SQ, /**< Unused */ + HINIC3_UCODE_CMD_ARM_RQ, /**< Unused */ + HINIC3_UCODE_CMD_SET_RSS_INDIR_TABLE, + HINIC3_UCODE_CMD_SET_RSS_CONTEXT_TABLE, + HINIC3_UCODE_CMD_GET_RSS_INDIR_TABLE, + HINIC3_UCODE_CMD_GET_RSS_CONTEXT_TABLE, /**< Unused */ + HINIC3_UCODE_CMD_SET_IQ_ENABLE, /**< Unused */ + HINIC3_UCODE_CMD_SET_RQ_FLUSH = 10, + HINIC3_UCODE_CMD_MODIFY_VLAN_CTX, + HINIC3_UCODE_CMD_PPA_HASH_TABLE, + HINIC3_UCODE_CMD_RXQ_INFO_GET = 13, + HINIC3_UCODE_MIG_CFG_Q_CTX = 14, + HINIC3_UCODE_MIG_CHK_SQ_STOP, + HINIC3_UCODE_CHK_RQ_STOP, + HINIC3_UCODE_MIG_CFG_BAT_INFO, +}; + +#endif /* NIC_NPU_CMD_H */ \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/hinic3/ossl_knl_linux.h b/drivers/net/ethernet/huawei/hinic3/ossl_knl_linux.h index ee005a8ade7483bdda1519c4b3413a4c68c2d555..5f0f790ec2ea4a7f6395dc4a2f88947719f38229 100644 --- a/drivers/net/ethernet/huawei/hinic3/ossl_knl_linux.h +++ b/drivers/net/ethernet/huawei/hinic3/ossl_knl_linux.h @@ -4,8 +4,8 @@ #ifndef OSSL_KNL_LINUX_H_ #define OSSL_KNL_LINUX_H_ -#include #include +#include #include #include #include @@ -19,6 +19,12 @@ #include #include #include +#include +#include +#include +#include +#include +#include #ifndef NETIF_F_SCTP_CSUM #define NETIF_F_SCTP_CSUM 0 @@ -37,6 +43,17 @@ #define ossl_get_free_pages __get_free_pages +#ifndef ETHTOOL_LINK_MODE_100000baseKR_Full_BIT +#define ETHTOOL_LINK_MODE_100000baseKR_Full_BIT 75 +#define ETHTOOL_LINK_MODE_100000baseCR_Full_BIT 78 +#define ETHTOOL_LINK_MODE_100000baseSR_Full_BIT 76 +#endif +#ifndef ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT +#define ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT 80 +#define ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT 81 +#define ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT 84 +#endif + #ifndef high_16_bits #define low_16_bits(x) ((x) & 0xFFFF) #define high_16_bits(x) (((x) & 0xFFFF0000) >> 16) @@ -54,13 +71,11 @@ #endif #define HAVE_INET6_IFADDR_LIST - #define HAVE_NDO_GET_STATS64 #ifndef HAVE_MQPRIO #define HAVE_MQPRIO #endif - #ifndef HAVE_SETUP_TC #define HAVE_SETUP_TC #endif @@ -68,25 +83,20 @@ #ifndef HAVE_NDO_SET_FEATURES #define HAVE_NDO_SET_FEATURES #endif - #define HAVE_IRQ_AFFINITY_NOTIFY - #define HAVE_ETHTOOL_SET_PHYS_ID - #define HAVE_NETDEV_WANTED_FEAUTES #ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED #define HAVE_PCI_DEV_FLAGS_ASSIGNED #define HAVE_VF_SPOOFCHK_CONFIGURE #endif - #ifndef HAVE_SKB_L4_RXHASH #define HAVE_SKB_L4_RXHASH #endif #define HAVE_ETHTOOL_GRXFHINDIR_SIZE #define HAVE_INT_NDO_VLAN_RX_ADD_VID - #ifdef ETHTOOL_SRXNTUPLE #undef ETHTOOL_SRXNTUPLE #endif @@ -95,9 +105,9 @@ #define _kc_kunmap_atomic(addr) kunmap_atomic(addr) #include - #define HAVE_FDB_OPS #define HAVE_ETHTOOL_GET_TS_INFO + #define HAVE_NAPI_GRO_FLUSH_OLD #ifndef HAVE_SRIOV_CONFIGURE @@ -107,6 +117,7 @@ #define HAVE_ENCAP_TSO_OFFLOAD #define HAVE_SKB_INNER_NETWORK_HEADER + #define HAVE_NDO_SET_VF_LINK_STATE #define HAVE_SKB_INNER_PROTOCOL #define HAVE_MPLS_FEATURES @@ -115,12 +126,12 @@ #define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK #define HAVE_VXLAN_CHECKS -#define HAVE_NDO_SELECT_QUEUE_ACCEL #define HAVE_NET_GET_RANDOM_ONCE #define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS #define 
HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK + #define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE #define HAVE_VLAN_FIND_DEV_DEEP_RCU @@ -128,8 +139,8 @@ #define HAVE_MULTI_VLAN_OFFLOAD_EN #define HAVE_ETH_GET_HEADLEN_FUNC -#define HAVE_RXFH_HASHFUNC +#define HAVE_RXFH_HASHFUNC #define HAVE_NDO_SET_VF_TRUST #include @@ -138,6 +149,7 @@ #define HAVE_NETDEVICE_MIN_MAX_MTU + #define HAVE_VOID_NDO_GET_STATS64 #define HAVE_VM_OPS_FAULT_NO_VMA @@ -146,9 +158,13 @@ #define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE #define HAVE_PTP_CLOCK_DO_AUX_WORK + #define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV #define HAVE_XDP_SUPPORT +#if (KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE) +#define HAVE_XDP_QUERY_PROG +#endif #define HAVE_NDO_BPF_NETDEV_BPF #define HAVE_TIMER_SETUP @@ -158,6 +174,7 @@ #define HAVE_NDO_SELECT_QUEUE_SB_DEV + #define dev_open(x) dev_open(x, NULL) #define HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY @@ -180,12 +197,14 @@ static inline void *_hinic3_dma_zalloc_coherent(struct device *dev, } #endif +#if (KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE) #ifndef DT_KNL_EMU struct timeval { __kernel_old_time_t tv_sec; /* seconds */ __kernel_suseconds_t tv_usec; /* microseconds */ }; #endif +#endif #ifndef do_gettimeofday #define do_gettimeofday(time) _kc_do_gettimeofday(time) @@ -199,6 +218,8 @@ static inline void _kc_do_gettimeofday(struct timeval *tv) } #endif + + #define HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY #define ETH_GET_HEADLEN_NEED_DEV #define HAVE_GENL_OPS_FIELD_VALIDATE @@ -207,32 +228,75 @@ static inline void _kc_do_gettimeofday(struct timeval *tv) #define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f)) #endif +/*****************************************************************************/ +#if (KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE) +#else /* >= 5.5.0 */ #define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#endif /* 5.5.0 */ +/*****************************************************************************/ +#if (KERNEL_VERSION(5, 6, 0) > LINUX_VERSION_CODE) +#else /* >= 5.6.0 */ #ifndef rtc_time_to_tm #define rtc_time_to_tm rtc_time64_to_tm #endif #define HAVE_NDO_TX_TIMEOUT_TXQ #define HAVE_PROC_OPS +#endif /* 5.6.0 */ +/*****************************************************************************/ +#if (KERNEL_VERSION(5, 7, 0) > LINUX_VERSION_CODE) +#else /* >= 5.7.0 */ #define SUPPORTED_COALESCE_PARAMS #ifndef pci_cleanup_aer_uncorrect_error_status #define pci_cleanup_aer_uncorrect_error_status pci_aer_clear_nonfatal_status #endif +#endif /* 5.7.0 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE) +#else /* >= 5.9.0 */ #define HAVE_XDP_FRAME_SZ +#endif /* 5.9.0 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE) +#define HAVE_DEVLINK_FW_FILE_NAME_PARAM +#else /* >= 5.10.0 */ +#endif /* 5.10.0 */ #define HAVE_DEVLINK_FW_FILE_NAME_MEMBER -#define HAVE_ENCAPSULATION_TSO +/* ************************************************************************ */ +#if (KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE) +#else /* >= 5.10.0 */ +#if !defined(HAVE_ETHTOOL_COALESCE_EXTACK) && \ + !defined(NO_ETHTOOL_COALESCE_EXTACK) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif +#endif /* 5.10.0 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE) + +#else /* >= 5.10.0 */ +#if !defined(HAVE_ETHTOOL_RINGPARAM_EXTACK) && \ + !defined(NO_ETHTOOL_RINGPARAM_EXTACK) +#define HAVE_ETHTOOL_RINGPARAM_EXTACK 
+#endif +#endif /* 5.10.0 */ +/* ************************************************************************ */ +#define HAVE_NDO_UDP_TUNNEL_ADD +#define HAVE_ENCAPSULATION_TSO #define HAVE_ENCAPSULATION_CSUM #ifndef eth_zero_addr static inline void hinic3_eth_zero_addr(u8 *addr) { - memset(addr, 0x00, ETH_ALEN); + (void)memset(addr, 0x00, ETH_ALEN); } #define eth_zero_addr(_addr) hinic3_eth_zero_addr(_addr) @@ -273,13 +337,10 @@ int creat_thread(struct sdk_thread_info *thread_info); void stop_thread(struct sdk_thread_info *thread_info); #define destroy_work(work) - void utctime_to_localtime(u64 utctime, u64 *localtime); - #ifndef HAVE_TIMER_SETUP void initialize_timer(const void *adapter_hdl, struct timer_list *timer); #endif - void add_to_timer(struct timer_list *timer, u64 period); void stop_timer(struct timer_list *timer); void delete_timer(struct timer_list *timer); @@ -304,3 +365,4 @@ u64 ossl_get_real_time(void); #define tasklet_state(tasklet) ((tasklet)->state) #endif +/* ************************************************************************ */