diff --git a/drivers/net/ethernet/nebula-matrix/Kconfig b/drivers/net/ethernet/nebula-matrix/Kconfig index 5101a0ff9ceaa114fdd06507f58cdb1ee92a5907..0264f950b4c41850d079694f1b8f368c22234e8e 100644 --- a/drivers/net/ethernet/nebula-matrix/Kconfig +++ b/drivers/net/ethernet/nebula-matrix/Kconfig @@ -11,7 +11,7 @@ config NET_VENDOR_NEBULA_MATRIX Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all - the questions about Nebual-matrix cards. If you say Y, you will be + the questions about Nebula-matrix cards. If you say Y, you will be asked for your specific card in the following questions. if NET_VENDOR_NEBULA_MATRIX diff --git a/drivers/net/ethernet/nebula-matrix/nbl/Makefile b/drivers/net/ethernet/nebula-matrix/nbl/Makefile index 6a966ae348f90d5740f13e6fc3002c0a0bda56aa..c07f0020ba91b5b4860f535004b9a4483b2be7b6 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/Makefile +++ b/drivers/net/ethernet/nebula-matrix/nbl/Makefile @@ -7,21 +7,39 @@ obj-$(CONFIG_NBL_CORE) := nbl_core.o nbl_core-objs += nbl_common/nbl_common.o \ nbl_common/nbl_event.o \ nbl_channel/nbl_channel.o \ + nbl_channel/nbl_cmdq.o \ nbl_hw/nbl_hw_leonis/nbl_phy_leonis.o \ + nbl_hw/nbl_hw_leonis/nbl_fc_leonis.o \ + nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.o \ nbl_hw/nbl_hw_leonis/nbl_flow_leonis.o \ + nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.o \ + nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.o \ nbl_hw/nbl_hw_leonis/nbl_queue_leonis.o \ nbl_hw/nbl_hw_leonis/nbl_resource_leonis.o \ + nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.o \ + nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.o \ + nbl_hw/nbl_fc.o \ + nbl_hw/nbl_tc_pedit.o \ nbl_hw/nbl_resource.o \ nbl_hw/nbl_interrupt.o \ nbl_hw/nbl_txrx.o \ nbl_hw/nbl_queue.o \ nbl_hw/nbl_vsi.o \ nbl_hw/nbl_adminq.o \ + nbl_hw/nbl_accel.o \ + nbl_hw/nbl_fd.o \ + nbl_core/nbl_lag.o \ nbl_core/nbl_dispatch.o \ nbl_core/nbl_debugfs.o \ nbl_core/nbl_ethtool.o \ nbl_core/nbl_service.o \ + nbl_core/nbl_dev_rdma.o \ + nbl_core/nbl_sysfs.o \ nbl_core/nbl_dev.o \ + nbl_core/nbl_ktls.o \ + nbl_core/nbl_ipsec.o \ + nbl_core/nbl_tc_tun.o \ + nbl_core/nbl_tc.o \ nbl_core/nbl_hwmon.o \ nbl_core/nbl_dev_user.o \ nbl_main.o @@ -39,4 +57,5 @@ nbl_core-objs += nbl_common/nbl_common.o \ ccflags-y += -I$(srctree)/$(src) ccflags-y += -I$(srctree)/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/ ccflags-y += -I$(srctree)/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw +ccflags-y += -I$(srctree)/drivers/net/ethernet/nebula-matrix/nbl/nbl_export diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c index b00959e24178b757ca1dd8061d4b426a5451ce75..c27a3a668f739bc1fad775e8ee96959cbe94ff5d 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c @@ -5,6 +5,26 @@ */ #include "nbl_channel.h" +#include "nbl_cmdq.h" + +static int nbl_chan_send_ack(void *priv, struct nbl_chan_ack_info *chan_ack); + +static void nbl_chan_delete_msg_handler(struct nbl_channel_mgt *chan_mgt, u16 msg_type) +{ + u8 chan_type; + struct nbl_chan_info *chan_info; + + nbl_common_free_hash_node(chan_mgt->handle_hash_tbl, &msg_type); + + if (msg_type < NBL_CHAN_MSG_ADMINQ_GET_EMP_VERSION) + chan_type = NBL_CHAN_TYPE_MAILBOX; + else + chan_type = NBL_CHAN_TYPE_ADMINQ; + + chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + if (chan_info && chan_info->clean_task) + 
nbl_common_flush_task(chan_info->clean_task); +} static int nbl_chan_add_msg_handler(struct nbl_channel_mgt *chan_mgt, u16 msg_type, nbl_chan_resp func, void *priv) @@ -15,7 +35,7 @@ static int nbl_chan_add_msg_handler(struct nbl_channel_mgt *chan_mgt, u16 msg_ty handler.func = func; handler.priv = priv; - ret = nbl_common_alloc_hash_node(chan_mgt->handle_hash_tbl, &msg_type, &handler); + ret = nbl_common_alloc_hash_node(chan_mgt->handle_hash_tbl, &msg_type, &handler, NULL); return ret; } @@ -61,9 +81,7 @@ static int nbl_chan_init_msg_handler(struct nbl_channel_mgt *chan_mgt, u8 user_n static void nbl_chan_remove_msg_handler(struct nbl_channel_mgt *chan_mgt) { - struct nbl_hash_tbl_del_key del_key = {0}; - - nbl_common_remove_hash_table(chan_mgt->handle_hash_tbl, &del_key); + nbl_common_remove_hash_table(chan_mgt->handle_hash_tbl, NULL); chan_mgt->handle_hash_tbl = NULL; @@ -288,7 +306,7 @@ static int nbl_chan_cfg_mailbox_qinfo_map_table(struct nbl_channel_mgt *chan_mgt for (func_id = 0; func_id < NBL_MAX_PF; func_id++) { if (!(pf_mask & (1 << func_id))) phy_ops->cfg_mailbox_qinfo(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), func_id, - common->bus, common->devid, + common->hw_bus, common->devid, NBL_COMMON_TO_PCI_FUNC_ID(common) + func_id); } @@ -301,7 +319,7 @@ static int nbl_chan_cfg_adminq_qinfo_map_table(struct nbl_channel_mgt *chan_mgt) struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); phy_ops->cfg_adminq_qinfo(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), - common->bus, common->devid, + common->hw_bus, common->devid, NBL_COMMON_TO_PCI_FUNC_ID(common)); return 0; @@ -598,7 +616,7 @@ static int nbl_chan_start_rxq(struct nbl_channel_mgt *chan_mgt, u8 chan_type) static int nbl_chan_reset_queue(struct nbl_channel_mgt *chan_mgt, u8 chan_type, bool tx) { struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); - int ret = 0; + int i = 0, j = 0, ret = 0; /* If someone else is doing resetting, don't bother */ if (test_bit(NBL_CHAN_RESETTING, chan_info->state)) @@ -618,6 +636,25 @@ static int nbl_chan_reset_queue(struct nbl_channel_mgt *chan_mgt, u8 chan_type, return 0; } + /* Make sure no one is waiting before we reset. 
*/ + while (i++ < (NBL_CHAN_ACK_WAIT_TIME * 2) / HZ) { + for (j = 0; j < NBL_CHAN_QUEUE_LEN; j++) + if (chan_info->wait[j].status == NBL_MBX_STATUS_WAITING) + break; + + if (j == NBL_CHAN_QUEUE_LEN) + break; + mdelay(1000); + } + + if (j != NBL_CHAN_QUEUE_LEN) { + nbl_warn(NBL_CHAN_MGT_TO_COMMON(chan_mgt), NBL_DEBUG_MBX, + "Some wait_head unreleased, fail to reset"); + clear_bit(NBL_CHAN_RESETTING, chan_info->state); + spin_unlock(&chan_info->txq_lock); + return 0; + } + nbl_chan_shutdown_queue(chan_mgt, chan_type, tx); if (tx) @@ -636,71 +673,57 @@ static bool nbl_chan_check_dma_err(struct nbl_channel_mgt *chan_mgt, u8 chan_typ { struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + if (phy_ops->get_hw_status(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt))) + return false; + if (chan_type == NBL_CHAN_TYPE_MAILBOX) return phy_ops->check_mailbox_dma_err(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), tx); else return phy_ops->check_adminq_dma_err(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), tx); } -static u16 nbl_chan_update_txqueue(struct nbl_channel_mgt *chan_mgt, - struct nbl_chan_info *chan_info, u16 dstid, - enum nbl_chan_msg_type msg_type, - void *arg, size_t arg_len) +static int nbl_chan_update_txqueue(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info, struct nbl_chan_tx_param *param) { - struct device *dev = NBL_COMMON_TO_DEV(chan_mgt->common); - struct nbl_chan_ring *txq; - struct nbl_chan_tx_desc *tx_desc; - struct nbl_chan_buf *tx_buf; - u16 next_to_use; + struct nbl_chan_ring *txq = &chan_info->txq; + struct nbl_chan_tx_desc *tx_desc = NBL_CHAN_TX_RING_TO_DESC(txq, txq->next_to_use); + struct nbl_chan_buf *tx_buf = NBL_CHAN_TX_RING_TO_BUF(txq, txq->next_to_use); - txq = &chan_info->txq; - next_to_use = txq->next_to_use; - tx_buf = NBL_CHAN_TX_RING_TO_BUF(txq, next_to_use); - tx_desc = NBL_CHAN_TX_RING_TO_DESC(txq, next_to_use); - - tx_desc->dstid = dstid; - tx_desc->msg_type = msg_type; - tx_desc->msgid = next_to_use; - if (arg_len > NBL_CHAN_BUF_LEN - sizeof(*tx_desc)) { - dev_err(dev, "%s, arg_len:%ld, too long!", __func__, arg_len); - return -1; - } + if (param->arg_len > NBL_CHAN_BUF_LEN - sizeof(*tx_desc)) + return -EINVAL; + + tx_desc->dstid = param->dstid; + tx_desc->msg_type = param->msg_type; + tx_desc->msgid = param->msgid; - if (arg_len > NBL_CHAN_TX_DESC_EMBEDDED_DATA_LEN) { - memcpy(tx_buf->va, arg, arg_len); + if (param->arg_len > NBL_CHAN_TX_DESC_EMBEDDED_DATA_LEN) { + memcpy(tx_buf->va, param->arg, param->arg_len); tx_desc->buf_addr = tx_buf->pa; - tx_desc->buf_len = arg_len; + tx_desc->buf_len = param->arg_len; tx_desc->data_len = 0; } else { - memcpy(tx_desc->data, arg, arg_len); + memcpy(tx_desc->data, param->arg, param->arg_len); tx_desc->buf_len = 0; - tx_desc->data_len = arg_len; + tx_desc->data_len = param->arg_len; } tx_desc->flags = NBL_CHAN_TX_DESC_AVAIL; /* wmb */ wmb(); - txq->next_to_use++; - if (txq->next_to_use == chan_info->num_txq_entries) - txq->next_to_use = 0; + txq->next_to_use = NBL_NEXT_ID(txq->next_to_use, chan_info->num_txq_entries - 1); txq->tail_ptr++; - return next_to_use; + return 0; } static int nbl_chan_kick_tx_ring(struct nbl_channel_mgt *chan_mgt, struct nbl_chan_info *chan_info) { - struct nbl_phy_ops *phy_ops; - struct nbl_common_info *common = chan_mgt->common; - struct device *dev = NBL_COMMON_TO_DEV(common); - struct nbl_chan_ring *txq; + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + struct nbl_chan_ring *txq = &chan_info->txq; struct 
nbl_chan_tx_desc *tx_desc; - int i; - - phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); - - txq = &chan_info->txq; + int i = 0; /* mb for doorbell */ mb(); @@ -714,16 +737,14 @@ static int nbl_chan_kick_tx_ring(struct nbl_channel_mgt *chan_mgt, udelay(NBL_CHAN_TX_WAIT_US); i++; - if (!(i % NBL_CHAN_TX_REKICK_WAIT_TIMES)) { + if (!(i % NBL_CHAN_TX_REKICK_WAIT_TIMES)) NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, txq->tail_ptr, NBL_MB_TX_QID); - } if (i == NBL_CHAN_TX_WAIT_TIMES) { - dev_err(dev, "bus:%u, dev:%u, func:%u, chan send message type: %d timeout\n", - common->bus, common->devid, NBL_COMMON_TO_PCI_FUNC_ID(common), + nbl_err(common, NBL_DEBUG_MBX, "chan send message type: %d timeout\n", tx_desc->msg_type); - return -1; + return -EAGAIN; } } @@ -735,45 +756,50 @@ static void nbl_chan_recv_ack_msg(void *priv, u16 srcid, u16 msgid, void *data, u32 data_len) { struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; - struct nbl_chan_info *chan_info = NULL; struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); - struct device *dev = NBL_COMMON_TO_DEV(common); - struct nbl_chan_waitqueue_head *wait_head; + struct nbl_chan_info *chan_info = NULL; + struct nbl_chan_waitqueue_head *wait_head = NULL; + union nbl_chan_msg_id ack_msgid = {{0}}; u32 *payload = (u32 *)data; - u32 ack_msgid; - u32 ack_msgtype; + u32 ack_datalen = 0, ack_msgtype = 0, copy_len = 0; if (srcid == NBL_CHAN_ADMINQ_FUNCTION_ID) chan_info = NBL_CHAN_MGT_TO_ADMINQ(chan_mgt); else chan_info = NBL_CHAN_MGT_TO_MAILBOX(chan_mgt); + ack_datalen = data_len - 3 * sizeof(u32); ack_msgtype = *payload; - ack_msgid = *(payload + 1); - wait_head = &chan_info->wait[ack_msgid]; + ack_msgid.id = *(u16 *)(payload + 1); + wait_head = &chan_info->wait[ack_msgid.info.loc]; wait_head->ack_err = *(payload + 2); - if (ack_msgtype != wait_head->msg_type) - nbl_debug(common, NBL_DEBUG_MBX, - "ack_msgtype %d donot match msg_type %d\n", - ack_msgtype, wait_head->msg_type); + if (wait_head->msg_type != ack_msgtype) { + nbl_warn(common, NBL_DEBUG_MBX, "Skip ack msg type %d does not match msg type %d\n", + ack_msgtype, wait_head->msg_type); + return; + } + if (wait_head->status != NBL_MBX_STATUS_WAITING) { nbl_warn(common, NBL_DEBUG_MBX, "Skip ack with status %d", wait_head->status); return; } - if (wait_head->ack_err >= 0 && (data_len > 3 * sizeof(u32))) { - if (data_len - 3 * sizeof(u32) != wait_head->ack_data_len) { - dev_err(dev, "%x:%x.%x payload_len donot match ack_data_len!, srcid:%u,\n" - "msgtype:%u, msgid:%u, data_len:%u, ack_data_len:%u\n", - common->bus, common->devid, NBL_COMMON_TO_PCI_FUNC_ID(common), - srcid, ack_msgtype, ack_msgid, data_len, wait_head->ack_data_len); - goto wakeup; - } - memcpy((char *)wait_head->ack_data, payload + 3, data_len - 3 * sizeof(int)); + if (wait_head->msg_index != ack_msgid.info.index) { + nbl_warn(common, NBL_DEBUG_MBX, "Skip ack index %d does not match index %d", + ack_msgid.info.index, wait_head->msg_index); + return; } -wakeup: + if (ack_datalen != wait_head->ack_data_len) + nbl_debug(common, NBL_DEBUG_MBX, "Channel payload_len does not match ack_data_len, msgtype:%u, msgid:%u, rcv_data_len:%u, expect_data_len:%u\n", + ack_msgtype, ack_msgid.id, ack_datalen, wait_head->ack_data_len); + + copy_len = min_t(u32, wait_head->ack_data_len, ack_datalen); + if (wait_head->ack_err >= 0 && copy_len > 0) + memcpy((char *)wait_head->ack_data, payload + 3, copy_len); + wait_head->ack_data_len = (u16)copy_len; + /* wmb */ wmb(); wait_head->acked = 1; @@ -832,6 +858,7 @@ static int 
nbl_chan_msg_forward_userdev(struct nbl_channel_mgt *chan_mgt, static void nbl_chan_recv_msg(struct nbl_channel_mgt *chan_mgt, void *data, u32 data_len) { + struct nbl_chan_ack_info chan_ack; struct nbl_chan_tx_desc *tx_desc; struct nbl_chan_msg_node_data *msg_handler; struct device *dev = NBL_COMMON_TO_DEV(chan_mgt->common); @@ -840,14 +867,12 @@ static void nbl_chan_recv_msg(struct nbl_channel_mgt *chan_mgt, void *data, u32 tx_desc = data; msg_type = tx_desc->msg_type; - dev_dbg(dev, "%s recv msg_type: %d\n", __func__, tx_desc->msg_type); + dev_dbg(dev, "recv msg_type: %d\n", tx_desc->msg_type); srcid = tx_desc->srcid; msgid = tx_desc->msgid; - if (msg_type >= NBL_CHAN_MSG_MAX) { - dev_err(dev, "Invalid chan message type %u\n", msg_type); - return; - } + if (msg_type >= NBL_CHAN_MSG_MAX) + goto send_warning; if (tx_desc->data_len) { payload = (void *)tx_desc->data; @@ -863,7 +888,7 @@ static void nbl_chan_recv_msg(struct nbl_channel_mgt *chan_mgt, void *data, u32 msg_handler->func(msg_handler->priv, srcid, msgid, payload, payload_len); } - if (chan_mgt->notify) { + if (chan_mgt->notify && msg_type < NBL_CHAN_MSG_MAILBOX_MAX) { mutex_lock(&chan_mgt->notify->lock); if (chan_mgt->notify->eventfd && test_bit(msg_type, chan_mgt->notify->msgtype) && chan_mgt->notify->shm_msg_ring) { @@ -873,9 +898,12 @@ static void nbl_chan_recv_msg(struct nbl_channel_mgt *chan_mgt, void *data, u32 mutex_unlock(&chan_mgt->notify->lock); } - if (warn) - dev_warn(dev, "Recv channel msg_type: %d, but msg_handler is null!\n", - tx_desc->msg_type); +send_warning: + if (warn) { + NBL_CHAN_ACK(chan_ack, srcid, msg_type, msgid, -EPERM, NULL, 0); + nbl_chan_send_ack(chan_mgt, &chan_ack); + dev_warn(dev, "Recv channel msg_type: %d, but msg_handler is null!\n", msg_type); + } } static void nbl_chan_advance_rx_ring(struct nbl_channel_mgt *chan_mgt, @@ -938,7 +966,7 @@ static void nbl_chan_clean_queue(struct nbl_channel_mgt *chan_mgt, struct nbl_ch rxq->next_to_clean = next_to_clean; } -void nbl_chan_clean_queue_subtask(void *priv, u8 chan_type) +static void nbl_chan_clean_queue_subtask(void *priv, u8 chan_type) { struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); @@ -950,70 +978,113 @@ void nbl_chan_clean_queue_subtask(void *priv, u8 chan_type) nbl_chan_clean_queue(chan_mgt, chan_info); } +static int nbl_chan_get_msg_id(struct nbl_chan_info *chan_info, union nbl_chan_msg_id *msgid) +{ + struct nbl_chan_waitqueue_head *wait = NULL; + int valid_loc = chan_info->wait_head_index, i; + + for (i = 0; i < NBL_CHAN_QUEUE_LEN; i++) { + wait = &chan_info->wait[valid_loc]; + + if (wait->status != NBL_MBX_STATUS_WAITING) { + wait->msg_index = NBL_NEXT_ID(wait->msg_index, NBL_CHAN_MSG_INDEX_MAX - 1); + msgid->info.index = wait->msg_index; + msgid->info.loc = valid_loc; + + valid_loc = NBL_NEXT_ID(valid_loc, chan_info->num_txq_entries - 1); + chan_info->wait_head_index = valid_loc; + return 0; + } + + valid_loc = NBL_NEXT_ID(valid_loc, chan_info->num_txq_entries - 1); + } + + return -ENOSPC; +} + static int nbl_chan_send_msg(void *priv, struct nbl_chan_send_info *chan_send) { struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); - struct device *dev = NBL_COMMON_TO_DEV(common); - struct nbl_chan_info *chan_info = NULL; + struct nbl_chan_info *chan_info = NBL_CHAN_GET_INFO(chan_mgt, chan_send->dstid); struct nbl_chan_waitqueue_head *wait_head; - u16 msgid; - int 
i = NBL_CHAN_TX_WAIT_ACK_TIMES, ret; - int resend_times = 0; + union nbl_chan_msg_id msgid = {{0}}; + struct nbl_chan_tx_param tx_param = {0}; + int i = NBL_CHAN_TX_WAIT_ACK_TIMES, resend_times = 0, ret = 0; + bool need_resend = true; /* need to resend when ack times out */ - if (chan_send->dstid == NBL_CHAN_ADMINQ_FUNCTION_ID) - chan_info = NBL_CHAN_MGT_TO_ADMINQ(chan_mgt); - else - chan_info = NBL_CHAN_MGT_TO_MAILBOX(chan_mgt); + if (chan_send->arg_len > NBL_CHAN_BUF_LEN - sizeof(struct nbl_chan_tx_desc)) + return -EINVAL; + + if (test_bit(NBL_CHAN_ABNORMAL, chan_info->state)) + return -EFAULT; resend: spin_lock(&chan_info->txq_lock); - msgid = nbl_chan_update_txqueue(chan_mgt, chan_info, chan_send->dstid, - chan_send->msg_type, - chan_send->arg, chan_send->arg_len); - if (msgid == 0xFFFF) { + ret = nbl_chan_get_msg_id(chan_info, &msgid); + if (ret) { spin_unlock(&chan_info->txq_lock); - dev_err(dev, "chan tx queue full, send msgtype:%u to dstid:%u failed\n", + nbl_err(common, NBL_DEBUG_MBX, "Channel tx wait head full, send msgtype:%u to dstid:%u failed\n", chan_send->msg_type, chan_send->dstid); - return -1; + return ret; } - if (!chan_send->ack) { - ret = nbl_chan_kick_tx_ring(chan_mgt, chan_info); + tx_param.msg_type = chan_send->msg_type; + tx_param.arg = chan_send->arg; + tx_param.arg_len = chan_send->arg_len; + tx_param.dstid = chan_send->dstid; + tx_param.msgid = msgid.id; + + ret = nbl_chan_update_txqueue(chan_mgt, chan_info, &tx_param); + if (ret) { spin_unlock(&chan_info->txq_lock); - if (ret) - goto check_tx_dma_err; - else - return ret; + nbl_err(common, NBL_DEBUG_MBX, "Channel tx queue full, send msgtype:%u to dstid:%u failed\n", + chan_send->msg_type, chan_send->dstid); + return ret; } - wait_head = &chan_info->wait[msgid]; + wait_head = &chan_info->wait[msgid.info.loc]; init_waitqueue_head(&wait_head->wait_queue); + wait_head->acked = 0; wait_head->ack_data = chan_send->resp; wait_head->ack_data_len = chan_send->resp_len; - wait_head->acked = 0; wait_head->msg_type = chan_send->msg_type; - wait_head->need_waked = 1; - wait_head->status = NBL_MBX_STATUS_WAITING; + wait_head->need_waked = chan_send->ack; + wait_head->msg_index = msgid.info.index; + wait_head->status = chan_send->ack ? 
NBL_MBX_STATUS_WAITING : NBL_MBX_STATUS_IDLE; + ret = nbl_chan_kick_tx_ring(chan_mgt, chan_info); + spin_unlock(&chan_info->txq_lock); - if (ret) + + if (ret) { + wait_head->status = NBL_MBX_STATUS_TIMEOUT; goto check_tx_dma_err; + } + + if (!chan_send->ack) + return 0; + + if (chan_send->dstid != common->mgt_pf && chan_send->msg_type != NBL_CHAN_MSG_KEEP_ALIVE) + need_resend = false; if (test_bit(NBL_CHAN_INTERRUPT_READY, chan_info->state)) { ret = wait_event_timeout(wait_head->wait_queue, wait_head->acked, NBL_CHAN_ACK_WAIT_TIME); if (!ret) { - dev_err(dev, "wait bus:%u, dev:%u, func:%u, chan send message type: %d\n" - "msg id: %u wait ack timeout\n", common->bus, common->devid, - NBL_COMMON_TO_PCI_FUNC_ID(common), chan_send->msg_type, msgid); wait_head->status = NBL_MBX_STATUS_TIMEOUT; + if (!need_resend) + return 0; + nbl_err(common, NBL_DEBUG_MBX, "Channel waiting ack failed, message type: %d, msg id: %u\n", + chan_send->msg_type, msgid.id); goto check_rx_dma_err; } /* rmb for ack */ rmb(); + chan_send->ack_len = wait_head->ack_data_len; + wait_head->status = NBL_MBX_STATUS_IDLE; return wait_head->ack_err; } @@ -1021,34 +1092,35 @@ static int nbl_chan_send_msg(void *priv, struct nbl_chan_send_info *chan_send) while (i--) { nbl_chan_clean_queue(chan_mgt, chan_info); - if (wait_head->acked) + if (wait_head->acked) { + chan_send->ack_len = wait_head->ack_data_len; + wait_head->status = NBL_MBX_STATUS_IDLE; return wait_head->ack_err; + } usleep_range(NBL_CHAN_TX_WAIT_ACK_US_MIN, NBL_CHAN_TX_WAIT_ACK_US_MAX); } wait_head->status = NBL_MBX_STATUS_TIMEOUT; - dev_err(dev, "polling bus:%u, dev:%u, func:%u, chan send message type: %d msg id: %u\n" - "wait ack timeout\n", common->bus, common->devid, - NBL_COMMON_TO_PCI_FUNC_ID(common), chan_send->msg_type, msgid); + nbl_err(common, NBL_DEBUG_MBX, "Channel polling ack failed, message type: %d msg id: %u\n", + chan_send->msg_type, msgid.id); check_rx_dma_err: if (nbl_chan_check_dma_err(chan_mgt, chan_info->chan_type, false)) { - dev_err(dev, "nbl channel rx dma error\n"); + nbl_err(common, NBL_DEBUG_MBX, "nbl channel rx dma error\n"); nbl_chan_reset_queue(chan_mgt, chan_info->chan_type, false); chan_info->rxq_reset_times++; } check_tx_dma_err: if (nbl_chan_check_dma_err(chan_mgt, chan_info->chan_type, true)) { - dev_err(dev, "nbl channel tx dma error\n"); + nbl_err(common, NBL_DEBUG_MBX, "nbl channel tx dma error\n"); nbl_chan_reset_queue(chan_mgt, chan_info->chan_type, true); chan_info->txq_reset_times++; } - resend_times++; - if (resend_times > NBL_CHAN_RESEND_MAX_TIMES) { - dev_err(dev, "nbl channel resend_times %d\n", resend_times); - return -1; + if (++resend_times >= NBL_CHAN_RESEND_MAX_TIMES) { + nbl_err(common, NBL_DEBUG_MBX, "nbl channel resend_times %d\n", resend_times); + return -EFAULT; } i = NBL_CHAN_TX_WAIT_ACK_TIMES; @@ -1079,6 +1151,13 @@ static int nbl_chan_send_ack(void *priv, struct nbl_chan_ack_info *chan_ack) return 0; } +static void nbl_chan_unregister_msg(void *priv, u16 msg_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + + nbl_chan_delete_msg_handler(chan_mgt, msg_type); +} + static int nbl_chan_register_msg(void *priv, u16 msg_type, nbl_chan_resp func, void *callback_priv) { struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; @@ -1103,19 +1182,6 @@ static bool nbl_chan_check_queue_exist(void *priv, u8 chan_type) return chan_info ? 
true : false; } -static int nbl_chan_set_queue_interrupt_state(void *priv, u8 chan_type, bool ready) -{ - struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; - struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); - - if (ready) - set_bit(NBL_CHAN_INTERRUPT_READY, chan_info->state); - else - clear_bit(NBL_CHAN_INTERRUPT_READY, chan_info->state); - - return 0; -} - static int nbl_chan_dump_txq(void *priv, struct seq_file *m, u8 type) { struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; @@ -1296,6 +1362,7 @@ static int nbl_chan_setup_keepalive(void *priv, u16 dest_id, u8 chan_type) nbl_chan_keepalive_resp, chan_mgt); nbl_common_alloc_delayed_task(&keepalive->keepalive_task, nbl_chan_keepalive); + keepalive->task_setuped = true; nbl_common_queue_delayed_work_keepalive(&keepalive->keepalive_task, jiffies_to_msecs(keepalive->timeout)); @@ -1308,7 +1375,11 @@ static void nbl_chan_remove_keepalive(void *priv, u8 chan_type) struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + if (!chan_info->keepalive.task_setuped) + return; + nbl_common_release_delayed_task(&chan_info->keepalive.keepalive_task); + chan_info->keepalive.task_setuped = false; } static void nbl_chan_register_chan_task(void *priv, u8 chan_type, struct work_struct *task) @@ -1319,15 +1390,26 @@ static void nbl_chan_register_chan_task(void *priv, u8 chan_type, struct work_st chan_info->clean_task = task; } +static void nbl_chan_set_queue_state(void *priv, enum nbl_chan_state state, u8 chan_type, u8 set) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + if (set) + set_bit(state, chan_info->state); + else + clear_bit(state, chan_info->state); +} + static struct nbl_channel_ops chan_ops = { .send_msg = nbl_chan_send_msg, .send_ack = nbl_chan_send_ack, .register_msg = nbl_chan_register_msg, + .unregister_msg = nbl_chan_unregister_msg, .cfg_chan_qinfo_map_table = nbl_chan_cfg_qinfo_map_table, .check_queue_exist = nbl_chan_check_queue_exist, .setup_queue = nbl_chan_setup_queue, .teardown_queue = nbl_chan_teardown_queue, - .set_queue_interrupt_state = nbl_chan_set_queue_interrupt_state, .clean_queue_subtask = nbl_chan_clean_queue_subtask, /* for mailbox register msg for userdev */ @@ -1338,9 +1420,14 @@ static struct nbl_channel_ops chan_ops = { .dump_rxq = nbl_chan_dump_rxq, .get_adminq_tx_buf_size = nbl_chan_get_adminq_tx_buf_size, + .init_cmdq = nbl_chan_cmdq_mgt_start, + .deinit_cmdq = nbl_chan_cmdq_mgt_stop, + .send_cmd = nbl_chan_send_cmdq, + .setup_keepalive = nbl_chan_setup_keepalive, .remove_keepalive = nbl_chan_remove_keepalive, .register_chan_task = nbl_chan_register_chan_task, + .set_queue_state = nbl_chan_set_queue_state, }; static int nbl_chan_setup_chan_mgt(struct nbl_adapter *adapter, @@ -1407,6 +1494,11 @@ static void nbl_chan_remove_chan_mgt(struct nbl_common_info *common, devm_kfree(dev, NBL_CHAN_MGT_TO_ADMINQ(&(*chan_mgt_leonis)->chan_mgt)); devm_kfree(dev, NBL_CHAN_MGT_TO_MAILBOX(&(*chan_mgt_leonis)->chan_mgt)); + /* check and remove command queue */ + if ((*chan_mgt_leonis)->chan_mgt.cmdq_mgt) + nbl_chan_cmdq_mgt_stop(dev, &(*chan_mgt_leonis)->chan_mgt, + common->tc_inst_id); + devm_kfree(dev, *chan_mgt_leonis); *chan_mgt_leonis = NULL; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h index 237f99229836a4bef346a7b52e741fb893096448..f12cba28efc400c8501390ccfd9b65b9c9855f3c 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h @@ -25,9 +25,16 @@ #define NBL_CHAN_TX_RING_TO_BUF(tx_ring, i) (&(((tx_ring)->buf)[i])) #define NBL_CHAN_RX_RING_TO_BUF(rx_ring, i) (&(((rx_ring)->buf)[i])) +#define NBL_CHAN_GET_INFO(chan_mgt, id) \ +({ \ + typeof(chan_mgt) _chan_mgt = (chan_mgt); \ + ((id) == NBL_CHAN_ADMINQ_FUNCTION_ID && NBL_CHAN_MGT_TO_ADMINQ(_chan_mgt) ? \ + NBL_CHAN_MGT_TO_ADMINQ(_chan_mgt) : NBL_CHAN_MGT_TO_MAILBOX(_chan_mgt)); \ +}) + #define NBL_CHAN_TX_WAIT_US 100 #define NBL_CHAN_TX_REKICK_WAIT_TIMES 2000 -#define NBL_CHAN_TX_WAIT_TIMES 10000 +#define NBL_CHAN_TX_WAIT_TIMES 30000 #define NBL_CHAN_TX_WAIT_ACK_US_MIN 100 #define NBL_CHAN_TX_WAIT_ACK_US_MAX 120 @@ -37,7 +44,7 @@ #define NBL_CHAN_BUF_LEN 4096 #define NBL_CHAN_TX_DESC_EMBEDDED_DATA_LEN 16 -#define NBL_CHAN_RESEND_MAX_TIMES (5) +#define NBL_CHAN_RESEND_MAX_TIMES (3) #define NBL_CHAN_TX_DESC_AVAIL BIT(0) #define NBL_CHAN_TX_DESC_USED BIT(1) @@ -45,7 +52,7 @@ #define NBL_CHAN_RX_DESC_AVAIL BIT(3) #define NBL_CHAN_RX_DESC_USED BIT(4) -#define NBL_CHAN_ACK_WAIT_TIME (5 * HZ) +#define NBL_CHAN_ACK_WAIT_TIME (3 * HZ) /* adminq */ #define NBL_ADMINQ_QUEUE_LEN 256 @@ -59,10 +66,19 @@ enum { }; enum { - NBL_MBX_STATUS_WAITING = 0, + NBL_MBX_STATUS_IDLE = 0, + NBL_MBX_STATUS_WAITING, NBL_MBX_STATUS_TIMEOUT = -1, }; +struct nbl_chan_tx_param { + enum nbl_chan_msg_type msg_type; + void *arg; + size_t arg_len; + u16 dstid; + u16 msgid; +}; + struct nbl_chan_buf { void *va; dma_addr_t pa; @@ -100,6 +116,17 @@ struct nbl_chan_ring { dma_addr_t dma; }; +#define NBL_CHAN_MSG_INDEX_MAX 64 +#define NBL_CHAN_MSG_LOC_MAX 1024 + +union nbl_chan_msg_id { + struct nbl_chan_msg_id_info { + u16 index:6; + u16 loc:10; + } info; + u16 id; +}; + struct nbl_chan_waitqueue_head { struct wait_queue_head wait_queue; char *ack_data; @@ -109,6 +136,7 @@ struct nbl_chan_waitqueue_head { u16 need_waked; u16 msg_type; u8 status; + u8 msg_index; }; struct nbl_chan_notify_userdev { @@ -130,6 +158,8 @@ struct nbl_chan_keepalive_info { u16 keepalive_dest; u8 success_cnt; u8 fail_cnt; + bool task_setuped; + u8 resv[3]; }; struct nbl_chan_info { @@ -142,6 +172,8 @@ struct nbl_chan_info { struct work_struct *clean_task; struct nbl_chan_keepalive_info keepalive; + u16 wait_head_index; + u16 num_txq_entries; u16 num_rxq_entries; u16 txq_buf_size; @@ -164,6 +196,7 @@ struct nbl_channel_mgt { struct nbl_common_info *common; struct nbl_phy_ops_tbl *phy_ops_tbl; struct nbl_chan_info *chan_info[NBL_CHAN_TYPE_MAX]; + struct nbl_cmdq_mgt *cmdq_mgt; struct nbl_chan_notify_userdev *notify; void *handle_hash_tbl; }; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.c new file mode 100644 index 0000000000000000000000000000000000000000..bda5b059b620a03751e3b0c807a201f428d10525 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include +#include "nbl_cmdq.h" + +static u8 g_seq_index; +spinlock_t nbl_tc_flow_inst_lock; /* used to protect global instance resources */ + +static inline void *nbl_cmdq_alloc_dma_mem(struct device *dma_dev, + struct nbl_cmdq_dma_mem *mem, + u32 size) { + mem->size = size; + return dma_alloc_coherent(dma_dev, size, &mem->pa, GFP_KERNEL | __GFP_ZERO); +} + +static inline void nbl_cmdq_free_dma_mem(struct device *dma_dev, + struct nbl_cmdq_dma_mem *mem) { + dma_free_coherent(dma_dev, mem->size, mem->va, mem->pa); + mem->size = 0; + mem->va = NULL; + mem->pa = (dma_addr_t)0; +} + +static inline void +nbl_cmdq_free_queue_ring(struct device *dma_dev, struct nbl_cmd_ring *ring) +{ + nbl_cmdq_free_dma_mem(dma_dev, &ring->desc); +} + +/** + * @brief: allocate data buffers for the send ring + * @queue: pointer to the command queue + */ +static enum nbl_cmd_status +nbl_cmdq_alloc_queue_bufs(const struct nbl_cmd_queue *queue, + struct nbl_cmd_ring *ring) +{ + int i; + struct nbl_cmdq_dma_mem *bi; + struct nbl_channel_mgt *chan_mgt = queue->chan_mgt; + struct device *dma_dev = chan_mgt->common->dma_dev; + + /* No mapped memory needed yet, just the buffer info structures */ + ring->in_buffer_dma_head = kcalloc(queue->cmd_ring_depth, sizeof(struct nbl_cmdq_dma_mem), + GFP_ATOMIC); + if (!ring->in_buffer_dma_head) + return -ENOMEM; + + ring->in_buffer_info = (struct nbl_cmdq_dma_mem *)ring->in_buffer_dma_head; + + /* allocate the mapped in buffers */ + ring->in_mem.va = nbl_cmdq_alloc_dma_mem(dma_dev, &ring->in_mem, + queue->sq_buf_size * queue->cmd_ring_depth); + if (!ring->in_mem.va) + goto dealloc_cmd_queue_in_bufs; + + for (i = 0; i < queue->cmd_ring_depth; i++) { + bi = &ring->in_buffer_info[i]; + bi->va = (char *)ring->in_mem.va + i * queue->sq_buf_size; + bi->pa = ring->in_mem.pa + i * queue->sq_buf_size; + bi->size = queue->sq_buf_size; + } + + /* alloc dma_mem array for out buffers */ + ring->out_buffer_dma_head = kcalloc(queue->cmd_ring_depth, sizeof(struct nbl_cmdq_dma_mem), + GFP_ATOMIC); + if (!ring->out_buffer_dma_head) + goto dealloc_cmd_queue_out_bufs; + + ring->out_buffer_info = (struct nbl_cmdq_dma_mem *)ring->out_buffer_dma_head; + + /* allocate the mapped out buffers */ + ring->out_mem.va = nbl_cmdq_alloc_dma_mem(dma_dev, &ring->out_mem, + queue->sq_buf_size * queue->cmd_ring_depth); + if (!ring->out_mem.va) + goto dealloc_cmd_queue_out_bufs; + + for (i = 0; i < queue->cmd_ring_depth; i++) { + bi = &ring->out_buffer_info[i]; + bi->va = (char *)ring->out_mem.va + i * queue->sq_buf_size; + bi->pa = ring->out_mem.pa + i * queue->sq_buf_size; + bi->size = queue->sq_buf_size; + } + + return NBL_CMDQ_SUCCESS; + +dealloc_cmd_queue_out_bufs: + ring->out_buffer_info = NULL; + kfree(ring->out_buffer_dma_head); + ring->out_buffer_dma_head = NULL; + i = queue->cmd_ring_depth; + + nbl_cmdq_free_dma_mem(dma_dev, &ring->in_mem); + for (i = 0; i < queue->cmd_ring_depth; i++) { + bi = &ring->in_buffer_info[i]; + bi->va = NULL; + bi->pa = 0; + bi->size = 0; + } + +dealloc_cmd_queue_in_bufs: + ring->in_buffer_info = NULL; + kfree(ring->in_buffer_dma_head); + ring->in_buffer_dma_head = NULL; + return -ENOMEM; +} + +/** + * @brief: allocate the descriptor ring for the send queue + * @queue: pointer to the command queue + */ +static enum nbl_cmd_status +nbl_cmdq_alloc_queue_ring(const struct nbl_cmd_queue *queue, + struct nbl_cmd_ring *ring) +{ + u32 size = queue->cmd_ring_depth * sizeof(struct nbl_cmd_desc); + struct nbl_channel_mgt *chan_mgt = queue->chan_mgt; + struct device *dma_dev = 
chan_mgt->common->dma_dev; + + ring->desc.va = nbl_cmdq_alloc_dma_mem(dma_dev, &ring->desc, size); + if (!ring->desc.va) + return -ENOMEM; + + return NBL_CMDQ_SUCCESS; +} + +/** + * @brief: free the buffer for the send ring + * @ring: pointer to the command ring + */ +static void +nbl_cmdq_free_queue_bufs(struct device *dma_dev, struct nbl_cmd_ring *ring) +{ + /* free in buffers */ + if (ring->in_mem.va) + nbl_cmdq_free_dma_mem(dma_dev, &ring->in_mem); + + /* free out buffers */ + if (ring->out_mem.va) + nbl_cmdq_free_dma_mem(dma_dev, &ring->out_mem); + + /* free in and out DMA rings */ + kfree(ring->in_buffer_dma_head); + kfree(ring->out_buffer_dma_head); +} + +/** + * @brief: init the send ring of command queue + * @queue: input, pointer to the command queue + * @return: NBL_CMDQ_SUCCESS on success, error status otherwise + */ +static enum nbl_cmd_status +nbl_cmdq_init_sq_ring(struct nbl_cmd_queue *queue) +{ + enum nbl_cmd_status status; + struct nbl_cmd_ring *ring = &queue->sq_ring; + struct nbl_channel_mgt *chan_mgt = queue->chan_mgt; + struct device *dma_dev = chan_mgt->common->dma_dev; + + /* check if the queue is already initialized */ + if (ring->count > 0) { + status = NBL_CMDQ_NOT_READY; + goto init_cmd_queue_exit; + } + + status = nbl_cmdq_alloc_queue_ring(queue, ring); + if (status) + goto init_cmd_queue_exit; + + status = nbl_cmdq_alloc_queue_bufs(queue, ring); + if (status) + goto init_cmd_queue_free_rings; + + ring->next_to_use = 0; + ring->next_to_clean = 0; + ring->doorbell = 0; + + /* on success */ + ring->count = queue->cmd_ring_depth; + goto init_cmd_queue_exit; + +init_cmd_queue_free_rings: + nbl_cmdq_free_queue_bufs(dma_dev, ring); + nbl_cmdq_free_queue_ring(dma_dev, ring); + +init_cmd_queue_exit: + return status; +} + +static void +nbl_cmdq_init_queue_parameters(struct nbl_cmd_queue *cmd_queue) +{ + cmd_queue->sq_buf_size = NBL_CMDQ_BUF_SIZE; + cmd_queue->cmd_ring_depth = NBL_CMDQ_RING_DEPTH; + cmd_queue->sq_ring.count = 0; +} + +/** + * @brief: shutdown the queue, will free the ring + * @queue: input, pointer to the command queue to shut down + */ +static enum nbl_cmd_status +nbl_cmdq_shutdown_queue(struct nbl_cmd_queue *queue, + struct nbl_cmd_ring *ring) +{ + struct nbl_channel_mgt *chan_mgt = queue->chan_mgt; + struct device *dma_dev = chan_mgt->common->dma_dev; + + /* reset cmd queue related registers */ + spin_lock(&queue->sq_lock); + ring->count = 0; + + /* free cmd queue ring */ + nbl_cmdq_free_queue_bufs(dma_dev, ring); + nbl_cmdq_free_queue_ring(dma_dev, ring); + + spin_unlock(&queue->sq_lock); + return NBL_CMDQ_SUCCESS; +} + +static inline enum nbl_cmd_status +nbl_cmdq_check_queue(const struct nbl_cmd_ring *ring, const struct nbl_common_info *common) +{ + if (!ring->count) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq not initialized yet."); + return NBL_CMDQ_CQ_NOT_READY; + } + + return NBL_CMDQ_SUCCESS; +} + +static enum nbl_cmd_status +nbl_cmdq_destroy_queue(struct nbl_cmd_queue *queue) +{ + enum nbl_cmd_status status = NBL_CMDQ_SUCCESS; + struct nbl_cmd_ring *ring = &queue->sq_ring; + struct nbl_common_info *common = queue->chan_mgt->common; + + /* check queue status, abort destroy if queue not ready */ + status = nbl_cmdq_check_queue(ring, common); + if (status == NBL_CMDQ_CQ_NOT_READY) + return status; + + /* shutdown queue */ + return nbl_cmdq_shutdown_queue(queue, ring); +} + +static inline bool +nbl_cmdq_flag_check_cmd_done(const struct nbl_cmd_desc *desc) { + return (desc->flags & NBL_CMDQ_DESC_FLAG_DONE); +} + +/** + * @brief: clean the command 
send queue and return the free descriptor count + * @cmd_queue: input, pointer to the command queue + * @return: number of free desc in the queue + */ +static enum nbl_cmd_status +nbl_cmdq_clean_sq_ring(struct nbl_cmd_queue *cmd_queue) +{ + struct nbl_cmd_ring *ring = &cmd_queue->sq_ring; + u16 ntc = ring->next_to_clean; + struct nbl_cmd_desc *desc = NBL_CMDQ_GET_DESC(*ring, ntc); + + while (1) { + if (nbl_cmdq_flag_check_cmd_done(desc)) + memset(desc, 0, sizeof(*desc)); + else + break; + + ntc++; + if (ntc == ring->count) + ntc = 0; + + /* next descriptor */ + desc = NBL_CMDQ_GET_DESC(*ring, ntc); + } + + desc = NULL; + ring->next_to_clean = ntc; + return (ring->next_to_clean > ring->next_to_use ? 0 : ring->count) + + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * @brief: check whether the command has been processed + * @desc: input, pointer to the descriptor to check + * @return: true if the DD bit is set in the descriptor flags + */ +static inline bool +nbl_cmdq_flag_check_dd(const struct nbl_cmd_desc *desc) +{ + return (desc->flags & NBL_CMDQ_DESC_FLAG_DD); +} + +static inline bool +nbl_cmdq_flag_check_out_buffer(const struct nbl_cmd_desc *desc) +{ + return (desc->flags & NBL_CMDQ_DESC_FLAG_BUF_OUT); +} + +static inline bool +nbl_cmdq_flag_check_error(const struct nbl_cmd_desc *desc) +{ + return (desc->flags & NBL_CMDQ_DESC_FLAG_ERR); +} + +static inline bool +nbl_cmdq_flag_check_hit(const struct nbl_cmd_desc *desc) +{ + return (desc->flags & NBL_CMDQ_DESC_FLAG_HIT); +} + +static inline void +nbl_cmdq_flag_mark_cmd_done(struct nbl_cmd_desc *desc) { + desc->flags |= NBL_CMDQ_DESC_FLAG_DONE; +} + +static inline bool +nbl_cmdq_flag_check_interface_error(struct nbl_cmd_desc *desc) { + return (desc->flags & NBL_CMDQ_DESC_FLAG_IF_ERR); +} + +static enum nbl_cmd_status +nbl_cmdq_execution_nolock(struct nbl_cmd_queue *queue, + struct nbl_cmd_ring *ring, + const struct nbl_cmd_hdr *hdr, + struct nbl_cmd_desc *desc, + const struct nbl_cmd_content *cmd) +{ + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(queue->chan_mgt); + struct nbl_common_info *common = queue->chan_mgt->common; + + /* clean the cmd send queue to reclaim descriptors */ + if (nbl_cmdq_clean_sq_ring(queue) == 0) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmd send queue full!"); + return NBL_CMDQ_CQ_FULL; + } + + /* fill descriptor */ + desc->block = cpu_to_le16(hdr->block); + desc->module = cpu_to_le16(hdr->module); + desc->table = cpu_to_le16(hdr->table); + desc->opcode = cpu_to_le16(hdr->opcode); + desc->param_high = cpu_to_le32(NBL_CMDQ_HI_DWORD(cmd->in_params)); + desc->param_low = cpu_to_le32(NBL_CMDQ_LO_DWORD(cmd->in_params)); + desc->flags = 0; + desc->seq = g_seq_index++; + if (g_seq_index == 16) + g_seq_index = 0; + + /* data to send */ + if (cmd->in_va && cmd->in) { + desc->datalen = cmd->in_length + NBL_CMDQ_HALF_DESC_LENGTH; + desc->flags |= cpu_to_le16(NBL_CMDQ_DESC_FLAG_BUF_IN); + desc->send_high = cpu_to_le32(NBL_CMDQ_HI_DWORD(cmd->in)); + desc->send_low = cpu_to_le32(NBL_CMDQ_LO_DWORD(cmd->in)); + } + + /* data to receive */ + if (cmd->out_va && cmd->out) { + desc->flags |= cpu_to_le16(NBL_CMDQ_DESC_FLAG_BUF_OUT); + desc->recv_high = cpu_to_le32(NBL_CMDQ_HI_DWORD(cmd->out)); + desc->recv_low = cpu_to_le32(NBL_CMDQ_LO_DWORD(cmd->out)); + } + + /* update next_to_use */ + (ring->next_to_use)++; + (ring->doorbell)++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + /* wmb */ + + wmb(); + phy_ops->update_cmdq_tail(NBL_CHAN_MGT_TO_PHY_PRIV(queue->chan_mgt), + (ring->doorbell) & 
NBL_CMDQ_DOORBELL_MASK); + return NBL_CMDQ_SUCCESS; +} + +static inline enum nbl_cmd_status +nbl_cmdq_check_content(const struct nbl_cmd_queue *queue, + const struct nbl_cmd_hdr *hdr, + const struct nbl_cmd_content *cmd) +{ + enum nbl_cmd_status status = NBL_CMDQ_SUCCESS; + + if ((cmd->in_va && !cmd->in_length) || + (!cmd->in_va && cmd->in_length) || + (cmd->in_va && cmd->in_length > queue->sq_buf_size)) { + status = NBL_CMDQ_CQ_ERR_PARAMS; + } + + /* check parameters: the receiving part */ + if ((hdr->opcode == NBL_CMD_OP_READ || + hdr->opcode == NBL_CMD_OP_SEARCH) && !cmd->out_va) + status = NBL_CMDQ_CQ_ERR_PARAMS; + + return status; +} + +static inline enum nbl_cmd_status +nbl_cmdq_check_interface_error(struct nbl_cmd_desc *desc, + struct nbl_common_info *common) +{ + u8 interface_err = 0; + enum nbl_cmd_status status = NBL_CMDQ_SUCCESS; + + /* flag error bit: error in firmware cmdq interface */ + if (nbl_cmdq_flag_check_interface_error(desc)) { + /* mark current desc as done by driver */ + nbl_cmdq_flag_mark_cmd_done(desc); + + status = NBL_CMDQ_FAILED; + interface_err = (desc->flags >> NBL_CMDQ_DESC_FLAG_IF_ERR_OFT) & + NBL_CMDQ_DESC_FLAG_IF_ERR_MASK; + switch (interface_err) { + case 0b00: + /* dma error, re-send command */ + /* abort if failed sending command 3 times in a row */ + status = NBL_CMDQ_NEED_RESEND; + break; + case 0b01: + /* driver data error, don't re-send */ + status = NBL_CMDQ_NOBUF_ERR; + break; + case 0b10: + case 0b11: + /* firmware sequence error, reset cmdq */ + status = NBL_CMDQ_NEED_RESET; + break; + default: + /* unknown error */ + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow cmdq unknown error from firmware interface"); + break; + } + } + + return status; +} + +static enum nbl_cmd_status +nbl_cmdq_fetch_response(struct nbl_cmd_queue *queue, struct nbl_cmd_desc *desc, + struct nbl_cmd_content *cmd, struct nbl_cmdq_dma_mem *buffer) +{ + u8 error_code; + const char *buf_start; + enum nbl_cmd_status status = NBL_CMDQ_SUCCESS; + struct nbl_common_info *common = queue->chan_mgt->common; + + /* check descriptor flag error bit for firmware business */ + if (nbl_cmdq_flag_check_error(desc)) { + status = NBL_CMDQ_FAILED; + error_code = desc->errorcode; + if (error_code) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq error code: %d", + error_code); + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow cmdq error flag set but no errorcode"); + } + + goto fetch_response_end; + } + + /* check return buffer flag bit */ + if (cmd->out_va && cmd->out && !nbl_cmdq_flag_check_out_buffer(desc)) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq response buffer bit not matched"); + status = NBL_CMDQ_NOBUF_ERR; + goto fetch_response_end; + } + + /* process out buffer */ + if (cmd->out_va && cmd->out && buffer) { + cmd->out_length = le16_to_cpu(desc->datalen) - NBL_CMDQ_HALF_DESC_LENGTH; + if (cmd->out_length > queue->sq_buf_size) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow cmdq buffer larger than allowed.\n"); + status = NBL_CMDQ_CQ_ERR_BUFFER; + goto fetch_response_end; + } + + if ((desc->opcode == NBL_CMD_OP_READ || + desc->opcode == NBL_CMD_OP_SEARCH) && cmd->out_va) { + buf_start = (char *)buffer->va + NBL_CMDQ_HALF_DESC_LENGTH; + memcpy(cmd->out_va, buf_start, cmd->out_length); + } + } + +fetch_response_end: + queue->sq_last_status = status; + return status; +} + +/** + * @brief: send command to firmware, the sync version, will block and wait + * for response. 
+ * @priv: input, pointer to the channel management context + * @hdr: command header, including register block, module, table and opcode + * @cmd: command content, including input and output + */ +static enum nbl_cmd_status +nbl_cmdq_do_send(void *priv, const struct nbl_cmd_hdr *hdr, + struct nbl_cmd_content *cmd) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_cmdq_mgt *cmdq_mgt = chan_mgt->cmdq_mgt; + bool hit = false; + bool completed = false; + u32 desc_index = 0; + u32 total_delay = 0; + enum nbl_cmd_status status = NBL_CMDQ_SUCCESS; + struct nbl_cmd_queue *queue = &cmdq_mgt->cmd_queue; + struct nbl_cmd_ring *ring = &queue->sq_ring; + struct nbl_cmd_desc *desc = NULL; + struct nbl_cmdq_dma_mem *in_buffer = NULL; + struct nbl_cmdq_dma_mem *out_buffer = NULL; + struct nbl_common_info *common = queue->chan_mgt->common; + + /* check cmd queue status */ + status = nbl_cmdq_check_queue(ring, common); + if (status) + goto cmd_send_end; + + /* check parameters: the sending part */ + status = nbl_cmdq_check_content(queue, hdr, cmd); + if (status) + goto cmd_send_end; + + /* lock the ring, assign buffer and send command */ + spin_lock(&queue->sq_lock); + + desc_index = ring->next_to_use; + /* assign pre-allocated dma for buffers */ + if (cmd->in_va) { + in_buffer = &ring->in_buffer_info[desc_index]; + memcpy(in_buffer->va, cmd->in_va, cmd->in_length); + cmd->in = in_buffer->pa; + } + + if (cmd->out_va) { + out_buffer = &ring->out_buffer_info[desc_index]; + cmd->out = out_buffer->pa; + } + + desc = NBL_CMDQ_GET_DESC(*ring, desc_index); + status = nbl_cmdq_execution_nolock(queue, ring, hdr, desc, cmd); + + /* check if queue is full */ + if (status == NBL_CMDQ_CQ_FULL) { + spin_unlock(&queue->sq_lock); + goto cmd_send_end; + } + + do { + if (nbl_cmdq_flag_check_dd(desc)) { + completed = true; + break; + } + + total_delay++; + udelay(NBL_CMDQ_SQ_WAIT_USEC); + } while (total_delay < queue->sq_timeout); + + hit = nbl_cmdq_flag_check_hit(desc); + /* release the lock before checking for interface errors */ + spin_unlock(&queue->sq_lock); + prefetch(desc); + if (completed && hit) { + status = nbl_cmdq_check_interface_error(desc, common); + if (status) + goto cmd_send_end; + } + + if (completed && hit) { + /* if ready, return output */ + status = nbl_cmdq_fetch_response(queue, desc, cmd, out_buffer); + } else if (!completed) { + /* timeout error */ + status = NBL_CMDQ_TIMEOUT_ERR; + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq firmware timeout!\n"); + } else { + status = NBL_CMDQ_NOHIT_ERR; + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq param error, block:%d module:%d " + "table:%d.\n", desc->block, desc->module, desc->table); + } + + /* mark desc as done by driver */ + nbl_cmdq_flag_mark_cmd_done(desc); + +cmd_send_end: + desc = NULL; + ring = NULL; + queue = NULL; + return status; +} + +static enum nbl_cmd_status +nbl_cmdq_send(void *priv, const void *vhdr, void *vcmd) +{ + enum nbl_cmd_status status; + const struct nbl_cmd_hdr *hdr = (const struct nbl_cmd_hdr *)vhdr; + struct nbl_cmd_content *cmd = (struct nbl_cmd_content *)vcmd; + + /* command execution */ + status = nbl_cmdq_do_send(priv, hdr, cmd); + return status; +} + +static enum nbl_cmd_status +nbl_cmdq_init_ring(struct nbl_cmdq_mgt *cmdq_mgt) +{ + enum nbl_cmd_status ret_code; + struct nbl_cmd_queue *cmd_queue = &cmdq_mgt->cmd_queue; + + /* set send queue write back timeout */ + cmd_queue->sq_timeout = NBL_CMDQ_TIMEOUT; + ret_code = nbl_cmdq_init_sq_ring(cmd_queue); + return ret_code; +} + +/** + * @brief: create 
the command queue + * @hw: input, pointer to the hardware related properties + */ +static enum nbl_cmd_status +nbl_cmdq_init_queue(struct nbl_cmdq_mgt *cmdq_mgt) +{ + nbl_cmdq_init_queue_parameters(&cmdq_mgt->cmd_queue); + + /* init queue lock */ + spin_lock_init(&cmdq_mgt->cmd_queue.sq_lock); + nbl_cmdq_init_ring(cmdq_mgt); + return 0; +} + +static int nbl_cmdq_init(void *priv, void *param) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + return phy_ops->init_cmdq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), param, 0); +} + +static int nbl_cmdq_destroy(void *priv) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + phy_ops->destroy_cmdq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + return 0; +} + +static int nbl_cmdq_reset(void *priv) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + phy_ops->reset_cmdq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + return 0; +} + +static void nbl_cmdq_get_param(void *priv, void *cmdq_param) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_cmdq_mgt *cmdq_mgt = chan_mgt->cmdq_mgt; + struct nbl_chan_cmdq_init_info *param = + (struct nbl_chan_cmdq_init_info *)cmdq_param; + + param->pa = cmdq_mgt->cmd_queue.sq_ring.desc.pa; + param->len = NBL_CMDQ_RING_DEPTH; +} + +int nbl_chan_send_cmdq(void *priv, const void *hdr, void *cmd) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + int ret; + + if (!chan_mgt->cmdq_mgt) { + return NBL_CMDQ_NOT_READY; + } + + ret = nbl_cmdq_send(priv, hdr, cmd); + if (ret == (int)NBL_CMDQ_NEED_RESET) + ret = nbl_cmdq_reset(priv); + else if (ret == (int)NBL_CMDQ_NEED_RESEND) + nbl_cmdq_send(priv, hdr, cmd); + + return ret; +} + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_cmdq_setup_mgt(struct device *dev, struct nbl_cmdq_mgt **cmdq_mgt) +{ + *cmdq_mgt = devm_kzalloc(dev, sizeof(**cmdq_mgt), GFP_ATOMIC); + if (!*cmdq_mgt) + return -ENOMEM; + + return 0; +} + +static void nbl_cmdq_remove_mgt(struct device *dev, struct nbl_cmdq_mgt **cmdq_mgt) +{ + devm_kfree(dev, *cmdq_mgt); + *cmdq_mgt = NULL; +} + +int nbl_chan_cmdq_mgt_start(struct device *dev, void *priv) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_common_info *common = chan_mgt->common; + struct nbl_cmdq_mgt **cmdq_mgt = &chan_mgt->cmdq_mgt; + struct nbl_chan_cmdq_init_info cmdq_param = {0}; + u8 idx = 0; + int ret = 0; + + /* if cmdq not ready, setup command queue */ + if (!(*cmdq_mgt)) { + idx = nbl_tc_alloc_inst_id(); + if (idx >= NBL_TC_FLOW_INST_COUNT) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq start failed, max tc flow instances reached!"); + return -EPERM; + } + + common->tc_inst_id = idx; + + /* alloc memory for cmdq management */ + ret = nbl_cmdq_setup_mgt(dev, cmdq_mgt); + if (ret) { + nbl_tc_unset_cmdq_info(common->tc_inst_id); + common->tc_inst_id = NBL_TC_FLOW_INST_COUNT; + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow cmdq start failed due to failed memory allocation"); + return ret; + } + + nbl_tc_set_cmdq_info(&nbl_chan_send_cmdq, (void *)chan_mgt, idx); + (*cmdq_mgt)->cmd_queue.chan_mgt = chan_mgt; + ret = nbl_cmdq_init_queue(*cmdq_mgt); + + cmdq_param.vsi_id = common->vsi_id; + cmdq_param.bdf_num = (u16)(common->hw_bus << 8 | common->devid << 3 | + 
NBL_COMMON_TO_PCI_FUNC_ID(common)); + nbl_cmdq_get_param(chan_mgt, &cmdq_param); + nbl_cmdq_init(chan_mgt, &cmdq_param); + nbl_info(common, NBL_DEBUG_FLOW, "tc flow cmdq inited\n"); + } + + (*cmdq_mgt)->cmdq_refcount++; + nbl_info(common, NBL_DEBUG_FLOW, + "tc flow cmdq ref count: %d\n", (*cmdq_mgt)->cmdq_refcount); + return (int)common->tc_inst_id; +} + +int nbl_chan_cmdq_mgt_stop(struct device *dev, void *priv, u8 inst_id) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_cmdq_mgt **cmdq_mgt = &chan_mgt->cmdq_mgt; + struct nbl_common_info *common = chan_mgt->common; + + if (inst_id >= NBL_TC_FLOW_INST_COUNT) + return 0; + + if (!(*cmdq_mgt)) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq not inited but try to deinit"); + return 0; + } else if ((*cmdq_mgt)->cmdq_refcount == 1) { + /* wait for inflight cmd to finish */ + mdelay(NBL_CMDQ_FLIGHT_DELAY); + nbl_cmdq_destroy(priv); + nbl_cmdq_destroy_queue(&(*cmdq_mgt)->cmd_queue); + nbl_cmdq_remove_mgt(dev, cmdq_mgt); + nbl_tc_unset_cmdq_info(inst_id); + common->tc_inst_id = NBL_TC_FLOW_INST_COUNT; + nbl_info(common, NBL_DEBUG_FLOW, "tc flow cmdq deinited\n"); + } else { + (*cmdq_mgt)->cmdq_refcount--; + nbl_info(common, NBL_DEBUG_FLOW, + "tc flow cmdq ref count: %d\n", (*cmdq_mgt)->cmdq_refcount); + } + + return 0; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.h new file mode 100644 index 0000000000000000000000000000000000000000..742815d3403226d0c771155905c85f31de89b354 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +/* Nebula-matrix DPDK user-network + * Copyright(c) 2021-2030 nBL, Inc. 
+ */ +#ifndef _NBL_CMDQ_H +#define _NBL_CMDQ_H + +#include "nbl_channel.h" +#include "nbl_core.h" + +#define NBL_CMDQ_HI_DWORD(x) ((u32)(((x) >> 32) & 0xFFFFFFFF)) +#define NBL_CMDQ_LO_DWORD(x) ((u32)(x) & 0xFFFFFFFF) + +#define NBL_CMDQ_TIMEOUT 100000 +#define NBL_CMDQ_FLIGHT_DELAY 500 +#define NBL_CMDQ_HALF_DESC_LENGTH 16 + +/* command resend and reset */ +#define NBL_CMDQ_RESEND_MAX_TIMES 3 +#define NBL_CMDQ_RESET_MAX_WAIT 5 + +/* initial value of descriptor */ +#define NBL_CMDQ_DESC_FLAG_DD BIT(0) +#define NBL_CMDQ_DESC_FLAG_ERR BIT(1) +#define NBL_CMDQ_DESC_FLAG_BUF_IN BIT(2) +#define NBL_CMDQ_DESC_FLAG_BUF_OUT BIT(3) +#define NBL_CMDQ_DESC_FLAG_SI BIT(4) +#define NBL_CMDQ_DESC_FLAG_EI BIT(5) +#define NBL_CMDQ_DESC_FLAG_IF_ERR BIT(6) +#define NBL_CMDQ_DESC_FLAG_HIT BIT(7) +#define NBL_CMDQ_DESC_FLAG_IF_ERR_OFT 8 +#define NBL_CMDQ_DESC_FLAG_IF_ERR_MASK (0b11) +#define NBL_CMDQ_DESC_FLAG_DONE BIT(15) + +#define NBL_CMDQ_SQ_WAIT_USEC 1 +#define NBL_CMDQ_BUF_SIZE 256 +#define NBL_CMDQ_RING_DEPTH 4096 /* max: 2^16 */ +#define NBL_CMDQ_RQ_RING_DEPTH 4096 /* max: 2^15 */ +#define NBL_CMDQ_DOORBELL_MASK 0x1FFFF + +struct nbl_cmdq_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +}; + +/** + * @brief: command ring, with pointers to ring/buffer memory + * @desc: descriptor ring DMA memory + * @in_mem: DMA area backing the command input buffers + * @out_mem: DMA area backing the command output buffers + */ +struct nbl_cmd_ring { + struct nbl_cmdq_dma_mem desc; /* descriptor ring memory */ + struct nbl_cmdq_dma_mem in_mem; + struct nbl_cmdq_dma_mem out_mem; + struct nbl_cmdq_dma_mem *in_buffer_info; /* buffer detail information */ + struct nbl_cmdq_dma_mem *out_buffer_info; /* buffer detail information */ + void *in_buffer_dma_head; /* buffer dma head */ + void *out_buffer_dma_head; /* buffer dma head */ + + u16 count; /* count of descriptors */ + u16 next_to_use; + u16 next_to_clean; + + /* only 17 bit valid for send queue, and 16 for receive queue */ + u32 doorbell; + + /* for queue tracking */ + u32 head; + u32 tail; + u32 len; + u32 cmdq_enable; + u32 cmdq_interrupt; + u32 msgq_curr_rst; + u32 msgq_interrupt; + u32 msgq_enable; + + /* ring base address */ + u32 bah; + u32 bal; +}; + +struct nbl_cmd_queue { + struct nbl_cmd_ring sq_ring; /* command send queue */ + u16 sq_buf_size; + u16 cmd_ring_depth; + spinlock_t sq_lock; /* used to lock the send queue */ + u32 sq_timeout; + enum nbl_cmd_status sq_last_status; + + struct nbl_channel_mgt *chan_mgt; +}; + +struct nbl_cmdq_mgt { + struct nbl_cmd_queue cmd_queue; + u16 cmdq_refcount; +}; + +#pragma pack(1) +/** + * struct nbl_cmd_desc - Command queue descriptor + * @brief: command queue descriptor, 32 Bytes + * @flags: basic properties of the descriptor + * @block: firmware divides the register tables into blocks, sections, tables + * @module: same as above + * @table: same as above + * @opcode: add, delete, flush, update etc. 
+ * @errorcode: command error returned by the firmware + * @datalen: valid length of the buffer + * @param_high: and _low, optional parameters for the command + * @recv_high: and _low, buffer address for receiving data + * @send_high: and _low, buffer address for sending data + */ +struct nbl_cmd_desc { + u32 flags:16; + u32 block:5; + u32 module:5; + u32 table:4; + u32 rsv:2; + u32 opcode:8; + u32 errorcode:8; + u32 datalen:12; + u32 seq:4; + u32 param_low; + u32 param_high; + u32 recv_low; + u32 recv_high; + u32 send_low; + u32 send_high; +}; + +struct nbl_cmd_rq_desc { + u32 head_data; + u32 contents[7]; +}; + +struct nbl_cmd_rq_desc_age { + u32 start_offset:17; + u32 reserved0:15; + u32 bitmap0; + u32 bitmap1; + u32 bitmap2; + u32 bitmap3; + u32 reserved1; + u32 reserved2; +}; + +#pragma pack() + +#define NBL_CMDQ_GET_DESC(ring, index) \ + (&(((struct nbl_cmd_desc *)((ring).desc.va))[index])) + +#define NBL_CMDQ_GET_RQ_DESC(ring, index) \ + (&(((struct nbl_cmd_rq_desc *)((ring).desc.va))[(index) + 1])) + +int nbl_chan_cmdq_mgt_start(struct device *dev, void *priv); +int nbl_chan_cmdq_mgt_stop(struct device *dev, void *priv, u8 inst_id); +int nbl_chan_send_cmdq(void *priv, const void *hdr, void *cmd); + +# endif /* _NBL_CMDQ_H */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c index b6aef330987adad844449d1c36d6c4f7d9cfa4b3..2dc853cdf753642a19a0f76a979e580192be6cad 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c @@ -12,6 +12,7 @@ struct nbl_common_wq_mgt { struct workqueue_struct *net_dev_wq; struct workqueue_struct *keepalive_wq; struct workqueue_struct *rdma_wq; + struct workqueue_struct *rdma_event_wq; }; void nbl_convert_mac(u8 *mac, u8 *reverse_mac) @@ -34,9 +35,12 @@ void nbl_common_queue_work(struct work_struct *task, bool ctrl_task, bool single queue_work(wq_mgt->net_dev_wq, task); } -void nbl_common_queue_work_rdma(struct work_struct *task) +void nbl_common_queue_work_rdma(struct work_struct *task, bool singlethread) { - queue_work(wq_mgt->rdma_wq, task); + if (singlethread) + queue_work(wq_mgt->rdma_wq, task); + else + queue_work(wq_mgt->rdma_event_wq, task); } void nbl_common_queue_delayed_work(struct delayed_work *task, u32 msec, @@ -82,6 +86,7 @@ void nbl_common_flush_task(struct work_struct *task) void nbl_common_destroy_wq(void) { + destroy_workqueue(wq_mgt->rdma_event_wq); destroy_workqueue(wq_mgt->rdma_wq); destroy_workqueue(wq_mgt->keepalive_wq); destroy_workqueue(wq_mgt->net_dev_wq); @@ -116,12 +121,18 @@ int nbl_common_create_wq(void) goto alloc_net_dev_wq_failed; } - wq_mgt->rdma_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nbl_rdma_wq1"); + wq_mgt->rdma_wq = create_singlethread_workqueue("nbl_rdma_wq1"); if (!wq_mgt->rdma_wq) { pr_err("Failed to create workqueue nbl_rdma_wq1\n"); goto alloc_rdma_wq_failed; } + wq_mgt->rdma_event_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nbl_rdma_wq2"); + if (!wq_mgt->rdma_event_wq) { + pr_err("Failed to create workqueue nbl_rdma_wq2\n"); + goto alloc_rdma_event_wq_failed; + } + wq_mgt->keepalive_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 0, "nbl_keepalive_wq1"); if (!wq_mgt->keepalive_wq) { @@ -132,7 +143,9 @@ int nbl_common_create_wq(void) return 0; alloc_keepalive_wq_failed: - destroy_workqueue(wq_mgt->keepalive_wq); + destroy_workqueue(wq_mgt->rdma_event_wq); +alloc_rdma_event_wq_failed: + destroy_workqueue(wq_mgt->rdma_wq); 
alloc_rdma_wq_failed: destroy_workqueue(wq_mgt->net_dev_wq); alloc_net_dev_wq_failed: @@ -177,7 +190,7 @@ void *nbl_common_init_index_table(struct nbl_index_tbl_key *key) if (!index_mgt->bitmap) goto alloc_bitmap_failed; - bucket_size = key->index_size / NBL_INDEX_HASH_DIVISOR; + bucket_size = DIV_ROUND_UP(key->index_size, NBL_INDEX_HASH_DIVISOR); index_mgt->key_hash = devm_kcalloc(key->dev, bucket_size, sizeof(struct hlist_head), GFP_KERNEL); if (!index_mgt->key_hash) @@ -189,7 +202,6 @@ void *nbl_common_init_index_table(struct nbl_index_tbl_key *key) memcpy(&index_mgt->tbl_key, key, sizeof(struct nbl_index_tbl_key)); index_mgt->free_index_num = key->index_size; index_mgt->bucket_size = bucket_size; - mutex_init(&index_mgt->lock); return index_mgt; @@ -201,32 +213,65 @@ void *nbl_common_init_index_table(struct nbl_index_tbl_key *key) return NULL; } -void nbl_common_remove_index_table(void *priv) +static void nbl_common_free_index_node(struct nbl_index_mgt *index_mgt, + struct nbl_index_entry_node *idx_node) +{ + int i; + u32 free_index; + + free_index = idx_node->index - index_mgt->tbl_key.start_index; + for (i = 0; i < idx_node->index_num; i++) + clear_bit(free_index + i, index_mgt->bitmap); + index_mgt->free_index_num += idx_node->index_num; + hlist_del(&idx_node->node); + devm_kfree(index_mgt->tbl_key.dev, idx_node); +} + +void nbl_common_remove_index_table(void *priv, struct nbl_index_tbl_del_key *key) { struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; struct device *dev; - struct nbl_index_entry_key_node *key_node; + struct nbl_index_entry_node *idx_node; struct hlist_node *list_node; int i; if (!index_mgt) return; - mutex_lock(&index_mgt->lock); dev = index_mgt->tbl_key.dev; - devm_kfree(dev, index_mgt->bitmap); for (i = 0; i < index_mgt->bucket_size; i++) { - hlist_for_each_entry_safe(key_node, list_node, index_mgt->key_hash + i, node) { - hlist_del(&key_node->node); - devm_kfree(dev, key_node); + hlist_for_each_entry_safe(idx_node, list_node, index_mgt->key_hash + i, node) { + if (key && key->action_func) + key->action_func(key->action_priv, idx_node->index, idx_node->data); + nbl_common_free_index_node(index_mgt, idx_node); } } + devm_kfree(dev, index_mgt->bitmap); devm_kfree(dev, index_mgt->key_hash); - mutex_unlock(&index_mgt->lock); devm_kfree(dev, index_mgt); } +void nbl_common_scan_index_table(void *priv, struct nbl_index_tbl_scan_key *key) +{ + struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; + struct nbl_index_entry_node *idx_node; + struct hlist_node *list_node; + int i; + + if (!index_mgt) + return; + + for (i = 0; i < index_mgt->bucket_size; i++) { + hlist_for_each_entry_safe(idx_node, list_node, index_mgt->key_hash + i, node) { + if (key && key->action_func) + key->action_func(key->action_priv, idx_node->index, idx_node->data); + if (key && key->del) + nbl_common_free_index_node(index_mgt, idx_node); + } + } +} + static u32 nbl_common_calculate_hash_key(void *key, u32 key_size, u32 bucket_size) { u32 i; @@ -245,90 +290,197 @@ static u32 nbl_common_calculate_hash_key(void *key, u32 key_size, u32 bucket_siz return hash_value % bucket_size; } -static int nbl_common_alloc_index(struct nbl_index_mgt *index_mgt, void *key, u32 key_size) +int nbl_common_find_available_idx(unsigned long *addr, u32 size, u32 idx_num, u32 multiple) +{ + u32 first_idx; + u32 next_idx; + u32 cur_idx; + u32 idx_num_tmp; + + first_idx = find_first_zero_bit(addr, size); + /* only a single index is requested, so the first free bit will do */ + if (idx_num == 1) + return first_idx; + + while (first_idx < size) { + if 
(first_idx % multiple == 0) { + idx_num_tmp = idx_num - 1; + cur_idx = first_idx; + while (cur_idx < size && idx_num_tmp > 0) { + next_idx = find_next_zero_bit(addr, size, cur_idx + 1); + if (next_idx - cur_idx != 1) + break; + idx_num_tmp--; + cur_idx = next_idx; + } + + /* reached the tail without a fit, return size as the error value */ + if (cur_idx >= size) + return size; + + /* found a contiguous free range, return its first index */ + if (!idx_num_tmp) + return first_idx; + + first_idx = first_idx + multiple; + } else { + first_idx = first_idx + 1; + } + + first_idx = find_next_zero_bit(addr, size, first_idx); + } + + return size; +} + +/** + * alloc an available index + * supports allocating contiguous indexes (num > 1), and the base index can be + * constrained to a multiple of a given value + * @key: must not be NULL + * @extra_key: must not be NULL when more than one index is requested, see + * struct nbl_index_key_extra for details + * @data: extra data to store in the node, may be NULL + * @data_size: length of @data + * @output_data: optional, returns a pointer to the node's stored data when not NULL + */ +int nbl_common_alloc_index(void *priv, void *key, struct nbl_index_key_extra *extra_key, + void *data, u32 data_size, void **output_data) { - struct nbl_index_entry_key_node *key_node; + struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; + struct nbl_index_entry_node *idx_node; u32 key_node_size; u32 index = U32_MAX; u32 hash_value; u32 base_index; + u32 key_size = index_mgt->tbl_key.key_size; + u32 idx_num = 1; + u32 idx_multiple = 1; + u32 i; if (!index_mgt->free_index_num) return index; - base_index = find_first_zero_bit(index_mgt->bitmap, index_mgt->tbl_key.index_size); + if (extra_key) { + idx_num = extra_key->index_num; + idx_multiple = extra_key->begin_idx_multiple; + } + + base_index = nbl_common_find_available_idx(index_mgt->bitmap, + index_mgt->tbl_key.index_size, idx_num, + idx_multiple); if (base_index >= index_mgt->tbl_key.index_size) return index; - key_node_size = sizeof(struct nbl_index_entry_key_node) + key_size; - key_node = devm_kzalloc(index_mgt->tbl_key.dev, key_node_size, GFP_KERNEL); - if (!key_node) + key_node_size = sizeof(struct nbl_index_entry_node) + key_size + data_size; + idx_node = devm_kzalloc(index_mgt->tbl_key.dev, key_node_size, GFP_ATOMIC); + if (!idx_node) return index; - set_bit(base_index, index_mgt->bitmap); - index_mgt->free_index_num--; + for (i = 0; i < idx_num; i++) + set_bit(base_index + i, index_mgt->bitmap); + + index_mgt->free_index_num -= idx_num; index = base_index + index_mgt->tbl_key.start_index; hash_value = nbl_common_calculate_hash_key(key, key_size, index_mgt->bucket_size); - key_node->index = index; - memcpy(key_node->data, key, key_size); - hlist_add_head(&key_node->node, index_mgt->key_hash + hash_value); + idx_node->index = index; + idx_node->index_num = idx_num; + memcpy(idx_node->data, key, key_size); + if (data) + memcpy(idx_node->data + key_size, data, data_size); + + if (output_data) + *output_data = idx_node->data + key_size; + + hlist_add_head(&idx_node->node, index_mgt->key_hash + hash_value); return index; } /** - * if the key has alloced a available index, return the index; - * else alloc a new index, store the key, and return the index. 
+ * if the key already has an index allocated, return that base index; + * otherwise allocate one by default (allocation can be suppressed via + * extra_key->not_alloc_new_node); contiguous indexes (num > 1) and a + * base-index multiple can be requested through extra_key + * @extra_key: must not be NULL when more than one index is requested, see + * struct nbl_index_key_extra for details */ -int nbl_common_get_index(void *priv, void *key, u32 key_size) +int nbl_common_get_index(void *priv, void *key, struct nbl_index_key_extra *extra_key) { struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; - struct nbl_index_entry_key_node *key_node; + struct nbl_index_entry_node *idx_node; u32 index = U32_MAX; u32 hash_value; - - if (key_size != index_mgt->tbl_key.key_size) - return index; + u32 key_size = index_mgt->tbl_key.key_size; hash_value = nbl_common_calculate_hash_key(key, key_size, index_mgt->bucket_size); - mutex_lock(&index_mgt->lock); - hlist_for_each_entry(key_node, index_mgt->key_hash + hash_value, node) - if (!memcmp(key_node->data, key, key_size)) { - index = key_node->index; - mutex_unlock(&index_mgt->lock); - return index; + hlist_for_each_entry(idx_node, index_mgt->key_hash + hash_value, node) + if (!memcmp(idx_node->data, key, key_size)) { + index = idx_node->index; + goto out; } - index = nbl_common_alloc_index(index_mgt, key, key_size); - mutex_unlock(&index_mgt->lock); + if (extra_key && extra_key->not_alloc_new_node) + goto out; + index = nbl_common_alloc_index(index_mgt, key, extra_key, NULL, 0, NULL); +out: return index; } -void nbl_common_free_index(void *priv, void *key, u32 key_size) +/** + * if the key already has an index allocated, return that base index and, + * optionally, a pointer to its stored data; otherwise allocate one by default + * (allocation can be suppressed via extra_key->not_alloc_new_node); contiguous + * indexes (num > 1) and a base-index multiple can be requested through extra_key + * @key: must not be NULL + * @extra_key: must not be NULL when more than one index is requested, see + * struct nbl_index_key_extra for details + * @data: extra data to store in a newly created node, may be NULL + * @data_size: length of @data + * @output_data: optional, returns a pointer to the node's stored data when not NULL + */ +int nbl_common_get_index_with_data(void *priv, void *key, struct nbl_index_key_extra *extra_key, + void *data, u32 data_size, void **output_data) { struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; - struct nbl_index_entry_key_node *key_node; + struct nbl_index_entry_node *idx_node; + u32 index = U32_MAX; u32 hash_value; - u32 free_index; + u32 key_size = index_mgt->tbl_key.key_size; - if (key_size != index_mgt->tbl_key.key_size) - return; + hash_value = nbl_common_calculate_hash_key(key, key_size, index_mgt->bucket_size); + hlist_for_each_entry(idx_node, index_mgt->key_hash + hash_value, node) + if (!memcmp(idx_node->data, key, key_size)) { + index = idx_node->index; + if (output_data) + *output_data = idx_node->data + key_size; + goto out; + } + + if (extra_key && extra_key->not_alloc_new_node) + goto out; + + index = nbl_common_alloc_index(index_mgt, key, extra_key, data, data_size, output_data); +out: + return index; +} + +void nbl_common_free_index(void *priv, void *key) +{ + struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; + struct nbl_index_entry_node *idx_node; + u32 hash_value; + u32 key_size = index_mgt->tbl_key.key_size; hash_value = nbl_common_calculate_hash_key(key, key_size, index_mgt->bucket_size); - mutex_lock(&index_mgt->lock); - hlist_for_each_entry(key_node, index_mgt->key_hash + hash_value, node) - if (!memcmp(key_node->data, key, 
key_size)) { - free_index = key_node->index - index_mgt->tbl_key.start_index; - clear_bit(free_index, index_mgt->bitmap); - hlist_del(&key_node->node); - devm_kfree(index_mgt->tbl_key.dev, key_node); - index_mgt->free_index_num++; - mutex_unlock(&index_mgt->lock); + hlist_for_each_entry(idx_node, index_mgt->key_hash + hash_value, node) + if (!memcmp(idx_node->data, key, key_size)) { + nbl_common_free_index_node(index_mgt, idx_node); return; } - - mutex_unlock(&index_mgt->lock); } /** @@ -370,16 +522,14 @@ void *nbl_common_init_hash_table(struct nbl_hash_tbl_key *key) /** * alloc a hash node, and add to hlist_head */ -int nbl_common_alloc_hash_node(void *priv, void *key, void *data) +int nbl_common_alloc_hash_node(void *priv, void *key, void *data, void **out_data) { struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; struct nbl_hash_entry_node *hash_node; u32 hash_value; - u32 node_size; u16 key_size; u16 data_size; - node_size = sizeof(struct nbl_hash_entry_node); hash_node = devm_kzalloc(tbl_mgt->tbl_key.dev, sizeof(struct nbl_hash_entry_node), GFP_KERNEL); if (!hash_node) @@ -405,6 +555,8 @@ int nbl_common_alloc_hash_node(void *priv, void *key, void *data) hlist_add_head(&hash_node->node, tbl_mgt->hash + hash_value); tbl_mgt->node_num++; + if (out_data) + *out_data = hash_node->data; if (tbl_mgt->tbl_key.lock_need) mutex_unlock(&tbl_mgt->lock); @@ -562,7 +714,7 @@ void nbl_common_remove_hash_table(void *priv, struct nbl_hash_tbl_del_key *key) for (i = 0; i < tbl_mgt->tbl_key.bucket_size; i++) { head = tbl_mgt->hash + i; hlist_for_each_entry_safe(hash_node, safe_node, head, node) { - if (key->action_func) + if (key && key->action_func) key->action_func(key->action_priv, hash_node->key, hash_node->data); nbl_common_remove_hash_node(tbl_mgt, hash_node); } @@ -952,3 +1104,24 @@ void nbl_common_remove_hash_xy_table(void *priv, struct nbl_hash_xy_tbl_del_key dev = tbl_mgt->tbl_key.dev; devm_kfree(dev, tbl_mgt); } + +void nbl_flow_direct_parse_tlv_data(u8 *tlv, u32 length, handle_tlv callback, void *data) +{ + u32 offset = 0; + u16 type, len; + int ret; + + while (offset + NBL_CHAN_FDIR_TLV_HEADER_LEN <= length) { + type = *(u16 *)tlv; + len = *(u16 *)(tlv + 2); + /* never run past the declared length if a TLV claims too much data */ + if (len > length - offset - NBL_CHAN_FDIR_TLV_HEADER_LEN) + break; + ret = callback(type, len, tlv + NBL_CHAN_FDIR_TLV_HEADER_LEN, data); + if (ret) + break; + + offset += (NBL_CHAN_FDIR_TLV_HEADER_LEN + len); + tlv += (NBL_CHAN_FDIR_TLV_HEADER_LEN + len); + } +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h index d86e1b9fb0f2773973b565306d7a4158ca288831..c5b43a2de2037847d69f004bd2684ac33b26af53 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h @@ -24,12 +24,12 @@ struct nbl_index_mgt { struct hlist_head *key_hash; u32 free_index_num; u32 bucket_size; - struct mutex lock; /* support multi thread */ }; -struct nbl_index_entry_key_node { +struct nbl_index_entry_node { struct hlist_node node; u32 index; /* the index for key has alloc from index table */ + u32 index_num; u8 data[]; }; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h index 190d417726e61f058b5b30eb9e3092ff4b761324..6727b7e54cd00a36ef4a86703a95f00b1472b1a5 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h @@ -8,13 +8,13 @@ #define _NBL_CORE_H_ #include "nbl_product_base.h" -#include "nbl_def_common.h" +#include "nbl_def_channel.h" 
#include "nbl_def_phy.h" #include "nbl_def_resource.h" #include "nbl_def_dispatch.h" #include "nbl_def_service.h" #include "nbl_def_dev.h" -#include "nbl_def_channel.h" +#include "nbl_def_common.h" #define NBL_ADAPTER_TO_PDEV(adapter) ((adapter)->pdev) #define NBL_ADAPTER_TO_DEV(adapter) (&((adapter)->pdev->dev)) @@ -65,6 +65,7 @@ #define NBL_CAP_IS_USER(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_USER_BIT) #define NBL_CAP_IS_GRC(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_GRC_BIT) #define NBL_CAP_IS_BLK(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_BLK_BIT) +#define NBL_CAP_IS_OCP(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_OCP_BIT) #define NBL_CAP_IS_DPU_HOST(val) ({ typeof(val) _val = (val); \ !NBL_CAP_TEST_BIT(_val, NBL_CAP_IS_NIC_BIT) && \ NBL_CAP_TEST_BIT(_val, NBL_CAP_DPU_IS_HOST_BIT); }) @@ -89,6 +90,7 @@ enum { NBL_CAP_IS_BLK_BIT, NBL_CAP_HAS_USER_BIT, NBL_CAP_HAS_GRC_BIT, + NBL_CAP_IS_OCP_BIT, NBL_CAP_HAS_FACTORY_CTRL_BIT, }; @@ -101,6 +103,8 @@ enum nbl_adapter_state { NBL_RUNNING, NBL_TESTING, NBL_USER, + NBL_FATAL_ERR, + NBL_XDP, NBL_STATE_NBITS }; @@ -140,15 +144,29 @@ struct nbl_adapter { DECLARE_BITMAP(state, NBL_STATE_NBITS); }; +struct nbl_rep_data { + struct net_device *netdev; + struct nbl_netdev_name_attr dev_name_attr; + struct u64_stats_sync rep_syncp; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u16 rep_vsi_id; + u8 base_queue_id; + u8 rep_queue_num; +}; + struct nbl_netdev_priv { struct nbl_adapter *adapter; + struct nbl_rep_data *rep; struct net_device *netdev; u16 tx_queue_num; u16 rx_queue_num; u16 queue_size; /* default traffic destination in kernel/dpdk/coexist scene */ - u16 default_vsi_index; - u16 default_vsi_id; + u16 data_vsi; + u16 user_vsi; s64 last_st_time; }; @@ -164,6 +182,13 @@ struct nbl_devlink_priv { void *dev_mgt; }; +struct nbl_tc_insts_info { + int (*send_cmdq)(void *priv, const void *hdr, void *cmd); + void *chan_mgt; + void *tc_flow_mgt; + int locked; +}; + struct nbl_software_tool_id_entry { struct list_head node; u16 bus; @@ -171,7 +196,7 @@ struct nbl_software_tool_id_entry { u8 refcount; }; -#define NBL_ST_MAX_DEVICE_NUM 64 +#define NBL_ST_MAX_DEVICE_NUM 96 struct nbl_software_tool_table { DECLARE_BITMAP(devid, NBL_ST_MAX_DEVICE_NUM); int major; @@ -179,10 +204,22 @@ struct nbl_software_tool_table { struct class *cls; }; +extern spinlock_t nbl_tc_flow_inst_lock; + +#define NBL_TC_FLOW_INST_COUNT (NBL_DRIVER_DEV_MAX) + struct nbl_adapter *nbl_core_init(struct pci_dev *pdev, struct nbl_init_param *param); void nbl_core_remove(struct nbl_adapter *adapter); int nbl_core_start(struct nbl_adapter *adapter, struct nbl_init_param *param); void nbl_core_stop(struct nbl_adapter *adapter); +void nbl_tc_set_cmdq_info(int (*send_cmdq)(void *, const void *, void *), + void *priv, u8 index); +void nbl_tc_unset_cmdq_info(u8 index); +void nbl_tc_set_flow_info(void *priv, u8 index); +void *nbl_tc_get_flow_info(u8 index); +void nbl_tc_unset_flow_info(u8 index); +u8 nbl_tc_alloc_inst_id(void); +int nbl_tc_call_inst_cmdq(u8 inst_id, const void *hdr, void *cmd); int nbl_st_init(struct nbl_software_tool_table *st_table); void nbl_st_remove(struct nbl_software_tool_table *st_table); diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c index c8f20af405bbe3110e7d2974233fb541d41d6bb9..27fbbd41ecd1677d00d0999294696f5d6e286def 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c @@ -11,7 
+11,7 @@ .open = _open_, \ .read = seq_read, \ .llseek = seq_lseek, \ - .release = seq_release, \ + .release = single_release, \ } #define SINGLE_FOPS_WO(_fops_, _open_, _write_) \ @@ -19,7 +19,7 @@ .open = _open_, \ .write = _write_, \ .llseek = seq_lseek, \ - .release = seq_release, \ + .release = single_release, \ } #define COMPLETE_FOPS_RW(_fops_, _open_, _write_) \ @@ -28,7 +28,7 @@ .write = _write_, \ .read = seq_read, \ .llseek = seq_lseek, \ - .release = seq_release, \ + .release = single_release, \ } static int nbl_flow_info_dump(struct seq_file *m, void *v) @@ -41,6 +41,16 @@ static int nbl_flow_info_dump(struct seq_file *m, void *v) return 0; } +static int nbl_fd_info_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + + disp_ops->dump_fd_flow(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), m); + + return 0; +} + static int nbl_mbx_txq_dma_dump(struct seq_file *m, void *v) { struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; @@ -86,6 +96,11 @@ static int nbl_debugfs_flow_info_dump(struct inode *inode, struct file *file) return single_open(file, nbl_flow_info_dump, inode->i_private); } +static int nbl_debugfs_fd_info_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_fd_info_dump, inode->i_private); +} + static int nbl_debugfs_mbx_txq_dma_dump(struct inode *inode, struct file *file) { return single_open(file, nbl_mbx_txq_dma_dump, inode->i_private); @@ -107,6 +122,7 @@ static int nbl_debugfs_adminq_rxq_dma_dump(struct inode *inode, struct file *fil } SINGLE_FOPS_RO(flow_info_fops, nbl_debugfs_flow_info_dump); +SINGLE_FOPS_RO(fd_info_fops, nbl_debugfs_fd_info_dump); SINGLE_FOPS_RO(mbx_txq_fops, nbl_debugfs_mbx_txq_dma_dump); SINGLE_FOPS_RO(mbx_rxq_fops, nbl_debugfs_mbx_rxq_dma_dump); SINGLE_FOPS_RO(adminq_txq_fops, nbl_debugfs_adminq_txq_dma_dump); @@ -163,12 +179,35 @@ static int nbl_debugfs_ring_dump(struct inode *inode, struct file *file) SINGLE_FOPS_RO(ring_fops, nbl_debugfs_ring_dump); +static int nbl_stats_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_service_ops *serv_ops = NBL_DEBUGFS_MGT_TO_SERV_OPS(debugfs_mgt); + u64 rx_dropped = 0; + + serv_ops->get_rx_dropped(NBL_DEBUGFS_MGT_TO_SERV_PRIV(debugfs_mgt), &rx_dropped); + + seq_puts(m, "Dump stats:\n"); + seq_printf(m, "rx_dropped: %llu\n", rx_dropped); + + return 0; +} + +static int nbl_debugfs_stats_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_stats_dump, inode->i_private); +} + +SINGLE_FOPS_RO(stats_fops, nbl_debugfs_stats_dump); + static void nbl_serv_debugfs_setup_netops(struct nbl_debugfs_mgt *debugfs_mgt) { debugfs_create_file("txrx_ring_index", 0644, debugfs_mgt->nbl_debugfs_root, debugfs_mgt, &ring_index_fops); debugfs_create_file("txrx_ring", 0444, debugfs_mgt->nbl_debugfs_root, debugfs_mgt, &ring_fops); + debugfs_create_file("stats", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &stats_fops); } static int nbl_ring_stats_dump(struct seq_file *m, void *v) @@ -225,10 +264,98 @@ static void nbl_serv_debugfs_setup_ctrlops(struct nbl_debugfs_mgt *debugfs_mgt) debugfs_mgt, &adminq_rxq_fops); } - if (disp_ops->get_product_fix_cap(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), - NBL_DUMP_FLOW_CAP)) + if (disp_ops->get_product_flex_cap(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), + NBL_DUMP_FLOW_CAP)) 
debugfs_create_file("flow_info", 0444, debugfs_mgt->nbl_debugfs_root, debugfs_mgt, &flow_info_fops); + + if (disp_ops->get_product_flex_cap(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), + NBL_DUMP_FD_CAP)) + debugfs_create_file("fd_info", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &fd_info_fops); +} + +static int nbl_pmd_debug_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + + seq_printf(m, "pmd_debug = %s\n", debugfs_mgt->pmd_debug ? "on" : "off"); + + return 0; +} + +static int nbl_pmd_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_pmd_debug_dump, inode->i_private); +} + +static ssize_t nbl_pmd_debug_write(struct file *file, const char __user *buf, + size_t count, loff_t *offp) +{ + struct nbl_debugfs_mgt *debugfs_mgt = file_inode(file)->i_private; + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + char buffer[4] = {0}; + size_t size = min(count, sizeof(buffer)); + + if (copy_from_user(buffer, buf, size)) + return -EFAULT; + if (kstrtobool(buffer, &debugfs_mgt->pmd_debug)) + return -EFAULT; + + disp_ops->set_pmd_debug(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), debugfs_mgt->pmd_debug); + return size; +} + +COMPLETE_FOPS_RW(pmd_debug_fops, nbl_pmd_debug_open, nbl_pmd_debug_write); + +static void nbl_serv_debugfs_setup_pmdops(struct nbl_debugfs_mgt *debugfs_mgt) +{ + debugfs_create_file("pmd_debug", 0644, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &pmd_debug_fops); +} + +static int nbl_dvn_desc_req_dump(struct seq_file *m, void *v) +{ + u32 desc_req; + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + + desc_req = disp_ops->get_dvn_desc_req(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt)); + seq_printf(m, "dvn_desc_req split:%d, packed:%d\n", desc_req >> 16, desc_req & 0xFFFF); + + return 0; +} + +static int nbl_dvn_desc_req_open(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_dvn_desc_req_dump, inode->i_private); +} + +static ssize_t nbl_dvn_desc_req_write(struct file *file, const char __user *buf, + size_t count, loff_t *offp) +{ + struct nbl_debugfs_mgt *debugfs_mgt = file_inode(file)->i_private; + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + char buffer[12] = {0}; + size_t size = min(count, sizeof(buffer)); + u32 desc_req = 0; + + if (copy_from_user(buffer, buf, size)) + return -EFAULT; + + if (kstrtouint(buffer, 10, &desc_req)) + return -EFAULT; + + disp_ops->set_dvn_desc_req(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), desc_req); + return size; +} + +COMPLETE_FOPS_RW(dvn_desc_req_fops, nbl_dvn_desc_req_open, nbl_dvn_desc_req_write); + +static void nbl_serv_debugfs_setup_dvn_desc_reqops(struct nbl_debugfs_mgt *debugfs_mgt) +{ + debugfs_create_file("dvn_desc_req", 0644, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &dvn_desc_req_fops); } static void nbl_serv_debugfs_setup_commonops(struct nbl_debugfs_mgt *debugfs_mgt) @@ -250,6 +377,7 @@ void nbl_debugfs_func_init(void *p, struct nbl_init_param *param) struct nbl_adapter *adapter = (struct nbl_adapter *)p; struct nbl_debugfs_mgt **debugfs_mgt = (struct nbl_debugfs_mgt **)&NBL_ADAPTER_TO_DEBUGFS_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NULL; struct nbl_common_info *common; struct device *dev; const char *name; @@ -261,9 +389,11 @@ void nbl_debugfs_func_init(void *p, struct nbl_init_param *param) if (!*debugfs_mgt) return; + 
NBL_DEBUGFS_MGT_TO_SERV_OPS_TBL(*debugfs_mgt) = NBL_ADAPTER_TO_SERV_OPS_TBL(adapter); NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(*debugfs_mgt) = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter); NBL_DEBUGFS_MGT_TO_CHAN_OPS_TBL(*debugfs_mgt) = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); NBL_DEBUGFS_MGT_TO_COMMON(*debugfs_mgt) = common; + disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS((*debugfs_mgt)); name = pci_name(NBL_COMMON_TO_PDEV(common)); (*debugfs_mgt)->nbl_debugfs_root = debugfs_create_dir(name, nbl_get_debugfs_root()); @@ -277,6 +407,14 @@ void nbl_debugfs_func_init(void *p, struct nbl_init_param *param) if (param->caps.has_ctrl) nbl_serv_debugfs_setup_ctrlops(*debugfs_mgt); + if (disp_ops->get_product_fix_cap(NBL_DEBUGFS_MGT_TO_DISP_PRIV((*debugfs_mgt)), + NBL_PMD_DEBUG)) + nbl_serv_debugfs_setup_pmdops(*debugfs_mgt); + + if (disp_ops->get_product_fix_cap(NBL_DEBUGFS_MGT_TO_DISP_PRIV((*debugfs_mgt)), + NBL_DVN_DESC_REQ_SYSFS_CAP)) + nbl_serv_debugfs_setup_dvn_desc_reqops(*debugfs_mgt); + if (param->caps.has_net) { nbl_serv_debugfs_setup_netops(*debugfs_mgt); if (!param->caps.is_vf) diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h index 855765792087f9c8537ab0f97eb046fda7a65db7..ffbd3ac7111d1cfcd6c836a8411d7a52e82769ce 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h @@ -10,6 +10,11 @@ #include "nbl_core.h" #define NBL_DEBUGFS_MGT_TO_COMMON(debugfs_mgt) ((debugfs_mgt)->common) +#define NBL_DEBUGFS_MGT_TO_SERV_OPS_TBL(debugfs_mgt) ((debugfs_mgt)->serv_ops_tbl) +#define NBL_DEBUGFS_MGT_TO_SERV_OPS(debugfs_mgt) \ + (NBL_DEBUGFS_MGT_TO_SERV_OPS_TBL(debugfs_mgt)->ops) +#define NBL_DEBUGFS_MGT_TO_SERV_PRIV(debugfs_mgt) \ + (NBL_DEBUGFS_MGT_TO_SERV_OPS_TBL(debugfs_mgt)->priv) #define NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(debugfs_mgt) ((debugfs_mgt)->disp_ops_tbl) #define NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt) \ (NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(debugfs_mgt)->ops) @@ -23,6 +28,7 @@ struct nbl_debugfs_mgt { struct dentry *nbl_debugfs_root; + struct nbl_service_ops_tbl *serv_ops_tbl; struct nbl_dispatch_ops_tbl *disp_ops_tbl; struct nbl_channel_ops_tbl *chan_ops_tbl; struct nbl_common_info *common; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c index 53bd443b4720f63c367055767806abd1f835cf8d..663602bd2e399cedf5397ed80807ac73ccb4f0c5 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c @@ -4,21 +4,49 @@ * Author: */ +#include +#include +#include #include "nbl_dev.h" +#include "nbl_lag.h" static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "netif debug level (0=none,...,16=all), adapter debug_mask (<-1)"); +int adaptive_rxbuf_len_disable = 1; +module_param(adaptive_rxbuf_len_disable, int, 0); +MODULE_PARM_DESC(adaptive_rxbuf_len_disable, "Disable adapting the rx buffer length to the MTU"); +static int net_msix_mask_en = 1; +module_param(net_msix_mask_en, int, 0); +MODULE_PARM_DESC(net_msix_mask_en, "net msix interrupt mask enable"); + +int performance_mode = 3; +module_param(performance_mode, int, 0); +MODULE_PARM_DESC(performance_mode, "performance_mode"); + +int restore_eth = 1; +module_param(restore_eth, int, 0); +MODULE_PARM_DESC(restore_eth, "restore_eth"); static struct nbl_dev_board_id_table board_id_table; struct nbl_dev_ops dev_ops; -static const struct net_device_ops 
netdev_ops_leonis_pf; -static const struct ethtool_ops ethtool_ops_leonis_pf; +struct net_device_ops netdev_ops_leonis_pf; +struct net_device_ops netdev_ops_leonis_vf; +struct net_device_ops netdev_ops_leonis_rep; +struct ethtool_ops ethtool_ops_leonis_pf; +struct ethtool_ops ethtool_ops_leonis_vf; +struct ethtool_ops ethtool_ops_leonis_rep; +struct dcbnl_rtnl_ops dcbnl_ops_leonis_pf; +struct dcbnl_rtnl_ops dcbnl_ops_leonis_vf; +struct tlsdev_ops ktls_ops; +struct xfrmdev_ops xfrm_ops; static int nbl_dev_clean_mailbox_schedule(struct nbl_dev_mgt *dev_mgt); static void nbl_dev_clean_adminq_schedule(struct nbl_task_info *task_info); +static void nbl_dev_remove_rep_res(struct nbl_dev_mgt *dev_mgt); +static void nbl_dev_handle_fatal_err(struct nbl_dev_mgt *dev_mgt); /* ---------- Basic functions ---------- */ static int nbl_dev_get_port_attributes(struct nbl_dev_mgt *dev_mgt) @@ -35,12 +63,20 @@ static int nbl_dev_enable_port(struct nbl_dev_mgt *dev_mgt, bool enable) return serv_ops->enable_port(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), enable); } -static int nbl_dev_alloc_board_id(struct nbl_dev_board_id_table *index_table, u16 bus) +static void nbl_dev_init_port(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (restore_eth) + serv_ops->init_port(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +static int nbl_dev_alloc_board_id(struct nbl_dev_board_id_table *index_table, u32 board_key) { int i = 0; for (i = 0; i < NBL_DEV_BOARD_ID_MAX; i++) { - if (index_table->entry[i].bus == bus) { + if (index_table->entry[i].board_key == board_key) { index_table->entry[i].refcount++; return i; } @@ -48,7 +84,7 @@ static int nbl_dev_alloc_board_id(struct nbl_dev_board_id_table *index_table, u1 for (i = 0; i < NBL_DEV_BOARD_ID_MAX; i++) { if (!index_table->entry[i].valid) { - index_table->entry[i].bus = bus; + index_table->entry[i].board_key = board_key; index_table->entry[i].refcount++; index_table->entry[i].valid = true; return i; @@ -58,12 +94,12 @@ static int nbl_dev_alloc_board_id(struct nbl_dev_board_id_table *index_table, u1 return -ENOSPC; } -static void nbl_dev_free_board_id(struct nbl_dev_board_id_table *index_table, u16 bus) +static void nbl_dev_free_board_id(struct nbl_dev_board_id_table *index_table, u32 board_key) { int i = 0; for (i = 0; i < NBL_DEV_BOARD_ID_MAX; i++) { - if (index_table->entry[i].bus == bus && index_table->entry[i].valid) { + if (index_table->entry[i].board_key == board_key && index_table->entry[i].valid) { index_table->entry[i].refcount--; break; } @@ -73,16 +109,21 @@ static void nbl_dev_free_board_id(struct nbl_dev_board_id_table *index_table, u1 memset(&index_table->entry[i], 0, sizeof(index_table->entry[i])); } -static void nbl_dev_set_netdev_priv(struct net_device *netdev, struct nbl_dev_vsi *vsi) +static void nbl_dev_set_netdev_priv(struct net_device *netdev, struct nbl_dev_vsi *vsi, + struct nbl_dev_vsi *user_vsi) { struct nbl_netdev_priv *net_priv = netdev_priv(netdev); net_priv->tx_queue_num = vsi->queue_num; net_priv->rx_queue_num = vsi->queue_num; net_priv->queue_size = vsi->queue_size; + net_priv->rep = NULL; net_priv->netdev = netdev; - net_priv->default_vsi_index = vsi->index; - net_priv->default_vsi_id = vsi->vsi_id; + net_priv->data_vsi = vsi->vsi_id; + if (user_vsi) + net_priv->user_vsi = user_vsi->vsi_id; + else + net_priv->user_vsi = vsi->vsi_id; } /* ---------- Interrupt config ---------- */ @@ -104,6 +145,49 @@ static irqreturn_t nbl_dev_clean_adminq(int __always_unused irq, void *data) return IRQ_HANDLED; } 
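+/* The handler below services the firmware's IPsec hard-expire notification: it + * looks the SA up by (mark, daddr, spi, family), marks it XFRM_STATE_EXPIRED and + * kicks its timer so the xfrm core tears the state down. A sketch of how such a + * handler is typically wired up through the channel layer (the real registration + * site lives elsewhere in the driver, and the message id + * NBL_CHAN_MSG_NOTIFY_IPSEC_HARD_EXPIRE is assumed here purely for illustration): + * + * chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + * NBL_CHAN_MSG_NOTIFY_IPSEC_HARD_EXPIRE, + * nbl_dev_notify_ipsec_hard_expire, dev_mgt); + */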
+static __maybe_unused void nbl_dev_notify_ipsec_hard_expire(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; + struct net *net = dev_net(NBL_DEV_MGT_TO_NET_DEV(dev_mgt)->netdev); + struct nbl_sa_search_key *param; + struct xfrm_state *x; + + param = (struct nbl_sa_search_key *)data; + x = xfrm_state_lookup(net, param->mark, &param->daddr, param->spi, + IPPROTO_ESP, param->family); + if (x) { + x->km.state = XFRM_STATE_EXPIRED; + hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT); + xfrm_state_put_sync(x); + } +} + +static void nbl_dev_handle_ipsec_event(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + ipsec_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_service_ops *serv_ops; + + serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + serv_ops->handle_ipsec_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +static void nbl_dev_clean_ipsec_status(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops; + struct nbl_dev_ctrl *ctrl_dev; + struct nbl_task_info *task_info; + + serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev); + + if (serv_ops->check_ipsec_status(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt))) + nbl_common_queue_work(&task_info->ipsec_task, true, false); +} + static void nbl_dev_handle_abnormal_event(struct work_struct *work) { struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, @@ -125,6 +209,14 @@ static void nbl_dev_clean_abnormal_status(struct nbl_dev_mgt *dev_mgt) static irqreturn_t nbl_dev_clean_abnormal_event(int __always_unused irq, void *data) { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)data; + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + nbl_dev_rdma_process_abnormal_event(rdma_dev); + + if (serv_ops->get_product_flex_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_SECURITY_ACCEL_CAP)) + nbl_dev_clean_ipsec_status(dev_mgt); nbl_dev_clean_abnormal_status(dev_mgt); @@ -148,7 +240,7 @@ static void nbl_dev_register_net_irq(struct nbl_dev_mgt *dev_mgt, u16 queue_num) struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); msix_info->serv_info[NBL_MSIX_NET_TYPE].num = queue_num; - msix_info->serv_info[NBL_MSIX_NET_TYPE].hw_self_mask_en = 1; + msix_info->serv_info[NBL_MSIX_NET_TYPE].hw_self_mask_en = net_msix_mask_en; } static void nbl_dev_register_ctrl_irq(struct nbl_dev_mgt *dev_mgt) @@ -213,6 +305,7 @@ static void nbl_dev_free_net_irq(struct nbl_dev_mgt *dev_mgt) static int nbl_dev_request_mailbox_irq(struct nbl_dev_mgt *dev_mgt) { struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); u16 local_vector_id; @@ -225,8 +318,8 @@ static int nbl_dev_request_mailbox_irq(struct nbl_dev_mgt *dev_mgt) local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id; irq_num = msix_info->msix_entries[local_vector_id].vector; - snprintf(dev_common->mailbox_name, sizeof(dev_common->mailbox_name) - 1, "%s-%s", - dev_name(dev), "mailbox"); + snprintf(dev_common->mailbox_name, sizeof(dev_common->mailbox_name), + "nbl_mailbox@pci:%s", pci_name(NBL_COMMON_TO_PDEV(common))); err = 
devm_request_irq(dev, irq_num, nbl_dev_clean_mailbox, 0, dev_common->mailbox_name, dev_mgt); if (err) { @@ -266,8 +359,8 @@ static int nbl_dev_enable_mailbox_irq(struct nbl_dev_mgt *dev_mgt) return 0; local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id; - chan_ops->set_queue_interrupt_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), - NBL_CHAN_TYPE_MAILBOX, true); + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_INTERRUPT_READY, + NBL_CHAN_TYPE_MAILBOX, true); return serv_ops->enable_mailbox_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), local_vector_id, true); @@ -289,8 +382,8 @@ static int nbl_dev_disable_mailbox_irq(struct nbl_dev_mgt *dev_mgt) nbl_common_flush_task(&dev_common->clean_mbx_task); local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id; - chan_ops->set_queue_interrupt_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), - NBL_CHAN_TYPE_MAILBOX, false); + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_INTERRUPT_READY, + NBL_CHAN_TYPE_MAILBOX, false); return serv_ops->enable_mailbox_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), local_vector_id, false); @@ -299,10 +392,12 @@ static int nbl_dev_disable_mailbox_irq(struct nbl_dev_mgt *dev_mgt) static int nbl_dev_request_adminq_irq(struct nbl_dev_mgt *dev_mgt, struct nbl_task_info *task_info) { struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); u16 local_vector_id; u32 irq_num; + char *irq_name; int err; if (!msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num) @@ -310,9 +405,12 @@ static int nbl_dev_request_adminq_irq(struct nbl_dev_mgt *dev_mgt, struct nbl_ta local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; irq_num = msix_info->msix_entries[local_vector_id].vector; + irq_name = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].irq_name; + snprintf(irq_name, NBL_STRING_NAME_LEN, "nbl_adminq@pci:%s", + pci_name(NBL_COMMON_TO_PDEV(common))); err = devm_request_irq(dev, irq_num, nbl_dev_clean_adminq, - 0, "adminq_irq", task_info); + 0, irq_name, task_info); if (err) { dev_err(dev, "Request adminq irq handler failed err: %d\n", err); return err; @@ -350,8 +448,8 @@ static int nbl_dev_enable_adminq_irq(struct nbl_dev_mgt *dev_mgt) return 0; local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; - chan_ops->set_queue_interrupt_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_TYPE_ADMINQ, - true); + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_INTERRUPT_READY, + NBL_CHAN_TYPE_ADMINQ, true); return serv_ops->enable_adminq_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), local_vector_id, true); @@ -369,8 +467,8 @@ static int nbl_dev_disable_adminq_irq(struct nbl_dev_mgt *dev_mgt) return 0; local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; - chan_ops->set_queue_interrupt_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_TYPE_ADMINQ, - false); + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_INTERRUPT_READY, + NBL_CHAN_TYPE_ADMINQ, false); return serv_ops->enable_adminq_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), local_vector_id, false); @@ -379,20 +477,25 @@ static int nbl_dev_disable_adminq_irq(struct nbl_dev_mgt *dev_mgt) static int nbl_dev_request_abnormal_irq(struct nbl_dev_mgt *dev_mgt) { struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_common_info 
*common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); - u16 local_vector_id; + char *irq_name; u32 irq_num; int err; + u16 local_vector_id; if (!msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num) return 0; local_vector_id = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].base_vector_id; irq_num = msix_info->msix_entries[local_vector_id].vector; + irq_name = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].irq_name; + snprintf(irq_name, NBL_STRING_NAME_LEN, "nbl_abnormal@pci:%s", + pci_name(NBL_COMMON_TO_PDEV(common))); err = devm_request_irq(dev, irq_num, nbl_dev_clean_abnormal_event, - 0, "abnormal_irq", dev_mgt); + 0, irq_name, dev_mgt); if (err) { dev_err(dev, "Request abnormal_irq irq handler failed err: %d\n", err); return err; @@ -581,6 +684,101 @@ static void nbl_dev_clear_interrupt_scheme(struct nbl_dev_mgt *dev_mgt) nbl_dev_free_msix_intr(dev_mgt); } +static void nbl_fw_tracer_clean_saved_traces_array(struct nbl_health_reporters *reps) +{ + mutex_destroy(&reps->temp_st_arr.lock); + mutex_destroy(&reps->reboot_st_arr.lock); +} + +static void nbl_dev_destroy_health(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + + if (!IS_ERR_OR_NULL(ctrl_dev->health_reporters.fw_temp_reporter)) + devl_health_reporter_destroy(ctrl_dev->health_reporters.fw_temp_reporter); + + if (!IS_ERR_OR_NULL(ctrl_dev->health_reporters.fw_reboot_reporter)) + devl_health_reporter_destroy(ctrl_dev->health_reporters.fw_reboot_reporter); + + nbl_fw_tracer_clean_saved_traces_array(&ctrl_dev->health_reporters); +} + +static void nbl_fw_temp_save_trace(struct nbl_health_reporters *reps, u8 temp, + u64 uptime) +{ + struct nbl_fw_temp_trace_data *trace_data; + + mutex_lock(&reps->temp_st_arr.lock); + trace_data = &reps->temp_st_arr.trace_data[reps->temp_st_arr.saved_traces_index]; + trace_data->timestamp = uptime; + trace_data->temp_num = temp; + + reps->temp_st_arr.saved_traces_index = + (reps->temp_st_arr.saved_traces_index + 1) & (NBL_SAVED_TRACES_NUM - 1); + mutex_unlock(&reps->temp_st_arr.lock); +} + +static void nbl_fw_reboot_save_trace(struct nbl_health_reporters *reps) +{ + struct nbl_fw_reboot_trace_data *trace_data; + struct timespec64 ts; + struct tm tm; + + ktime_get_real_ts64(&ts); + time64_to_tm(ts.tv_sec, 0, &tm); + mutex_lock(&reps->reboot_st_arr.lock); + trace_data = &reps->reboot_st_arr.trace_data[reps->reboot_st_arr.saved_traces_index]; + snprintf(trace_data->local_time, NBL_TIME_LEN, "%04ld-%02d-%02d %02d:%02d:%02d UTC", + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, + tm.tm_sec); + snprintf(reps->reporter_ctx.reboot_report_time, NBL_TIME_LEN, + "%04ld-%02d-%02d %02d:%02d:%02d", + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, + tm.tm_sec); + + reps->reboot_st_arr.saved_traces_index = + (reps->reboot_st_arr.saved_traces_index + 1) & (NBL_SAVED_TRACES_NUM - 1); + mutex_unlock(&reps->reboot_st_arr.lock); +} + +static void nbl_dev_health_report_temp_task(struct work_struct *work) +{ + struct nbl_fw_reporter_ctx fw_reporter_cxt; + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + report_temp_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_health_reporters *reps = &ctrl_dev->health_reporters; + int err; + + 
fw_reporter_cxt.temp_num = reps->reporter_ctx.temp_num; + if (!reps->fw_temp_reporter) + return; + + err = devlink_health_report(reps->fw_temp_reporter, "nbl_fw_temp", &fw_reporter_cxt); + if (err) + dev_err(dev, "failed to report nbl_fw_temp health\n"); +} + +static void nbl_dev_health_report_reboot_task(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + report_reboot_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_health_reporters *reps = &ctrl_dev->health_reporters; + int err; + + if (!reps->fw_reboot_reporter) + return; + err = devlink_health_report(reps->fw_reboot_reporter, "nbl_fw_reboot", &reps->reporter_ctx); + if (err) { + dev_err(dev, "failed to report nbl_fw_reboot health\n"); + } +} + /* ---------- Channel config ---------- */ static int nbl_dev_setup_chan_qinfo(struct nbl_dev_mgt *dev_mgt, u8 chan_type) { @@ -621,6 +819,17 @@ static int nbl_dev_remove_chan_queue(struct nbl_dev_mgt *dev_mgt, u8 chan_type) return ret; } +static bool nbl_dev_should_chan_keepalive(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + bool ret = true; + + ret = serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_KEEP_ALIVE); + + return ret; +} + static int nbl_dev_setup_chan_keepalive(struct nbl_dev_mgt *dev_mgt, u8 chan_type) { struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); @@ -628,6 +837,9 @@ static int nbl_dev_setup_chan_keepalive(struct nbl_dev_mgt *dev_mgt, u8 chan_typ struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); u16 dest_func_id = NBL_COMMON_TO_MGT_PF(common); + if (!nbl_dev_should_chan_keepalive(dev_mgt)) + return 0; + if (chan_type != NBL_CHAN_TYPE_MAILBOX) return -EOPNOTSUPP; @@ -649,17 +861,6 @@ static void nbl_dev_remove_chan_keepalive(struct nbl_dev_mgt *dev_mgt, u8 chan_t chan_ops->remove_keepalive(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type); } -static bool nbl_dev_should_chan_keepalive(struct nbl_dev_mgt *dev_mgt) -{ - struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - bool ret = true; - - ret &= serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_TASK_KEEP_ALIVE); - - return ret; -} - static void nbl_dev_register_chan_task(struct nbl_dev_mgt *dev_mgt, u8 chan_type, struct work_struct *task) { @@ -690,6 +891,47 @@ static int nbl_dev_clean_mailbox_schedule(struct nbl_dev_mgt *dev_mgt) return 0; } +static void nbl_dev_prepare_eswitch_reset(struct nbl_dev_mgt *dev_mgt) +{ + nbl_dev_remove_rep_res(dev_mgt); +} + +static void nbl_dev_prepare_reset_task(struct work_struct *work) +{ + int ret; + enum nbl_core_reset_event event = NBL_CORE_FATAL_ERR_EVENT; + struct nbl_reset_task_info *task_info = container_of(work, struct nbl_reset_task_info, + task); + struct nbl_dev_common *common_dev = container_of(task_info, struct nbl_dev_common, + reset_task); + struct nbl_dev_mgt *dev_mgt = common_dev->dev_mgt; + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_chan_send_info chan_send; + + nbl_event_notify(NBL_EVENT_RESET_EVENT, &event, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + serv_ops->netdev_stop(dev_mgt->net_dev->netdev); + 
nbl_dev_prepare_eswitch_reset(dev_mgt); + netif_device_detach(dev_mgt->net_dev->netdev); /* to avoid ethtool operation */ + nbl_dev_remove_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_ACK_RESET_EVENT, NULL, + 0, NULL, 0, 0); + /* notify the ctrl dev that the reset event has been handled */ + ret = chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_ABNORMAL, + NBL_CHAN_TYPE_MAILBOX, true); + + /* sleep briefly so that no send_msg is still in flight */ + usleep_range(10, 20); + + /* the ctrl dev must shut down phy register read/write only after it has notified the emp to shut down the device */ + if (!NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) + serv_ops->set_hw_status(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_HW_FATAL_ERR); +} + static void nbl_dev_clean_adminq_task(struct work_struct *work) { struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, @@ -710,16 +952,17 @@ static void nbl_dev_fw_heartbeat_task(struct work_struct *work) struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, fw_hb_task); struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - if (task_info->fw_resetting) return; if (!serv_ops->check_fw_heartbeat(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt))) { dev_notice(NBL_COMMON_TO_DEV(common), "FW reset detected"); task_info->fw_resetting = true; - + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_ABNORMAL, + NBL_CHAN_TYPE_ADMINQ, true); nbl_common_queue_delayed_work(&task_info->fw_reset_task, MSEC_PER_SEC, true, false); } } @@ -730,32 +973,49 @@ static void nbl_dev_fw_reset_task(struct work_struct *work) struct nbl_task_info *task_info = container_of(delayed_work, struct nbl_task_info, fw_reset_task); struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); if (serv_ops->check_fw_reset(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt))) { dev_notice(NBL_COMMON_TO_DEV(common), "FW recovered"); - nbl_dev_disable_adminq_irq(dev_mgt); nbl_dev_free_adminq_irq(dev_mgt, task_info); + msleep(NBL_DEV_FW_RESET_WAIT_TIME); /* wait for the adminq timeout */ nbl_dev_remove_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ); nbl_dev_setup_chan_qinfo(dev_mgt, NBL_CHAN_TYPE_ADMINQ); nbl_dev_setup_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ); nbl_dev_request_adminq_irq(dev_mgt, task_info); nbl_dev_enable_adminq_irq(dev_mgt); + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_ABNORMAL, + NBL_CHAN_TYPE_ADMINQ, false); + if (NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) { nbl_dev_get_port_attributes(dev_mgt); nbl_dev_enable_port(dev_mgt, true); } task_info->fw_resetting = false; + nbl_fw_reboot_save_trace(&ctrl_dev->health_reporters); + nbl_common_queue_work(&task_info->report_reboot_task, true, false); return; } nbl_common_queue_delayed_work(delayed_work, MSEC_PER_SEC, true, false); } +static void nbl_dev_offload_network_task(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + offload_network_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct 
nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->check_offload_status(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + static void nbl_dev_adapt_desc_gother_task(struct work_struct *work) { struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, @@ -776,11 +1036,24 @@ static void nbl_dev_recovery_abnormal_task(struct work_struct *work) serv_ops->recovery_abnormal(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); } +static void nbl_dev_ctrl_reset_task(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + reset_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + + nbl_dev_handle_fatal_err(dev_mgt); +} + static void nbl_dev_ctrl_task_schedule(struct nbl_task_info *task_info) { struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_OFFLOAD_NETWORK_CAP)) + nbl_common_queue_work(&task_info->offload_network_task, true, true); + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_TASK_FW_HB_CAP)) nbl_common_queue_work(&task_info->fw_hb_task, true, false); @@ -822,6 +1095,7 @@ static void nbl_dev_ctrl_task_stop(struct nbl_dev_mgt *dev_mgt) return; del_timer_sync(&task_info->serv_timer); + task_info->timer_setup = false; } static void nbl_dev_chan_notify_flr_resp(void *priv, u16 src_id, u16 msg_id, @@ -829,10 +1103,15 @@ { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); u16 vfid; + u16 vsi_id; vfid = *(u16 *)data; serv_ops->process_flr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vfid); + + vsi_id = serv_ops->covert_vfid_to_vsi_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vfid); + nbl_dev_rdma_process_flr_event(rdma_dev, vsi_id); } static void nbl_dev_ctrl_register_flr_chan_msg(struct nbl_dev_mgt *dev_mgt) @@ -849,6 +1128,127 @@ nbl_dev_chan_notify_flr_resp, dev_mgt); } +static struct nbl_dev_temp_alarm_info temp_alarm_info[NBL_TEMP_STATUS_MAX] = { + {LOGLEVEL_WARNING, "High temperature on sensors0 resumed.\n"}, + {LOGLEVEL_WARNING, "High temperature on sensors0 observed, security(WARNING).\n"}, + {LOGLEVEL_CRIT, "High temperature on sensors0 observed, security(CRITICAL).\n"}, + {LOGLEVEL_EMERG, "High temperature on sensors0 observed, security(EMERGENCY).\n"}, +}; + +static void nbl_dev_handle_temp_ext(struct nbl_dev_mgt *dev_mgt, u8 *data, u16 data_len) +{ + u16 temp = (u16)*data; + u64 uptime = 0; + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + enum nbl_dev_temp_status old_temp_status = ctrl_dev->temp_status; + enum nbl_dev_temp_status new_temp_status = NBL_TEMP_STATUS_NORMAL; + + /* no resume once temp has exceeded NBL_TEMP_EMERG_THRESHOLD, even if the temp later returns to normal, + * because the hw has already shut down. 
+ */ + if (old_temp_status == NBL_TEMP_STATUS_EMERG) + return; + + /* if temp is in the (85-105) band and the status is not normal, do not resume, to avoid alarm oscillation */ + if (temp > NBL_TEMP_NOMAL_THRESHOLD && + temp < NBL_TEMP_WARNING_THRESHOLD && + old_temp_status > NBL_TEMP_STATUS_NORMAL) + return; + + if (temp >= NBL_TEMP_WARNING_THRESHOLD && + temp < NBL_TEMP_CRIT_THRESHOLD) + new_temp_status = NBL_TEMP_STATUS_WARNING; + else if (temp >= NBL_TEMP_CRIT_THRESHOLD && + temp < NBL_TEMP_EMERG_THRESHOLD) + new_temp_status = NBL_TEMP_STATUS_CRIT; + else if (temp >= NBL_TEMP_EMERG_THRESHOLD) + new_temp_status = NBL_TEMP_STATUS_EMERG; + + if (new_temp_status == old_temp_status) + return; + + ctrl_dev->temp_status = new_temp_status; + + /* on a temperature fall, only raise an alarm when the previous alarm needs to be resumed */ + if (new_temp_status < old_temp_status && new_temp_status != NBL_TEMP_STATUS_NORMAL) + return; + + if (data_len > sizeof(u16)) + uptime = *(u64 *)(data + sizeof(u16)); + if (new_temp_status != NBL_TEMP_STATUS_NORMAL) { + ctrl_dev->health_reporters.reporter_ctx.temp_num = temp; + nbl_fw_temp_save_trace(&ctrl_dev->health_reporters, temp, uptime); + nbl_common_queue_work(&ctrl_dev->task_info.report_temp_task, false, false); + } + nbl_log(common, temp_alarm_info[new_temp_status].logvel, + "[%llu] %s", uptime, temp_alarm_info[new_temp_status].alarm_info); + + if (new_temp_status == NBL_TEMP_STATUS_EMERG) { + ctrl_dev->task_info.reset_event = NBL_HW_FATAL_ERR_EVENT; + nbl_common_queue_work(&ctrl_dev->task_info.reset_task, false, false); + } +} + +static const char *nbl_log_level_name(int level) +{ + switch (level) { + case NBL_EMP_ALERT_LOG_FATAL: + return "FATAL"; + case NBL_EMP_ALERT_LOG_ERROR: + return "ERROR"; + case NBL_EMP_ALERT_LOG_WARNING: + return "WARNING"; + case NBL_EMP_ALERT_LOG_INFO: + return "INFO"; + default: + return "UNKNOWN"; + } +} + +static void nbl_dev_handle_emp_log_ext(struct nbl_dev_mgt *dev_mgt, u8 *data, u16 data_len) +{ + struct nbl_emp_alert_log_event *log_event = (struct nbl_emp_alert_log_event *)data; + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + nbl_log(common, LOGLEVEL_INFO, "[FW][%llu] <%s> %.*s", log_event->uptime, + nbl_log_level_name(log_event->level), data_len - sizeof(u64) - sizeof(u8), + log_event->data); +} + +static void nbl_dev_chan_notify_evt_alert_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; + struct nbl_chan_param_emp_alert_event *alert_param = + (struct nbl_chan_param_emp_alert_event *)data; + + switch (alert_param->type) { + case NBL_EMP_EVENT_TEMP_ALERT: + nbl_dev_handle_temp_ext(dev_mgt, alert_param->data, alert_param->len); + return; + case NBL_EMP_EVENT_LOG_ALERT: + nbl_dev_handle_emp_log_ext(dev_mgt, alert_param->data, alert_param->len); + return; + default: + return; + } +} + +static void nbl_dev_ctrl_register_emp_ext_alert_chan_msg(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + /* draco uses the mailbox to communicate with the emp */ + if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_ADMINQ_EXT_ALERT, + nbl_dev_chan_notify_evt_alert_resp, dev_mgt); +} + static int nbl_dev_setup_ctrl_dev_task(struct nbl_dev_mgt *dev_mgt) { struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); @@ -857,6 +1257,13 @@ 
@@ -857,6 +1257,13 @@
 	task_info->dev_mgt = dev_mgt;
 
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_OFFLOAD_NETWORK_CAP)) {
+		nbl_common_alloc_task(&task_info->offload_network_task,
+				      nbl_dev_offload_network_task);
+		task_info->timer_setup = true;
+	}
+
 	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
 					  NBL_TASK_FW_HB_CAP)) {
 		nbl_common_alloc_task(&task_info->fw_hb_task, nbl_dev_fw_heartbeat_task);
@@ -875,6 +1282,11 @@ static int nbl_dev_setup_ctrl_dev_task(struct nbl_dev_mgt *dev_mgt)
 		task_info->timer_setup = true;
 	}
 
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_IPSEC_AGE_CAP)) {
+		nbl_common_alloc_task(&task_info->ipsec_task, nbl_dev_handle_ipsec_event);
+		task_info->timer_setup = true;
+	}
 	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
 					  NBL_TASK_ADAPT_DESC_GOTHER)) {
 		nbl_common_alloc_task(&task_info->adapt_desc_gother_task,
@@ -888,6 +1300,18 @@
 				      nbl_dev_recovery_abnormal_task);
 		task_info->timer_setup = true;
 	}
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_HEALTH_REPORT_TEMP_CAP))
+		nbl_common_alloc_task(&task_info->report_temp_task,
+				      &nbl_dev_health_report_temp_task);
+
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_HEALTH_REPORT_REBOOT_CAP))
+		nbl_common_alloc_task(&task_info->report_reboot_task,
+				      &nbl_dev_health_report_reboot_task);
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_RESET_CTRL_CAP))
+		nbl_common_alloc_task(&task_info->reset_task, &nbl_dev_ctrl_reset_task);
 
 	nbl_common_alloc_task(&task_info->clean_abnormal_irq_task,
 			      nbl_dev_handle_abnormal_event);
@@ -912,6 +1336,10 @@ static void nbl_dev_remove_ctrl_dev_task(struct nbl_dev_mgt *dev_mgt)
 
 	nbl_common_release_task(&task_info->clean_abnormal_irq_task);
 
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_OFFLOAD_NETWORK_CAP))
+		nbl_common_release_task(&task_info->offload_network_task);
+
 	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
 					  NBL_TASK_FW_RESET_CAP))
 		nbl_common_release_delayed_task(&task_info->fw_reset_task);
@@ -924,6 +1352,10 @@
 					  NBL_TASK_CLEAN_ADMINDQ_CAP))
 		nbl_common_release_task(&task_info->clean_adminq_task);
 
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_IPSEC_AGE_CAP))
+		nbl_common_release_task(&task_info->ipsec_task);
+
 	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
 					  NBL_TASK_ADAPT_DESC_GOTHER))
 		nbl_common_release_task(&task_info->adapt_desc_gother_task);
@@ -931,23 +1363,24 @@
 	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
 					  NBL_RECOVERY_ABNORMAL_STATUS))
 		nbl_common_release_task(&task_info->recovery_abnormal_task);
-}
-
-static int nbl_dev_setup_customized_p4(struct nbl_dev_mgt *dev_mgt)
-{
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
 
-	if (!serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_P4_CAP))
-		return 0;
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_RESET_CTRL_CAP))
+		nbl_common_release_task(&task_info->reset_task);
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_HEALTH_REPORT_TEMP_CAP))
+		nbl_common_release_task(&task_info->report_temp_task);
 
-	return serv_ops->init_p4(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_HEALTH_REPORT_REBOOT_CAP))
+		nbl_common_release_task(&task_info->report_reboot_task);
 }
 
-static int nbl_dev_update_ring_num(struct nbl_dev_mgt *dev_mgt)
+static int nbl_dev_update_template_config(struct nbl_dev_mgt *dev_mgt)
 {
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
 
-	return serv_ops->update_ring_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	return serv_ops->update_template_config(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
 }
 
 /* ---------- Dev init process ---------- */
@@ -957,6 +1390,7 @@ static int nbl_dev_setup_common_dev(struct nbl_adapter *adapter, struct nbl_init
 	struct nbl_dev_common *common_dev;
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
 	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct nbl_board_port_info board_info = { 0 };
 	int board_id;
 
 	common_dev = devm_kzalloc(NBL_ADAPTER_TO_DEV(adapter),
@@ -972,6 +1406,10 @@
 					  NBL_TASK_CLEAN_MAILBOX_CAP))
 		nbl_common_alloc_task(&common_dev->clean_mbx_task, nbl_dev_clean_mailbox_task);
 
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_RESET_CAP))
+		nbl_common_alloc_task(&common_dev->reset_task.task, &nbl_dev_prepare_reset_task);
+
 	if (param->caps.is_nic) {
 		board_id = serv_ops->get_board_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
 		if (board_id < 0)
@@ -983,8 +1421,12 @@
 						       NBL_VSI_DATA);
 		serv_ops->get_eth_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_COMMON_TO_VSI_ID(common),
-				     &NBL_COMMON_TO_ETH_MODE(common), &NBL_COMMON_TO_ETH_ID(common));
+				     &NBL_COMMON_TO_ETH_MODE(common), &NBL_COMMON_TO_ETH_ID(common),
+				     &NBL_COMMON_TO_LOGIC_ETH_ID(common));
+
+		serv_ops->get_board_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &board_info);
+		NBL_COMMON_TO_ETH_MAX_SPEED(common) = nbl_port_speed_to_speed(board_info.eth_speed);
 
 	nbl_dev_register_chan_task(dev_mgt, NBL_CHAN_TYPE_MAILBOX, &common_dev->clean_mbx_task);
 	NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = common_dev;
@@ -994,6 +1436,10 @@
 	return 0;
 
 get_board_id_fail:
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_RESET_CAP))
+		nbl_common_release_task(&common_dev->reset_task.task);
+
 	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
 					  NBL_TASK_CLEAN_MAILBOX_CAP))
 		nbl_common_release_task(&common_dev->clean_mbx_task);
@@ -1013,6 +1459,10 @@ static void nbl_dev_remove_common_dev(struct nbl_adapter *adapter)
 
 	nbl_dev_register_chan_task(dev_mgt, NBL_CHAN_TYPE_MAILBOX, NULL);
 
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_RESET_CAP))
+		nbl_common_release_task(&common_dev->reset_task.task);
+
 	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
 					  NBL_TASK_CLEAN_MAILBOX_CAP))
 		nbl_common_release_task(&common_dev->clean_mbx_task);
@@ -1023,46 +1473,239 @@ static void nbl_dev_remove_common_dev(struct nbl_adapter *adapter)
 	NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = NULL;
 }
 
-static int nbl_dev_setup_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param)
+static void nbl_devlink_fmsg_fill_temp_trace(struct devlink_fmsg *fmsg,
+					     struct nbl_fw_temp_trace_data *trace_data)
 {
-	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-	struct nbl_dev_ctrl *ctrl_dev;
-	struct device *dev = NBL_ADAPTER_TO_DEV(adapter);
-	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
-	int i, ret = 0;
-
-	if (param->caps.is_nic)
-		NBL_COMMON_TO_BOARD_ID(common) =
-			nbl_dev_alloc_board_id(&board_id_table, common->bus);
+	devlink_fmsg_obj_nest_start(fmsg);
+	devlink_fmsg_u64_pair_put(fmsg, "timestamp", trace_data->timestamp);
+	devlink_fmsg_u8_pair_put(fmsg, "high temperature", trace_data->temp_num);
+	devlink_fmsg_obj_nest_end(fmsg);
+}
 
-	ctrl_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_ctrl), GFP_KERNEL);
-	if (!ctrl_dev)
-		goto alloc_fail;
-	NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev)->adapter = adapter;
-	NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) = ctrl_dev;
+static int nbl_fw_temp_trace_get_entry(struct nbl_dev_ctrl *ctrl_dev, struct devlink_fmsg *fmsg)
+{
+	struct nbl_health_reporters *reps = &ctrl_dev->health_reporters;
+	struct nbl_fw_temp_trace_data *trace_data = reps->temp_st_arr.trace_data;
+	u8 index, start_index, end_index;
+	u8 saved_traces_index;
 
-	nbl_dev_register_ctrl_irq(dev_mgt);
+	if (!trace_data[0].timestamp)
+		return -ENOMSG;
 
-	ret = serv_ops->init_chip(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
-	if (ret) {
-		dev_err(dev, "ctrl dev chip_init failed\n");
-		goto chip_init_fail;
+	mutex_lock(&reps->temp_st_arr.lock);
+	saved_traces_index = reps->temp_st_arr.saved_traces_index;
+	if (trace_data[saved_traces_index].timestamp)
+		start_index = saved_traces_index;
+	else
+		start_index = 0;
+	devlink_fmsg_arr_pair_nest_start(fmsg, "dump nbl fw traces");
+	end_index = (saved_traces_index - 1) & (NBL_SAVED_TRACES_NUM - 1);
+	index = start_index;
+	while (index != end_index) {
+		nbl_devlink_fmsg_fill_temp_trace(fmsg, &trace_data[index]);
+		index = (index + 1) & (NBL_SAVED_TRACES_NUM - 1);
 	}
+	nbl_devlink_fmsg_fill_temp_trace(fmsg, &trace_data[index]);
+	devlink_fmsg_arr_pair_nest_end(fmsg);
+	mutex_unlock(&reps->temp_st_arr.lock);
 
-	ret = serv_ops->start_mgt_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
-	if (ret) {
-		dev_err(dev, "ctrl dev start_mgt_flow failed\n");
-		goto mgt_flow_fail;
-	}
+	return 0;
+}
+
+static int nbl_fw_temp_reporter_diagnose(struct devlink_health_reporter *reporter,
+					 struct devlink_fmsg *fmsg,
+					 struct netlink_ext_ack *extack)
+{
+	struct nbl_dev_mgt *dev_mgt = devlink_health_reporter_priv(reporter);
+	struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt);
 
-	for (i = 0; i < NBL_CHAN_TYPE_MAX; i++) {
-		ret = nbl_dev_setup_chan_qinfo(dev_mgt, i);
-		if (ret) {
-			dev_err(dev, "ctrl dev setup chan qinfo failed\n");
-			goto setup_chan_q_fail;
-		}
-	}
+	return nbl_fw_temp_trace_get_entry(ctrl_dev, fmsg);
+}
+
+static int nbl_fw_temp_reporter_dump(struct devlink_health_reporter *reporter,
+				     struct devlink_fmsg *fmsg, void *priv_ctx,
+				     struct netlink_ext_ack *extack)
+{
+	if (priv_ctx) {
+		struct nbl_fw_reporter_ctx *fw_reporter_ctx =
+			(struct nbl_fw_reporter_ctx *)priv_ctx;
+		devlink_fmsg_obj_nest_start(fmsg);
+		devlink_fmsg_u32_pair_put(fmsg, "high temperature", fw_reporter_ctx->temp_num);
+		devlink_fmsg_obj_nest_end(fmsg);
+	}
+	return 0;
+}
+
+static void nbl_fw_tracer_init_saved_traces_array(struct nbl_health_reporters *reps)
+{
+	reps->temp_st_arr.saved_traces_index = 0;
+	reps->reboot_st_arr.saved_traces_index = 0;
+	mutex_init(&reps->temp_st_arr.lock);
+	mutex_init(&reps->reboot_st_arr.lock);
+}
+
+static struct devlink_health_reporter_ops nbl_fw_temp_reporter_ops = {
+	.name = "nbl_fw_temp",
+	.diagnose = nbl_fw_temp_reporter_diagnose,
+	.dump = nbl_fw_temp_reporter_dump,
+};
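
Both trace dumpers (this one and the reboot dumper below) walk their saved-trace array as a ring
buffer: saved_traces_index points at the oldest entry once the array has wrapped, and all index
arithmetic uses & (NBL_SAVED_TRACES_NUM - 1), which is only correct if NBL_SAVED_TRACES_NUM is a
power of two. A self-contained sketch of the same traversal, with invented names and an assumed
depth of 8:

#include <stdio.h>

#define SAVED_TRACES_NUM 8	/* must be a power of two for the masking below */

struct trace { unsigned long long timestamp; };

static struct trace traces[SAVED_TRACES_NUM];
static unsigned int next_slot;	/* mirrors saved_traces_index */

static void trace_save(unsigned long long ts)
{
	traces[next_slot].timestamp = ts;
	next_slot = (next_slot + 1) & (SAVED_TRACES_NUM - 1);
}

static void trace_dump(void)
{
	unsigned int start, end, i;

	if (!traces[0].timestamp)	/* nothing recorded yet; 0 means unused */
		return;

	/* if the slot at next_slot is populated, the buffer has wrapped and
	 * next_slot is the oldest entry; otherwise dump from slot 0
	 */
	start = traces[next_slot].timestamp ? next_slot : 0;
	end = (next_slot - 1) & (SAVED_TRACES_NUM - 1);

	for (i = start; i != end; i = (i + 1) & (SAVED_TRACES_NUM - 1))
		printf("%llu\n", traces[i].timestamp);
	printf("%llu\n", traces[i].timestamp);	/* the last (newest) entry */
}
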
 
+static void nbl_devlink_fmsg_fill_reboot_trace(struct devlink_fmsg *fmsg,
+					       struct nbl_fw_reboot_trace_data *trace_data)
+{
+	devlink_fmsg_obj_nest_start(fmsg);
+	devlink_fmsg_string_pair_put(fmsg, "reboot time", trace_data->local_time);
+	devlink_fmsg_obj_nest_end(fmsg);
+}
+
+static int nbl_fw_reboot_trace_get_entry(struct nbl_dev_ctrl *ctrl_dev, struct devlink_fmsg *fmsg)
+{
+	struct nbl_health_reporters *reps = &ctrl_dev->health_reporters;
+	struct nbl_fw_reboot_trace_data *trace_data = reps->reboot_st_arr.trace_data;
+	u8 index, start_index, end_index;
+	u8 saved_traces_index;
+
+	if (!trace_data[0].local_time[0])
+		return -ENOMSG;
+
+	mutex_lock(&reps->reboot_st_arr.lock);
+	saved_traces_index = reps->reboot_st_arr.saved_traces_index;
+	if (trace_data[saved_traces_index].local_time[0])
+		start_index = saved_traces_index;
+	else
+		start_index = 0;
+	devlink_fmsg_arr_pair_nest_start(fmsg, "dump nbl fw traces");
+	end_index = (saved_traces_index - 1) & (NBL_SAVED_TRACES_NUM - 1);
+	index = start_index;
+	while (index != end_index) {
+		nbl_devlink_fmsg_fill_reboot_trace(fmsg, &trace_data[index]);
+		index = (index + 1) & (NBL_SAVED_TRACES_NUM - 1);
	}
+	nbl_devlink_fmsg_fill_reboot_trace(fmsg, &trace_data[index]);
+	devlink_fmsg_arr_pair_nest_end(fmsg);
+	mutex_unlock(&reps->reboot_st_arr.lock);
+
+	return 0;
+}
+
+static int nbl_fw_reboot_reporter_diagnose(struct devlink_health_reporter *reporter,
+					   struct devlink_fmsg *fmsg,
+					   struct netlink_ext_ack *extack)
+{
+	struct nbl_dev_mgt *dev_mgt = devlink_health_reporter_priv(reporter);
+	struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt);
+
+	return nbl_fw_reboot_trace_get_entry(ctrl_dev, fmsg);
+}
+
+static int nbl_fw_reboot_reporter_dump(struct devlink_health_reporter *reporter,
+				       struct devlink_fmsg *fmsg, void *priv_ctx,
+				       struct netlink_ext_ack *extack)
+{
+	if (priv_ctx) {
+		struct nbl_fw_reporter_ctx *fw_reporter_ctx =
+			(struct nbl_fw_reporter_ctx *)priv_ctx;
+		devlink_fmsg_obj_nest_start(fmsg);
+		devlink_fmsg_string_pair_put(fmsg, "reboot time",
+					     fw_reporter_ctx->reboot_report_time);
+		devlink_fmsg_obj_nest_end(fmsg);
+	}
+	return 0;
+}
+
+static struct devlink_health_reporter_ops nbl_fw_reboot_reporter_ops = {
+	.name = "nbl_fw_reboot",
+	.diagnose = nbl_fw_reboot_reporter_diagnose,
+	.dump = nbl_fw_reboot_reporter_dump,
+};
+
+static void nbl_setup_devlink_reporter(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt);
+	struct nbl_health_reporters *reps = &ctrl_dev->health_reporters;
+	struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt);
+	struct devlink *devlink = dev_common->devlink;
+	struct devlink_health_reporter_ops *fw_reboot_ops;
+	struct devlink_health_reporter_ops *fw_temp_ops;
+	const u64 graceful_period = 0;
+
+	fw_temp_ops = &nbl_fw_temp_reporter_ops;
+	fw_reboot_ops = &nbl_fw_reboot_reporter_ops;
+
+	nbl_fw_tracer_init_saved_traces_array(&ctrl_dev->health_reporters);
+	reps->fw_temp_reporter =
+		devl_health_reporter_create(devlink, fw_temp_ops, graceful_period, dev_mgt);
+	if (IS_ERR(reps->fw_temp_reporter)) {
+		dev_err(dev, "failed to create fw temp reporter err = %ld\n",
+			PTR_ERR(reps->fw_temp_reporter));
+		return;
+	}
+	reps->fw_reboot_reporter =
+		devl_health_reporter_create(devlink, fw_reboot_ops, graceful_period, dev_mgt);
+	if (IS_ERR(reps->fw_reboot_reporter)) {
+		dev_err(dev, "failed to create fw reboot reporter err = %ld\n",
+			PTR_ERR(reps->fw_reboot_reporter));
+		if (reps->fw_temp_reporter)
+			devl_health_reporter_destroy(reps->fw_temp_reporter);
+
+		return;
+	}
+}
+
+static int nbl_dev_health_init(struct nbl_dev_mgt *dev)
+{
+	nbl_setup_devlink_reporter(dev);
+	return 0;
+}
+
+static int nbl_dev_setup_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_dev_ctrl *ctrl_dev;
+	struct device *dev = NBL_ADAPTER_TO_DEV(adapter);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	int i, ret = 0;
+	u32 board_key;
+
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+	if (param->caps.is_nic)
+		NBL_COMMON_TO_BOARD_ID(common) =
+			nbl_dev_alloc_board_id(&board_id_table, board_key);
+
+	dev_info(dev, "board_key 0x%x alloc board id 0x%x\n",
+		 board_key, NBL_COMMON_TO_BOARD_ID(common));
+
+	ctrl_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_ctrl), GFP_KERNEL);
+	if (!ctrl_dev)
+		goto alloc_fail;
+	NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev)->adapter = adapter;
+	NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) = ctrl_dev;
+
+	nbl_dev_register_ctrl_irq(dev_mgt);
+
+	ret = serv_ops->init_chip(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	if (ret) {
+		dev_err(dev, "ctrl dev chip_init failed\n");
+		goto chip_init_fail;
+	}
+
+	ret = serv_ops->start_mgt_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	if (ret) {
+		dev_err(dev, "ctrl dev start_mgt_flow failed\n");
+		goto mgt_flow_fail;
+	}
+
+	for (i = 0; i < NBL_CHAN_TYPE_MAX; i++) {
+		ret = nbl_dev_setup_chan_qinfo(dev_mgt, i);
+		if (ret) {
+			dev_err(dev, "ctrl dev setup chan qinfo failed\n");
+			goto setup_chan_q_fail;
+		}
+	}
+
+	nbl_dev_ctrl_register_flr_chan_msg(dev_mgt);
+	nbl_dev_ctrl_register_emp_ext_alert_chan_msg(dev_mgt);
 
 	ret = nbl_dev_setup_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ);
 	if (ret) {
@@ -1084,17 +1727,14 @@ static int nbl_dev_setup_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_p
 		}
 	}
 
-	ret = nbl_dev_setup_customized_p4(dev_mgt);
-	if (ret)
-		goto customize_p4_fail;
+	nbl_dev_update_template_config(dev_mgt);
 
-	nbl_dev_update_ring_num(dev_mgt);
+	serv_ops->cfg_eth_bond_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), true);
+	serv_ops->cfg_fd_update_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), true);
+	serv_ops->cfg_mirror_outputport_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), true);
 
 	return 0;
 
-customize_p4_fail:
-	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP))
-		serv_ops->remove_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), nbl_get_st_table());
 setup_ctrl_dev_st_fail:
 	nbl_dev_remove_ctrl_dev_task(dev_mgt);
 setup_ctrl_dev_task_fail:
@@ -1107,7 +1747,7 @@ static int nbl_dev_setup_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_p
 	devm_kfree(dev, ctrl_dev);
 	NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) = NULL;
 alloc_fail:
-	nbl_dev_free_board_id(&board_id_table, common->bus);
+	nbl_dev_free_board_id(&board_id_table, board_key);
 	return ret;
 }
 
@@ -1116,11 +1756,17 @@ static void nbl_dev_remove_ctrl_dev(struct nbl_adapter *adapter)
 	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
 	struct nbl_dev_ctrl **ctrl_dev = &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt);
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	u32 board_key;
 
 	if (!*ctrl_dev)
 		return;
 
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+	serv_ops->cfg_fd_update_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), false);
+	serv_ops->cfg_eth_bond_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), false);
+	serv_ops->cfg_mirror_outputport_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), false);
+
 	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP))
 		serv_ops->remove_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), nbl_get_st_table());
 
@@ -1134,604 +1780,504 @@ static void nbl_dev_remove_ctrl_dev(struct nbl_adapter *adapter)
 	*ctrl_dev = NULL;
 
 	/* If it is not nic, this free function will do nothing, so no need check */
-	nbl_dev_free_board_id(&board_id_table, common->bus);
+	nbl_dev_free_board_id(&board_id_table, board_key);
 }
 
-static int nbl_dev_netdev_open(struct net_device *netdev)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+/* NBL_DEV_NET_OPS(ops_name, func, leonis_pf_capable, leonis_vf_capable,
+ * bootis_capable, virtio_capable)
+ */
 
-	return serv_ops->netdev_open(netdev);
-}
+#define NBL_DEV_NET_OPS_TBL \
+do { \
+	NBL_DEV_NET_OPS(ndo_open, serv_ops->netdev_open, 1, 1, 1, 1); \
+	NBL_DEV_NET_OPS(ndo_stop, serv_ops->netdev_stop, 1, 1, 1, 1); \
+	NBL_DEV_NET_OPS(ndo_validate_addr, eth_validate_addr, 1, 1, 1, 1); \
+	NBL_DEV_NET_OPS(ndo_get_stats64, serv_ops->get_stats64, 1, 1, 1, 1); \
+	NBL_DEV_NET_OPS(ndo_set_rx_mode, serv_ops->set_rx_mode, 1, 1, 1, 0); \
+	NBL_DEV_NET_OPS(ndo_change_rx_flags, serv_ops->change_rx_flags, 1, 1, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_set_mac_address, serv_ops->set_mac, 1, 1, 1, 1); \
+	NBL_DEV_NET_OPS(ndo_vlan_rx_add_vid, serv_ops->rx_add_vid, 1, 1, 1, 0); \
+	NBL_DEV_NET_OPS(ndo_vlan_rx_kill_vid, serv_ops->rx_kill_vid, 1, 1, 1, 0); \
+	NBL_DEV_NET_OPS(ndo_features_check, serv_ops->features_check, 1, 1, 1, 1); \
+	NBL_DEV_NET_OPS(ndo_set_features, serv_ops->set_features, 1, 1, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_set_vf_spoofchk, serv_ops->set_vf_spoofchk, 1, 0, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_tx_timeout, serv_ops->tx_timeout, 1, 1, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_bridge_getlink, serv_ops->bridge_getlink, 1, 0, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_bridge_setlink, serv_ops->bridge_setlink, 1, 0, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_set_vf_link_state, serv_ops->set_vf_link_state, 1, 0, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_set_vf_mac, serv_ops->set_vf_mac, 1, 0, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_set_vf_rate, serv_ops->set_vf_rate, 1, 0, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_get_vf_config, serv_ops->get_vf_config, 1, 0, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_get_vf_stats, serv_ops->get_vf_stats, 1, 0, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_select_queue, serv_ops->select_queue, 1, 0, 0, 0); \
+} while (0)
+
+#define NBL_REP_DEV_NET_OPS_TBL \
+do { \
+	NBL_DEV_NET_OPS(ndo_open, serv_ops->rep_netdev_open); \
+	NBL_DEV_NET_OPS(ndo_stop, serv_ops->rep_netdev_stop); \
+	NBL_DEV_NET_OPS(ndo_start_xmit, serv_ops->rep_start_xmit); \
+	NBL_DEV_NET_OPS(ndo_validate_addr, eth_validate_addr); \
+	NBL_DEV_NET_OPS(ndo_get_stats64, serv_ops->rep_get_stats64); \
+	NBL_DEV_NET_OPS(ndo_set_rx_mode, serv_ops->rep_set_rx_mode); \
+	NBL_DEV_NET_OPS(ndo_set_mac_address, serv_ops->rep_set_mac); \
+	NBL_DEV_NET_OPS(ndo_vlan_rx_add_vid, serv_ops->rep_rx_add_vid); \
+	NBL_DEV_NET_OPS(ndo_vlan_rx_kill_vid, serv_ops->rep_rx_kill_vid); \
+	NBL_DEV_NET_OPS(ndo_features_check, serv_ops->features_check); \
+} while (0)
+
+#define NBL_DEV_NET_COMPAT_OPS_TBL \
+do { \
+	NBL_DEV_NET_OPS(ndo_change_mtu, serv_ops->change_mtu, 1, 1, 1, 0); \
+	NBL_DEV_NET_OPS(ndo_setup_tc, serv_ops->setup_tc, 1, 1, 1, 0); \
+	NBL_DEV_NET_OPS(ndo_set_vf_vlan, serv_ops->set_vf_vlan, 1, 0, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_set_vf_trust, serv_ops->set_vf_trust, 1, 0, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_get_phys_port_name, serv_ops->get_phys_port_name, 1, 1, 0, 0); \
+	NBL_DEV_NET_OPS(ndo_get_port_parent_id, serv_ops->get_port_parent_id, 1, 0, 0, 0); \
+} while (0)
+
+#define NBL_REP_DEV_NET_COMPAT_OPS_TBL \
+do { \
+	NBL_DEV_NET_OPS(ndo_change_mtu, serv_ops->change_rep_mtu); \
+	NBL_DEV_NET_OPS(ndo_setup_tc, serv_ops->rep_setup_tc); \
+	NBL_DEV_NET_OPS(ndo_get_phys_port_name, serv_ops->rep_get_phys_port_name); \
+	NBL_DEV_NET_OPS(ndo_get_port_parent_id, serv_ops->rep_get_port_parent_id); \
+} while (0)
+
+#define NBL_DEV_NET_XDP_OPS_TBL \
+do { \
+	NBL_DEV_NET_OPS(ndo_bpf, serv_ops->set_xdp, 1, 0, 0, 0); \
+} while (0)
 
-static int nbl_dev_netdev_stop(struct net_device *netdev)
+static int nbl_dev_setup_netops_leonis(void *priv, struct net_device *netdev,
+				       struct nbl_init_param *param)
 {
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv;
+	struct net_device_ops *netdev_ops;
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->netdev_stop(netdev);
-}
-
-static netdev_tx_t nbl_dev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
 	struct nbl_resource_pt_ops *pt_ops = NBL_DEV_MGT_TO_RES_PT_OPS(dev_mgt);
+	static bool pf_inited, vf_inited, rep_inited;
+	bool is_vf = param->caps.is_vf;
+	bool is_rep = param->is_rep;
 
-	return pt_ops->start_xmit(skb, netdev);
-}
+	if (is_rep) {
+		netdev_ops = &netdev_ops_leonis_rep;
 
-static void nbl_dev_netdev_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+		if (!rep_inited) {
+#define NBL_DEV_NET_OPS(ops, func) \
+	do {netdev_ops->NBL_NAME(ops) = func; ; } while (0)
+			NBL_REP_DEV_NET_OPS_TBL;
+#undef NBL_DEV_NET_OPS
 
-	serv_ops->get_stats64(netdev, stats);
-}
+#define NBL_DEV_NET_OPS(ops, func) \
+	do {netdev_ops->NBL_NAME(ops) = func; ; } while (0)
+			NBL_REP_DEV_NET_COMPAT_OPS_TBL;
+#undef NBL_DEV_NET_OPS
 
-static void nbl_dev_netdev_set_rx_mode(struct net_device *netdev)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+			rep_inited = true;
+		}
+		serv_ops->set_netdev_ops(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					 &netdev_ops_leonis_rep, false);
+	} else if (!is_vf) {
+		netdev_ops = &netdev_ops_leonis_pf;
+
+		if (!pf_inited) {
+#define NBL_DEV_NET_OPS(ops, func, leonis_pf_capable, leonis_vf_capable, bootis_capable, \
+			virtio_capable) \
+	do {netdev_ops->NBL_NAME(ops) = leonis_pf_capable ? func : NULL; ; } while (0)
+			NBL_DEV_NET_OPS_TBL;
+#undef NBL_DEV_NET_OPS
+
+#define NBL_DEV_NET_OPS(ops, func, leonis_pf_capable, leonis_vf_capable, bootis_capable, \
+			virtio_capable) \
+	do {netdev_ops->NBL_NAME(ops) = leonis_pf_capable ? func : NULL; ; } while (0)
+			NBL_DEV_NET_COMPAT_OPS_TBL;
+#undef NBL_DEV_NET_OPS
+
+#define NBL_DEV_NET_OPS(ops, func, leonis_pf_capable, leonis_vf_capable, bootis_capable, \
+			virtio_capable) \
+	do {netdev_ops->NBL_NAME(ops) = leonis_pf_capable ? func : NULL; ; } while (0)
+			NBL_DEV_NET_XDP_OPS_TBL;
+#undef NBL_DEV_NET_OPS
+
+			netdev_ops->ndo_start_xmit = pt_ops->start_xmit;
+			netdev_ops->ndo_xdp_xmit = pt_ops->xdp_xmit;
+
+			pf_inited = true;
+		}
+		serv_ops->set_netdev_ops(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					 &netdev_ops_leonis_pf, true);
+		/* set rep_ops first, because the PF may turn on switchdev without SR-IOV enabled */
+		serv_ops->set_netdev_ops(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					 &netdev_ops_leonis_rep, false);
+	} else {
+		netdev_ops = &netdev_ops_leonis_vf;
+
+		if (!vf_inited) {
+#define NBL_DEV_NET_OPS(ops, func, leonis_pf_capable, leonis_vf_capable, bootis_capable, \
+			virtio_capable) \
+	do {netdev_ops->NBL_NAME(ops) = leonis_vf_capable ? func : NULL; ; } while (0)
+			NBL_DEV_NET_OPS_TBL;
+#undef NBL_DEV_NET_OPS
+
+#define NBL_DEV_NET_OPS(ops, func, leonis_pf_capable, leonis_vf_capable, bootis_capable, \
+			virtio_capable) \
+	do {netdev_ops->NBL_NAME(ops) = leonis_vf_capable ? func : NULL; ; } while (0)
+			NBL_DEV_NET_COMPAT_OPS_TBL;
+#undef NBL_DEV_NET_OPS
+
+			netdev_ops->ndo_start_xmit = pt_ops->start_xmit;
+
+			vf_inited = true;
+		}
+	}
 
-	serv_ops->set_rx_mode(netdev);
+	netdev->netdev_ops = netdev_ops;
+	return 0;
 }
 
-static void nbl_dev_netdev_change_rx_flags(struct net_device *netdev, int flag)
+static void nbl_dev_remove_netops(struct net_device *netdev)
 {
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	serv_ops->change_rx_flags(netdev, flag);
+	netdev->netdev_ops = NULL;
 }
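
The ops wiring above replaces the removed per-op wrapper functions with an X-macro table: each
entry names the ops member, the service-layer handler, and one capability flag per product, and
the same table is expanded several times under different NBL_DEV_NET_OPS definitions, so the PF
gets the full set while the VF gets NULL for anything it cannot support. A minimal standalone
illustration of the pattern (all types and names here are invented for the sketch):

#include <stdio.h>

struct ops { void (*open)(void); void (*set_vf_mac)(void); };

static void do_open(void)       { puts("open"); }
static void do_set_vf_mac(void) { puts("set_vf_mac"); }

/* OPS(member, handler, pf_capable, vf_capable) */
#define OPS_TBL					\
do {						\
	OPS(open,       do_open,       1, 1);	\
	OPS(set_vf_mac, do_set_vf_mac, 1, 0);	\
} while (0)

static struct ops pf_ops, vf_ops;

static void init_ops(void)
{
	/* expand the same table once per product, masking by capability */
#define OPS(member, handler, pf, vf) (pf_ops.member = (pf) ? (handler) : NULL)
	OPS_TBL;
#undef OPS
#define OPS(member, handler, pf, vf) (vf_ops.member = (vf) ? (handler) : NULL)
	OPS_TBL;
#undef OPS
}

int main(void)
{
	init_ops();
	pf_ops.open();
	if (!vf_ops.set_vf_mac)
		puts("VF: set_vf_mac masked out");
	return 0;
}
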
 
-static int nbl_dev_netdev_set_mac(struct net_device *netdev, void *p)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->set_mac(netdev, p);
-}
+/* OPS(ops_name, func, leonis_pf_capable, leonis_vf_capable, bootis_capable, virtio_capable) */
+#define NBL_DEV_ETHTOOL_OPS_TBL \
+do { \
+	NBL_DEV_ETHTOOL_OPS(get_drvinfo, serv_ops->get_drvinfo, 1, 1, 1, 1); \
+	NBL_DEV_ETHTOOL_OPS(get_module_eeprom, serv_ops->get_module_eeprom, 1, 0, 1, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_module_info, serv_ops->get_module_info, 1, 0, 1, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_eeprom_len, serv_ops->get_eeprom_length, 1, 0, 1, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_eeprom, serv_ops->get_eeprom, 1, 0, 1, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_strings, serv_ops->get_strings, 1, 1, 1, 1); \
+	NBL_DEV_ETHTOOL_OPS(get_sset_count, serv_ops->get_sset_count, 1, 1, 1, 1); \
+	NBL_DEV_ETHTOOL_OPS(get_ethtool_stats, serv_ops->get_ethtool_stats, 1, 1, 1, 1); \
+	NBL_DEV_ETHTOOL_OPS(get_channels, serv_ops->get_channels, 1, 1, 1, 1); \
+	NBL_DEV_ETHTOOL_OPS(set_channels, serv_ops->set_channels, 1, 1, 1, 1); \
+	NBL_DEV_ETHTOOL_OPS(get_link, serv_ops->get_link, 1, 1, 1, 1); \
+	NBL_DEV_ETHTOOL_OPS(get_link_ksettings, serv_ops->get_ksettings, 1, 1, 1, 0); \
+	NBL_DEV_ETHTOOL_OPS(set_link_ksettings, serv_ops->set_ksettings, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_ringparam, serv_ops->get_ringparam, 1, 1, 1, 1); \
+	NBL_DEV_ETHTOOL_OPS(set_ringparam, serv_ops->set_ringparam, 1, 1, 1, 1); \
+	NBL_DEV_ETHTOOL_OPS(get_coalesce, serv_ops->get_coalesce, 1, 1, 1, 0); \
+	NBL_DEV_ETHTOOL_OPS(set_coalesce, serv_ops->set_coalesce, 1, 1, 1, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_rxnfc, serv_ops->get_rxnfc, 1, 1, 0, 1); \
+	NBL_DEV_ETHTOOL_OPS(set_rxnfc, serv_ops->set_rxnfc, 1, 1, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_rxfh_indir_size, serv_ops->get_rxfh_indir_size, 1, 1, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_rxfh_key_size, serv_ops->get_rxfh_key_size, 1, 1, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_rxfh, serv_ops->get_rxfh, 1, 1, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(set_rxfh, serv_ops->set_rxfh, 1, 1, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_msglevel, serv_ops->get_msglevel, 1, 1, 1, 1); \
+	NBL_DEV_ETHTOOL_OPS(set_msglevel, serv_ops->set_msglevel, 1, 1, 1, 1); \
+	NBL_DEV_ETHTOOL_OPS(get_regs_len, serv_ops->get_regs_len, 1, 1, 1, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_regs, serv_ops->get_ethtool_dump_regs, 1, 1, 1, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_per_queue_coalesce, serv_ops->get_per_queue_coalesce, 1, 1, 0, 0);\
+	NBL_DEV_ETHTOOL_OPS(set_per_queue_coalesce, serv_ops->set_per_queue_coalesce, 1, 1, 0, 0);\
+	NBL_DEV_ETHTOOL_OPS(self_test, serv_ops->self_test, 1, 0, 1, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_priv_flags, serv_ops->get_priv_flags, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(set_priv_flags, serv_ops->set_priv_flags, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(set_pauseparam, serv_ops->set_pause_param, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_pauseparam, serv_ops->get_pause_param, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(set_fecparam, serv_ops->set_fec_param, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_fecparam, serv_ops->get_fec_param, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_ts_info, serv_ops->get_ts_info, 1, 1, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(set_phys_id, serv_ops->set_phys_id, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(nway_reset, serv_ops->nway_reset, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(flash_device, serv_ops->flash_device, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_dump_flag, serv_ops->get_dump_flag, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_dump_data, serv_ops->get_dump_data, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(set_dump, serv_ops->set_dump, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(set_wol, serv_ops->set_wol, 1, 0, 0, 0); \
+	NBL_DEV_ETHTOOL_OPS(get_wol, serv_ops->get_wol, 1, 0, 0, 0); \
+} while (0)
+
+/* OPS(ops_name, func) */
+#define NBL_REP_DEV_ETHTOOL_OPS_TBL \
+do { \
+	NBL_DEV_ETHTOOL_OPS(get_drvinfo, serv_ops->get_drvinfo); \
+	NBL_DEV_ETHTOOL_OPS(get_strings, serv_ops->get_rep_strings); \
+	NBL_DEV_ETHTOOL_OPS(get_sset_count, serv_ops->get_rep_sset_count); \
+	NBL_DEV_ETHTOOL_OPS(get_ethtool_stats, serv_ops->get_rep_ethtool_stats); \
+	NBL_DEV_ETHTOOL_OPS(get_link, serv_ops->get_link); \
+	NBL_DEV_ETHTOOL_OPS(get_link_ksettings, serv_ops->get_ksettings); \
+	NBL_DEV_ETHTOOL_OPS(get_ringparam, serv_ops->get_ringparam); \
+} while (0)
 
-static int nbl_dev_netdev_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+static int nbl_dev_setup_ethtool_ops_leonis(void *priv, struct net_device *netdev,
+					    struct nbl_init_param *param)
 {
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv;
+	struct ethtool_ops *ethtool_ops;
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	static bool pf_inited, vf_inited, rep_inited;
+	bool is_vf = param->caps.is_vf;
+	bool is_rep = param->is_rep;
+
+	if (is_rep) {
+		ethtool_ops = &ethtool_ops_leonis_rep;
+
+		if (!rep_inited) {
+#define NBL_DEV_ETHTOOL_OPS(ops, func) \
+	do {ethtool_ops->NBL_NAME(ops) = func; ; } while (0)
+			NBL_REP_DEV_ETHTOOL_OPS_TBL;
+#undef NBL_DEV_ETHTOOL_OPS
+			rep_inited = true;
+		}
+	} else if (!is_vf) {
+		ethtool_ops = &ethtool_ops_leonis_pf;
 
-	return serv_ops->rx_add_vid(netdev, proto, vid);
-}
+		if (!pf_inited) {
+#define NBL_DEV_ETHTOOL_OPS(ops, func, leonis_pf_capable, leonis_vf_capable, bootis_capable, \
+			    virtio_capable) \
+	do {ethtool_ops->NBL_NAME(ops) = leonis_pf_capable ? func : NULL; ; } while (0)
+			NBL_DEV_ETHTOOL_OPS_TBL;
+#undef NBL_DEV_ETHTOOL_OPS
 
-static int nbl_dev_netdev_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+			ethtool_ops->get_eth_ctrl_stats = serv_ops->get_eth_ctrl_stats;
+			ethtool_ops->get_pause_stats = serv_ops->get_pause_stats;
 
-	return serv_ops->rx_kill_vid(netdev, proto, vid);
-}
+			ethtool_ops->get_eth_mac_stats = serv_ops->get_eth_mac_stats;
 
-static netdev_features_t
-nbl_dev_netdev_features_check(struct sk_buff *skb, struct net_device *netdev,
-			      netdev_features_t features)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+			ethtool_ops->get_fec_stats = serv_ops->get_fec_stats;
+			ethtool_ops->get_link_ext_state = serv_ops->get_link_ext_state;
 
-	return serv_ops->features_check(skb, netdev, features);
-}
+			ethtool_ops->get_link_ext_stats = serv_ops->get_link_ext_stats;
+			ethtool_ops->get_rmon_stats = serv_ops->get_rmon_stats;
+			pf_inited = true;
+		}
+	} else {
+		ethtool_ops = &ethtool_ops_leonis_vf;
+
+		if (!vf_inited) {
+#define NBL_DEV_ETHTOOL_OPS(ops, func, leonis_pf_capable, leonis_vf_capable, bootis_capable, \
+			    virtio_capable) \
+	do {ethtool_ops->NBL_NAME(ops) = leonis_vf_capable ? func : NULL; ; } while (0)
+			NBL_DEV_ETHTOOL_OPS_TBL;
+#undef NBL_DEV_ETHTOOL_OPS
+			vf_inited = true;
+		}
+	}
 
-static void nbl_dev_netdev_tx_timeout(struct net_device *netdev, u32 txqueue)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-	serv_ops->tx_timeout(netdev, txqueue);
-}
+	ethtool_ops->supported_coalesce_params =
+		ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES |
+		ETHTOOL_COALESCE_TX_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES;
 
-static int nbl_dev_netdev_change_mtu(struct net_device *netdev, int new_mtu)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_ITR_DYNAMIC))
+		ethtool_ops->supported_coalesce_params |=
+			ETHTOOL_COALESCE_USE_ADAPTIVE;
 
-	return serv_ops->change_mtu(netdev, new_mtu);
+	netdev->ethtool_ops = ethtool_ops;
+	return 0;
 }
 
-static int nbl_dev_ndo_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
+static void nbl_dev_remove_ethtool_ops(struct net_device *netdev)
 {
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_phys_port_name(netdev, name, len);
+	netdev->ethtool_ops = NULL;
}
 
-static int
-nbl_dev_ndo_get_port_parent_id(struct net_device *netdev, struct netdev_phys_item_id *ppid)
+#define NBL_DEV_KTLS_OPS_TBL \
+do { \
+	NBL_DEV_KTLS_OPS(tls_dev_add, serv_ops->add_tls_dev); \
+	NBL_DEV_KTLS_OPS(tls_dev_del, serv_ops->del_tls_dev); \
+	NBL_DEV_KTLS_OPS(tls_dev_resync, serv_ops->resync_tls_dev); \
+} while (0)
+
+static int nbl_dev_setup_ktls_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev)
 {
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	static bool ktls_inited;
 
-	return serv_ops->get_port_parent_id(netdev, ppid);
-}
-
-static const struct net_device_ops netdev_ops_leonis_pf = {
-	.ndo_open = nbl_dev_netdev_open,
-	.ndo_stop = nbl_dev_netdev_stop,
-	.ndo_start_xmit = nbl_dev_start_xmit,
-	.ndo_validate_addr = eth_validate_addr,
-	.ndo_get_stats64 = nbl_dev_netdev_get_stats64,
-	.ndo_set_rx_mode = nbl_dev_netdev_set_rx_mode,
-	.ndo_change_rx_flags = nbl_dev_netdev_change_rx_flags,
-	.ndo_set_mac_address = nbl_dev_netdev_set_mac,
-	.ndo_vlan_rx_add_vid = nbl_dev_netdev_rx_add_vid,
-	.ndo_vlan_rx_kill_vid = nbl_dev_netdev_rx_kill_vid,
-	.ndo_features_check = nbl_dev_netdev_features_check,
-	.ndo_tx_timeout = nbl_dev_netdev_tx_timeout,
-	.ndo_change_mtu = nbl_dev_netdev_change_mtu,
-	.ndo_get_phys_port_name = nbl_dev_ndo_get_phys_port_name,
-	.ndo_get_port_parent_id = nbl_dev_ndo_get_port_parent_id,
-};
+	if (!serv_ops->get_product_flex_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					    NBL_SECURITY_ACCEL_CAP))
+		return 0;
 
-static int nbl_dev_setup_netops_leonis(void *priv, struct net_device *netdev,
-				       struct nbl_init_param *param)
-{
-	netdev->netdev_ops = &netdev_ops_leonis_pf;
+	if (!ktls_inited) {
+#define NBL_DEV_KTLS_OPS(ops, func) \
+	do {ktls_ops.NBL_NAME(ops) = func; ; } while (0)
+		NBL_DEV_KTLS_OPS_TBL;
+#undef NBL_DEV_KTLS_OPS
+		ktls_inited = true;
+	}
 
+	netdev->hw_features |= NETIF_F_HW_TLS_RX;
+	netdev->hw_features |= NETIF_F_HW_TLS_TX;
+	netdev->tlsdev_ops = &ktls_ops;
 	return 0;
 }
 
-static void nbl_dev_remove_netops(struct net_device *netdev)
-{
-	netdev->netdev_ops = NULL;
-}
-
-static void nbl_dev_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+static void nbl_dev_remove_ktls_ops(struct net_device *netdev)
 {
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	netdev->hw_features &= ~NETIF_F_HW_TLS_RX;
+	netdev->hw_features &= ~NETIF_F_HW_TLS_TX;
 
-	serv_ops->get_drvinfo(netdev, drvinfo);
+	netdev->tlsdev_ops = NULL;
 }
 
-static int nbl_dev_get_module_eeprom(struct net_device *netdev,
-				     struct ethtool_eeprom *eeprom, u8 *data)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_module_eeprom(netdev, eeprom, data);
-}
+#define NBL_DEV_XFRM_OPS_TBL \
+do { \
+	NBL_DEV_XFRM_OPS(xdo_dev_state_add, serv_ops->add_xdo_dev_state); \
+	NBL_DEV_XFRM_OPS(xdo_dev_state_delete, serv_ops->delete_xdo_dev_state); \
+	NBL_DEV_XFRM_OPS(xdo_dev_state_free, serv_ops->free_xdo_dev_state); \
+	NBL_DEV_XFRM_OPS(xdo_dev_offload_ok, serv_ops->xdo_dev_offload_ok); \
+	NBL_DEV_XFRM_OPS(xdo_dev_state_advance_esn, serv_ops->xdo_dev_state_advance_esn); \
+} while (0)
 
-static int nbl_dev_get_module_info(struct net_device *netdev, struct ethtool_modinfo *info)
+static int nbl_dev_setup_xfrm_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev)
 {
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+	static bool xfrm_inited;
+	enum nbl_flex_cap_type cap_type = NBL_SECURITY_ACCEL_CAP;
 
-	return serv_ops->get_module_info(netdev, info);
-}
-
-static int nbl_dev_get_eeprom_len(struct net_device *netdev)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	if (!serv_ops->get_product_flex_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), cap_type))
+		return 0;
+	chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt),
+			       NBL_CHAN_MSG_NOTIFY_IPSEC_HARD_EXPIRE,
+			       nbl_dev_notify_ipsec_hard_expire, dev_mgt);
+
+	if (!xfrm_inited) {
+#define NBL_DEV_XFRM_OPS(ops, func) \
+	do {xfrm_ops.NBL_NAME(ops) = func; ; } while (0)
+		NBL_DEV_XFRM_OPS_TBL;
+#undef NBL_DEV_XFRM_OPS
+		xfrm_inited = true;
+	}
 
-	return serv_ops->get_eeprom_length(netdev);
-}
+	netdev->features |= NETIF_F_HW_ESP;
+	netdev->hw_enc_features |= NETIF_F_HW_ESP;
+	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
+	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
 
-static int nbl_dev_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	/* gso_partial_features */
+	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
+	netdev->features |= NETIF_F_GSO_ESP;
+	netdev->hw_features |= NETIF_F_GSO_ESP;
+	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
 
-	return serv_ops->get_eeprom(netdev, eeprom, bytes);
+	netdev->xfrmdev_ops = &xfrm_ops;
+	return 0;
 }
 
-static void nbl_dev_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void nbl_dev_remove_xfrm_ops(struct net_device *netdev)
+{
+	netdev->features &= ~NETIF_F_HW_ESP;
+	netdev->hw_enc_features &= ~NETIF_F_HW_ESP;
+	netdev->features &= ~NETIF_F_HW_ESP_TX_CSUM;
+	netdev->hw_enc_features &= ~NETIF_F_HW_ESP_TX_CSUM;
+
+	/* gso_partial_features */
+	netdev->gso_partial_features &= ~NETIF_F_GSO_ESP;
+	netdev->features &= ~NETIF_F_GSO_ESP;
+	netdev->hw_features &= ~NETIF_F_GSO_ESP;
+	netdev->hw_enc_features &= ~NETIF_F_GSO_ESP;
+
+	netdev->xfrmdev_ops = NULL;
+}
+
+/* OPS(ops_name, func, leonis_pf_capable, leonis_vf_capable, bootis_capable, virtio_capable) */
+#define NBL_DEV_DCBNL_OPS_TBL \
+do { \
+	NBL_DEV_DCBNL_OPS(ieee_setets, serv_ops->ieee_setets, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(ieee_getets, serv_ops->ieee_getets, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(ieee_setpfc, serv_ops->ieee_setpfc, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(ieee_getpfc, serv_ops->ieee_getpfc, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(ieee_setapp, serv_ops->ieee_setapp, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(ieee_delapp, serv_ops->ieee_delapp, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(getdcbx, serv_ops->ieee_getdcbx, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(setdcbx, serv_ops->ieee_setdcbx, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(getnumtcs, serv_ops->dcbnl_getnumtcs, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(setpfccfg, serv_ops->dcbnl_setpfccfg, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(getpfccfg, serv_ops->dcbnl_getpfccfg, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(getstate, serv_ops->dcbnl_getstate, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(setstate, serv_ops->dcbnl_setstate, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(getpfcstate, serv_ops->dcbnl_getpfcstate, 1, 0, 0, 0); \
+	NBL_DEV_DCBNL_OPS(getcap, serv_ops->dcbnl_getcap, 1, 0, 0, 0); \
+} while (0)
+
+static int nbl_dev_setup_dcbnl_ops_leonis(void *priv, struct net_device *netdev,
+					  struct nbl_init_param *param)
 {
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv;
+	struct dcbnl_rtnl_ops *dcbnl_ops;
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	static bool pf_inited, vf_inited;
+	bool is_vf = param->caps.is_vf;
+
+	if (!is_vf) {
+		dcbnl_ops = &dcbnl_ops_leonis_pf;
+
+		if (!pf_inited) {
+#define NBL_DEV_DCBNL_OPS(ops, func, leonis_pf_capable, leonis_vf_capable, bootis_capable, \
+			  virtio_capable) \
+	do {dcbnl_ops->NBL_NAME(ops) = leonis_pf_capable ? func : NULL; ; } while (0)
+			NBL_DEV_DCBNL_OPS_TBL;
+#undef NBL_DEV_DCBNL_OPS
+			pf_inited = true;
+		}
+	} else {
+		dcbnl_ops = &dcbnl_ops_leonis_vf;
+
+		if (!vf_inited) {
+#define NBL_DEV_DCBNL_OPS(ops, func, leonis_pf_capable, leonis_vf_capable, bootis_capable, \
+			  virtio_capable) \
+	do {dcbnl_ops->NBL_NAME(ops) = leonis_vf_capable ? func : NULL; ; } while (0)
+			NBL_DEV_DCBNL_OPS_TBL;
+#undef NBL_DEV_DCBNL_OPS
+			vf_inited = true;
+		}
+	}
+
+	netdev->dcbnl_ops = dcbnl_ops;
 
-	serv_ops->get_strings(netdev, stringset, data);
+	return 0;
 }
 
-static int nbl_dev_get_sset_count(struct net_device *netdev, int sset)
+static void nbl_dev_remove_dcbnl_ops(struct net_device *netdev)
 {
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_sset_count(netdev, sset);
+	netdev->dcbnl_ops = NULL;
}
 
-static void nbl_dev_get_ethtool_stats(struct net_device *netdev,
-				      struct ethtool_stats *stats, u64 *data)
+static void nbl_dev_set_eth_mac_addr(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev)
 {
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	u8 mac[ETH_ALEN];
 
-	serv_ops->get_ethtool_stats(netdev, stats, data);
+	ether_addr_copy(mac, netdev->dev_addr);
+	serv_ops->set_eth_mac_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+				   mac, NBL_COMMON_TO_ETH_ID(common));
 }
 
-static void nbl_dev_get_channels(struct net_device *netdev, struct ethtool_channels *channels)
+static int nbl_dev_cfg_netdev(struct net_device *netdev, struct nbl_dev_mgt *dev_mgt,
+			      struct nbl_init_param *param,
			      struct nbl_register_net_result *register_result)
 {
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_dev_net_ops *net_dev_ops = NBL_DEV_MGT_TO_NETDEV_OPS(dev_mgt);
+	u64 vlan_features = 0;
+	int ret = 0;
 
-	serv_ops->get_channels(netdev, channels);
-}
+	if (param->pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+	if (!param->is_rep)
+		netdev->watchdog_timeo = 5 * HZ;
 
-static int nbl_dev_set_channels(struct net_device *netdev, struct ethtool_channels *channels)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	vlan_features = register_result->vlan_features ? register_result->vlan_features
						       : register_result->features;
+	netdev->hw_features |= nbl_features_to_netdev_features(register_result->hw_features);
+	netdev->features |= nbl_features_to_netdev_features(register_result->features);
+	netdev->vlan_features |= nbl_features_to_netdev_features(vlan_features);
+	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+			       NETDEV_XDP_ACT_NDO_XMIT;
 
-	return serv_ops->set_channels(netdev, channels);
-}
-
-static u32 nbl_dev_get_link(struct net_device *netdev)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_link(netdev);
-}
-
-static int
-nbl_dev_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_ksettings(netdev, cmd);
-}
-
-static int
-nbl_dev_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->set_ksettings(netdev, cmd);
-}
-
-static void nbl_dev_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam,
-				  struct kernel_ethtool_ringparam *k_ringparam,
-				  struct netlink_ext_ack *extack)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	serv_ops->get_ringparam(netdev, ringparam, k_ringparam, extack);
-}
-
-static int nbl_dev_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam,
-				 struct kernel_ethtool_ringparam *k_ringparam,
-				 struct netlink_ext_ack *extack)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->set_ringparam(netdev, ringparam, k_ringparam, extack);
-}
-
-static int nbl_dev_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
-				struct kernel_ethtool_coalesce *kernel_ec,
-				struct netlink_ext_ack *extack)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_coalesce(netdev, ec, kernel_ec, extack);
-}
-
-static int nbl_dev_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
-				struct kernel_ethtool_coalesce *kernel_ec,
-				struct netlink_ext_ack *extack)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->set_coalesce(netdev, ec, kernel_ec, extack);
-}
-
-static int nbl_dev_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_rxnfc(netdev, cmd, rule_locs);
-}
-
-static u32 nbl_dev_get_rxfh_indir_size(struct net_device *netdev)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_rxfh_indir_size(netdev);
-}
-
-static u32 nbl_dev_get_rxfh_key_size(struct net_device *netdev)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_rxfh_key_size(netdev);
-}
-
-static int nbl_dev_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_rxfh(netdev, indir, key, hfunc);
-}
-
-static u32 nbl_dev_get_msglevel(struct net_device *netdev)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_msglevel(netdev);
-}
-
-static void nbl_dev_set_msglevel(struct net_device *netdev, u32 msglevel)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	serv_ops->set_msglevel(netdev, msglevel);
-}
-
-static int nbl_dev_get_regs_len(struct net_device *netdev)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_regs_len(netdev);
-}
-
-static void nbl_dev_get_regs(struct net_device *netdev,
-			     struct ethtool_regs *regs, void *p)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	serv_ops->get_ethtool_dump_regs(netdev, regs, p);
-}
-
-static int nbl_dev_get_per_queue_coalesce(struct net_device *netdev,
-					  u32 q_num, struct ethtool_coalesce *ec)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_per_queue_coalesce(netdev, q_num, ec);
-}
-
-static int nbl_dev_set_per_queue_coalesce(struct net_device *netdev,
-					  u32 q_num, struct ethtool_coalesce *ec)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->set_per_queue_coalesce(netdev, q_num, ec);
-}
-
-static void nbl_dev_self_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	serv_ops->self_test(netdev, eth_test, data);
-}
-
-static u32 nbl_dev_get_priv_flags(struct net_device *netdev)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_priv_flags(netdev);
-}
-
-static int nbl_dev_set_priv_flags(struct net_device *netdev, u32 priv_flags)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->set_priv_flags(netdev, priv_flags);
-}
-
-static int nbl_dev_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->set_pause_param(netdev, param);
-}
-
-static void nbl_dev_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	serv_ops->get_pause_param(netdev, param);
-}
-
-static int nbl_dev_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->set_fec_param(netdev, fec);
-}
-
-static int nbl_dev_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_fec_param(netdev, fec);
-}
-
-static int nbl_dev_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *ts_info)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->get_ts_info(netdev, ts_info);
-}
-
-static int nbl_dev_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->set_phys_id(netdev, state);
-}
-
-static int nbl_dev_nway_reset(struct net_device *netdev)
-{
-	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
-	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-
-	return serv_ops->nway_reset(netdev);
-}
-
-static const struct ethtool_ops ethtool_ops_leonis_pf = {
-	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
-				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
-				     ETHTOOL_COALESCE_TX_USECS |
-				     ETHTOOL_COALESCE_TX_MAX_FRAMES |
-				     ETHTOOL_COALESCE_USE_ADAPTIVE,
-	.get_drvinfo = nbl_dev_get_drvinfo,
-	.get_module_eeprom = nbl_dev_get_module_eeprom,
-	.get_module_info = nbl_dev_get_module_info,
-	.get_eeprom_len = nbl_dev_get_eeprom_len,
-	.get_eeprom = nbl_dev_get_eeprom,
-	.get_strings = nbl_dev_get_strings,
-	.get_sset_count = nbl_dev_get_sset_count,
-	.get_ethtool_stats = nbl_dev_get_ethtool_stats,
-	.get_channels = nbl_dev_get_channels,
-	.set_channels = nbl_dev_set_channels,
-	.get_link = nbl_dev_get_link,
-	.get_link_ksettings = nbl_dev_get_link_ksettings,
-	.set_link_ksettings = nbl_dev_set_link_ksettings,
-	.get_ringparam = nbl_dev_get_ringparam,
-	.set_ringparam = nbl_dev_set_ringparam,
-	.get_coalesce = nbl_dev_get_coalesce,
-	.set_coalesce = nbl_dev_set_coalesce,
-	.get_rxnfc = nbl_dev_get_rxnfc,
-	.get_rxfh_indir_size = nbl_dev_get_rxfh_indir_size,
-	.get_rxfh_key_size = nbl_dev_get_rxfh_key_size,
-	.get_rxfh = nbl_dev_get_rxfh,
-	.get_msglevel = nbl_dev_get_msglevel,
-	.set_msglevel = nbl_dev_set_msglevel,
-	.get_regs_len = nbl_dev_get_regs_len,
-	.get_regs = nbl_dev_get_regs,
-	.get_per_queue_coalesce = nbl_dev_get_per_queue_coalesce,
-	.set_per_queue_coalesce = nbl_dev_set_per_queue_coalesce,
-	.self_test = nbl_dev_self_test,
-	.get_priv_flags = nbl_dev_get_priv_flags,
-	.set_priv_flags = nbl_dev_set_priv_flags,
-	.set_pauseparam = nbl_dev_set_pauseparam,
-	.get_pauseparam = nbl_dev_get_pauseparam,
-	.set_fecparam = nbl_dev_set_fecparam,
-	.get_fecparam = nbl_dev_get_fecparam,
-	.get_ts_info = nbl_dev_get_ts_info,
-	.set_phys_id = nbl_dev_set_phys_id,
-	.nway_reset = nbl_dev_nway_reset,
-};
-
-static int nbl_dev_setup_ethtool_ops_leonis(void *priv, struct net_device *netdev,
-					    struct nbl_init_param *param)
-{
-	netdev->ethtool_ops = &ethtool_ops_leonis_pf;
-
-	return 0;
-}
-
-static void nbl_dev_remove_ethtool_ops(struct net_device *netdev)
-{
-	netdev->ethtool_ops = NULL;
-}
-
-static void nbl_dev_set_eth_mac_addr(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev)
-{
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
-	u8 mac[ETH_ALEN];
-
-	ether_addr_copy(mac, netdev->dev_addr);
-	serv_ops->set_eth_mac_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
-				   mac, NBL_COMMON_TO_ETH_ID(common));
-}
-
-static int nbl_dev_cfg_netdev(struct net_device *netdev, struct nbl_dev_mgt *dev_mgt,
-			      struct nbl_init_param *param,
-			      struct nbl_register_net_result *register_result)
-{
-	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
-	struct nbl_dev_net_ops *net_dev_ops = NBL_DEV_MGT_TO_NETDEV_OPS(dev_mgt);
-	int ret = 0;
-
-	if (param->pci_using_dac)
-		netdev->features |= NETIF_F_HIGHDMA;
-
-	netdev->hw_features |= nbl_features_to_netdev_features(register_result->hw_features);
-	netdev->features |= nbl_features_to_netdev_features(register_result->features);
-	netdev->vlan_features |= netdev->features;
+	netdev->priv_flags |= IFF_UNICAST_FLT;
 
 	SET_DEV_MIN_MTU(netdev, ETH_MIN_MTU);
 	SET_DEV_MAX_MTU(netdev, register_result->max_mtu);
 	netdev->mtu = min_t(u16, register_result->max_mtu, NBL_DEFAULT_MTU);
+	serv_ops->change_mtu(netdev, netdev->mtu);
 
 	if (is_valid_ether_addr(register_result->mac))
 		eth_hw_addr_set(netdev, register_result->mac);
@@ -1752,10 +2298,29 @@
 	if (ret)
 		goto set_ethtool_fail;
 
+	ret = net_dev_ops->setup_dcbnl_ops(dev_mgt, netdev, param);
+	if (ret)
+		goto set_dcbnl_fail;
+
+	if (!param->is_rep) {
+		ret = nbl_dev_setup_ktls_ops(dev_mgt, netdev);
+		if (ret)
+			goto set_ktls_fail;
+
+		ret = nbl_dev_setup_xfrm_ops(dev_mgt, netdev);
+		if (ret)
+			goto set_xfrm_fail;
+	}
 	nbl_dev_set_eth_mac_addr(dev_mgt, netdev);
 
 	return 0;
 
+set_xfrm_fail:
+	nbl_dev_remove_ktls_ops(netdev);
+set_ktls_fail:
nbl_dev_remove_dcbnl_ops(netdev); +set_dcbnl_fail: + nbl_dev_remove_ethtool_ops(netdev); set_ethtool_fail: nbl_dev_remove_netops(netdev); set_ops_fail: @@ -1764,6 +2329,9 @@ static int nbl_dev_cfg_netdev(struct net_device *netdev, struct nbl_dev_mgt *dev static void nbl_dev_reset_netdev(struct net_device *netdev) { + nbl_dev_remove_ktls_ops(netdev); + nbl_dev_remove_xfrm_ops(netdev); + nbl_dev_remove_dcbnl_ops(netdev); nbl_dev_remove_ethtool_ops(netdev); nbl_dev_remove_netops(netdev); } @@ -1771,6 +2339,7 @@ static void nbl_dev_reset_netdev(struct net_device *netdev) static int nbl_dev_register_net(struct nbl_dev_mgt *dev_mgt, struct nbl_register_net_result *register_result) { + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct pci_dev *pdev = NBL_COMMON_TO_PDEV(NBL_DEV_MGT_TO_COMMON(dev_mgt)); #ifdef CONFIG_PCI_IOV @@ -1779,7 +2348,7 @@ static int nbl_dev_register_net(struct nbl_dev_mgt *dev_mgt, u16 pf_bdf; u64 pf_bar_start; u64 vf_bar_start, vf_bar_size; - u16 total_vfs, offset, stride; + u16 total_vfs = 0, offset, stride; int pos; u32 val; struct nbl_register_net_param register_param = {0}; @@ -1804,6 +2373,7 @@ static int nbl_dev_register_net(struct nbl_dev_mgt *dev_mgt, vf_bar_start = (u64)(val & PCI_BASE_ADDRESS_MEM_MASK); pci_read_config_dword(pdev, pos + PCI_SRIOV_BAR + 4, &val); vf_bar_start |= ((u64)val << 32); + #ifdef CONFIG_PCI_IOV res = &pdev->resource[PCI_IOV_RESOURCES]; vf_bar_size = resource_size(res); @@ -1820,6 +2390,8 @@ static int nbl_dev_register_net(struct nbl_dev_mgt *dev_mgt, } } + net_dev->total_vfs = total_vfs; + ret = serv_ops->register_net(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &register_param, register_result); @@ -1829,17 +2401,37 @@ static int nbl_dev_register_net(struct nbl_dev_mgt *dev_mgt, -static void nbl_dev_unregister_net(struct nbl_adapter *adapter) +static void nbl_dev_unregister_net(struct nbl_dev_mgt *dev_mgt) { - struct nbl_service_ops_tbl *serv_ops_tbl = NBL_ADAPTER_TO_SERV_OPS_TBL(adapter); - struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); int ret; - ret = serv_ops_tbl->ops->unregister_net(serv_ops_tbl->priv); + ret = serv_ops->unregister_net(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); if (ret) dev_err(dev, "unregister net failed\n"); } +static void nbl_dev_get_rep_feature(struct nbl_adapter *adapter, + struct nbl_register_net_result *register_result) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_rep_feature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), register_result); +} + +static void nbl_dev_get_rep_queue_num(struct nbl_adapter *adapter, + u8 *base_queue_id, + u8 *rep_queue_num) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_rep_queue_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + base_queue_id, rep_queue_num); +} + static u16 nbl_dev_vsi_alloc_queue(struct nbl_dev_net *net_dev, u16 queue_num) { struct nbl_dev_vsi_controller *vsi_ctrl = &net_dev->vsi_ctrl; @@ -1857,15 +2449,19 @@ static u16 nbl_dev_vsi_alloc_queue(struct nbl_dev_net *net_dev, u16 queue_num) static int nbl_dev_vsi_common_setup(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, struct nbl_dev_vsi *vsi) { - struct 
nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); int ret = 0; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_vsi_param vsi_param = {0}; vsi->queue_offset = nbl_dev_vsi_alloc_queue(NBL_DEV_MGT_TO_NET_DEV(dev_mgt), vsi->queue_num); + vsi_param.index = vsi->index; + vsi_param.vsi_id = vsi->vsi_id; + vsi_param.queue_offset = vsi->queue_offset; + vsi_param.queue_num = vsi->queue_num; /* Tell serv & res layer the mapping from vsi to queue_id */ - ret = serv_ops->register_vsi_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index, - vsi->vsi_id, vsi->queue_offset, vsi->queue_num); + ret = serv_ops->register_vsi_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &vsi_param); return ret; } @@ -1890,19 +2486,36 @@ static int nbl_dev_vsi_common_start(struct nbl_dev_mgt *dev_mgt, struct net_devi ret = serv_ops->setup_rss(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); if (ret) { - dev_err(dev, "Setup q2vsi failed\n"); + dev_err(dev, "Setup rss failed\n"); goto set_rss_fail; } - ret = serv_ops->enable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); + ret = serv_ops->setup_rss_indir(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); if (ret) { - dev_err(dev, "Enable napis failed\n"); - goto enable_napi_fail; + dev_err(dev, "Setup rss indir failed\n"); + goto setup_rss_indir_fail; } - return 0; - -enable_napi_fail: + if (vsi->use_independ_irq) { + ret = serv_ops->enable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); + if (ret) { + dev_err(dev, "Enable napis failed\n"); + goto enable_napi_fail; + } + } + + ret = serv_ops->init_tx_rate(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + if (ret) { + dev_err(dev, "init tx_rate failed\n"); + goto init_tx_rate_fail; + } + + return 0; + +init_tx_rate_fail: + serv_ops->disable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); +enable_napi_fail: +setup_rss_indir_fail: serv_ops->remove_rss(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); set_rss_fail: serv_ops->remove_q2vsi(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); @@ -1914,7 +2527,8 @@ static void nbl_dev_vsi_common_stop(struct nbl_dev_mgt *dev_mgt, struct nbl_dev_ { struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - serv_ops->disable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); + if (vsi->use_independ_irq) + serv_ops->disable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); serv_ops->remove_rss(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); serv_ops->remove_q2vsi(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); } @@ -1954,16 +2568,20 @@ static void nbl_dev_vsi_data_remove(struct nbl_dev_mgt *dev_mgt, void *vsi_data) nbl_dev_vsi_common_remove(dev_mgt, vsi); } -static int nbl_dev_vsi_data_start(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, +static int nbl_dev_vsi_data_start(void *dev_priv, struct net_device *netdev, void *vsi_data) { + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; int ret; + u16 vid; - ret = serv_ops->start_net_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, vsi->vsi_id); + vid = vsi->register_result.vlan_tci & VLAN_VID_MASK; + ret = serv_ops->start_net_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, vsi->vsi_id, vid, + vsi->register_result.trusted); if (ret) { dev_err(dev, "Set netdev flow table failed\n"); goto set_flow_fail; @@ -1975,17 
+2593,7 @@ static int nbl_dev_vsi_data_start(struct nbl_dev_mgt *dev_mgt, struct net_device dev_err(dev, "Set netdev lldp flow failed\n"); goto set_lldp_fail; } - vsi->feature.has_lldp = true; - - ret = serv_ops->enable_lag_protocol(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - vsi->vsi_id, true); - if (ret) { - dev_err(dev, "Set netdev lacp flow failed\n"); - goto set_lacp_fail; - } - - vsi->feature.has_lacp = true; } ret = nbl_dev_vsi_common_start(dev_mgt, netdev, vsi); @@ -1997,10 +2605,6 @@ static int nbl_dev_vsi_data_start(struct nbl_dev_mgt *dev_mgt, struct net_device return 0; common_start_fail: - if (!NBL_COMMON_TO_VF_CAP(common)) - serv_ops->enable_lag_protocol(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id, - false); -set_lacp_fail: if (!NBL_COMMON_TO_VF_CAP(common)) serv_ops->remove_lldp_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); set_lldp_fail: @@ -2009,8 +2613,9 @@ static int nbl_dev_vsi_data_start(struct nbl_dev_mgt *dev_mgt, struct net_device return ret; } -static void nbl_dev_vsi_data_stop(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +static void nbl_dev_vsi_data_stop(void *dev_priv, void *vsi_data) { + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; @@ -2020,9 +2625,6 @@ static void nbl_dev_vsi_data_stop(struct nbl_dev_mgt *dev_mgt, void *vsi_data) if (!NBL_COMMON_TO_VF_CAP(common)) { serv_ops->remove_lldp_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); vsi->feature.has_lldp = false; - serv_ops->enable_lag_protocol(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id, - false); - vsi->feature.has_lacp = false; } serv_ops->stop_net_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); @@ -2049,6 +2651,10 @@ static int nbl_dev_vsi_ctrl_register(struct nbl_dev_mgt *dev_mgt, struct nbl_ini { struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_rep_queue_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &vsi->queue_num, &vsi->queue_size); nbl_debug(common, NBL_DEBUG_VSI, "Ctrl vsi register, queue_num %d, queue_size %d", vsi->queue_num, vsi->queue_size); @@ -2070,9 +2676,10 @@ static void nbl_dev_vsi_ctrl_remove(struct nbl_dev_mgt *dev_mgt, void *vsi_data) nbl_dev_vsi_common_remove(dev_mgt, vsi); } -static int nbl_dev_vsi_ctrl_start(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, +static int nbl_dev_vsi_ctrl_start(void *dev_priv, struct net_device *netdev, void *vsi_data) { + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); int ret = 0; @@ -2095,8 +2702,9 @@ static int nbl_dev_vsi_ctrl_start(struct nbl_dev_mgt *dev_mgt, struct net_device return ret; } -static void nbl_dev_vsi_ctrl_stop(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +static void nbl_dev_vsi_ctrl_stop(void *dev_priv, void *vsi_data) { + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); @@ -2121,8 +2729,9 @@ static int nbl_dev_vsi_user_register(struct nbl_dev_mgt *dev_mgt, struct nbl_ini struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; struct nbl_service_ops *serv_ops = 
NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - serv_ops->get_user_queue_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &vsi->queue_num, - &vsi->queue_size, NBL_COMMON_TO_VSI_ID(common)); + serv_ops->get_user_queue_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &vsi->queue_num, &vsi->queue_size, + NBL_COMMON_TO_VSI_ID(common)); nbl_debug(common, NBL_DEBUG_VSI, "User vsi register, queue_num %d, queue_size %d", vsi->queue_num, vsi->queue_size); @@ -2144,13 +2753,13 @@ static void nbl_dev_vsi_user_remove(struct nbl_dev_mgt *dev_mgt, void *vsi_data) -static int nbl_dev_vsi_user_start(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, +static int nbl_dev_vsi_user_start(void *dev_priv, struct net_device *netdev, void *vsi_data) { return 0; } -static void nbl_dev_vsi_user_stop(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +static void nbl_dev_vsi_user_stop(void *dev_priv, void *vsi_data) { } @@ -2163,6 +2772,87 @@ static int nbl_dev_vsi_user_netdev_build(struct nbl_dev_mgt *dev_mgt, static void nbl_dev_vsi_user_netdev_destroy(struct nbl_dev_mgt *dev_mgt, void *vsi_data) { + /* nothing to do */ +} + +static int nbl_dev_vsi_xdp_register(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (!serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_XDP_CAP)) + return 0; + + serv_ops->get_xdp_queue_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &vsi->queue_num, &vsi->queue_size, + NBL_COMMON_TO_VSI_ID(common)); + + nbl_debug(common, NBL_DEBUG_VSI, "Xdp vsi register, queue_num %d, queue_size %d", + vsi->queue_num, vsi->queue_size); + return 0; +} + +static int nbl_dev_vsi_xdp_setup(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + return nbl_dev_vsi_common_setup(dev_mgt, param, vsi); +} + +static void nbl_dev_vsi_xdp_remove(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_vsi_common_remove(dev_mgt, vsi); +} + +static int nbl_dev_vsi_xdp_start(void *dev_priv, struct net_device *netdev, + void *vsi_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + int ret = 0; + + ret = nbl_dev_vsi_common_start(dev_mgt, netdev, vsi); + if (ret) + goto start_fail; + + ret = serv_ops->vsi_open(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, + vsi->index, vsi->queue_num, 1); + if (ret) + goto open_fail; + + return ret; + +open_fail: + nbl_dev_vsi_common_stop(dev_mgt, vsi); +start_fail: + return ret; +} + +static void nbl_dev_vsi_xdp_stop(void *dev_priv, void *vsi_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->vsi_stop(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); + nbl_dev_vsi_common_stop(dev_mgt, vsi); +} + +static int nbl_dev_vsi_xdp_netdev_build(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + struct net_device *netdev, void *vsi_data) +{ + return 0; +} + +static void nbl_dev_vsi_xdp_netdev_destroy(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + /* nothing to 
do */ } static struct nbl_dev_vsi_tbl vsi_tbl[NBL_VSI_MAX] = { @@ -2179,6 +2869,8 @@ static struct nbl_dev_vsi_tbl vsi_tbl[NBL_VSI_MAX] = { .vf_support = true, .only_nic_support = false, .in_kernel = true, + .use_independ_irq = true, + .static_queue = true, }, [NBL_VSI_CTRL] = { .vsi_ops = { @@ -2193,6 +2885,8 @@ static struct nbl_dev_vsi_tbl vsi_tbl[NBL_VSI_MAX] = { .vf_support = false, .only_nic_support = true, .in_kernel = true, + .use_independ_irq = true, + .static_queue = true, }, [NBL_VSI_USER] = { .vsi_ops = { @@ -2207,6 +2901,24 @@ static struct nbl_dev_vsi_tbl vsi_tbl[NBL_VSI_MAX] = { .vf_support = false, .only_nic_support = true, .in_kernel = false, + .use_independ_irq = false, + .static_queue = false, + }, + [NBL_VSI_XDP] = { + .vsi_ops = { + .register_vsi = nbl_dev_vsi_xdp_register, + .setup = nbl_dev_vsi_xdp_setup, + .remove = nbl_dev_vsi_xdp_remove, + .start = nbl_dev_vsi_xdp_start, + .stop = nbl_dev_vsi_xdp_stop, + .netdev_build = nbl_dev_vsi_xdp_netdev_build, + .netdev_destroy = nbl_dev_vsi_xdp_netdev_destroy, + }, + .vf_support = false, + .only_nic_support = true, + .in_kernel = true, + .use_independ_irq = false, + .static_queue = false, }, }; @@ -2234,7 +2946,8 @@ static int nbl_dev_vsi_build(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param vsi->vsi_id = serv_ops->get_vsi_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), 0, i); vsi->index = i; vsi->in_kernel = vsi_tbl[i].in_kernel; - + vsi->use_independ_irq = vsi_tbl[i].use_independ_irq; + vsi->static_queue = vsi_tbl[i].static_queue; net_dev->vsi_ctrl.vsi_list[i] = vsi; } @@ -2261,7 +2974,7 @@ static void nbl_dev_vsi_destroy(struct nbl_dev_mgt *dev_mgt) } } -static struct nbl_dev_vsi *nbl_dev_vsi_select(struct nbl_dev_mgt *dev_mgt) +struct nbl_dev_vsi *nbl_dev_vsi_select(struct nbl_dev_mgt *dev_mgt, u8 vsi_index) { struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct nbl_dev_vsi *vsi = NULL; @@ -2269,49 +2982,33 @@ static struct nbl_dev_vsi *nbl_dev_vsi_select(struct nbl_dev_mgt *dev_mgt) for (i = 0; i < NBL_VSI_MAX; i++) { vsi = net_dev->vsi_ctrl.vsi_list[i]; - if (vsi && vsi->index == NBL_VSI_DATA) + if (vsi && vsi->index == vsi_index) return vsi; } return NULL; } -static int nbl_dev_vsi_handle_switch_event(u16 type, void *event_data, void *callback_data) +static int nbl_dev_vsi_handle_netdev_event(u16 type, void *event_data, void *callback_data) { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct net_device *netdev = net_dev->netdev; - struct nbl_netdev_priv *net_priv = netdev_priv(netdev); - struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - struct nbl_event_dev_mode_switch_data *data = - (struct nbl_event_dev_mode_switch_data *)event_data; - struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - struct nbl_dev_vsi *from_vsi = NULL, *to_vsi = NULL; - int op = data->op; - - switch (op) { - case NBL_DEV_KERNEL_TO_USER: - from_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_DATA]; - to_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; - break; - case NBL_DEV_USER_TO_KERNEL: - from_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; - to_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_DATA]; - break; - default: - nbl_err(common, NBL_DEBUG_VSI, "Unknown switch op %d", op); - return -ENOENT; - } + bool *netdev_state = (bool *)event_data; + struct nbl_dev_vsi *vsi; + int ret; - net_priv->default_vsi_index = to_vsi->index; - net_priv->default_vsi_id = 
to_vsi->vsi_id; + vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_XDP); + if (!vsi) + return 0; - data->ret = serv_ops->switch_traffic_default_dest(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - from_vsi->vsi_id, - to_vsi->vsi_id); - if (data->ret) { - net_priv->default_vsi_index = from_vsi->index; - net_priv->default_vsi_id = from_vsi->vsi_id; + if (*netdev_state) { + ret = vsi->ops->start(dev_mgt, netdev, vsi); + if (ret) + nbl_err(common, NBL_DEBUG_VSI, "xdp-vsi start failed\n"); + } else { + vsi->ops->stop(dev_mgt, vsi); } return 0; @@ -2321,7 +3018,9 @@ static struct nbl_dev_net_ops netdev_ops[NBL_PRODUCT_MAX] = { { .setup_netdev_ops = nbl_dev_setup_netops_leonis, .setup_ethtool_ops = nbl_dev_setup_ethtool_ops_leonis, + .setup_dcbnl_ops = nbl_dev_setup_dcbnl_ops_leonis, }, + }; static void nbl_det_setup_net_dev_ops(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param) @@ -2329,14 +3028,18 @@ static void nbl_det_setup_net_dev_ops(struct nbl_dev_mgt *dev_mgt, struct nbl_in NBL_DEV_MGT_TO_NETDEV_OPS(dev_mgt) = &netdev_ops[param->product_type]; } -static int nbl_dev_setup_net_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +static int nbl_dev_setup_net_dev(struct nbl_adapter *adapter, struct nbl_init_param *param, + struct nbl_rep_data *rep) { + struct nbl_event_callback callback = {0}; struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_dev_net **net_dev = &NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct device *dev = NBL_ADAPTER_TO_DEV(adapter); struct nbl_dev_vsi *vsi; - u16 total_queue_num = 0, kernel_queue_num = 0, user_queue_num = 0; int i, ret = 0; + u16 total_queue_num = 0, kernel_queue_num = 0, user_queue_num = 0; + u16 dynamic_queue_max = 0, irq_queue_num = 0; *net_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_net), GFP_KERNEL); if (!*net_dev) @@ -2358,14 +3061,26 @@ static int nbl_dev_setup_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa goto vsi_register_fail; } - total_queue_num += vsi->queue_num; + if (vsi->static_queue) { + total_queue_num += vsi->queue_num; + } else { + if (dynamic_queue_max < vsi->queue_num) + dynamic_queue_max = vsi->queue_num; + } + + if (vsi->use_independ_irq) + irq_queue_num += vsi->queue_num; + if (vsi->in_kernel) kernel_queue_num += vsi->queue_num; else user_queue_num += vsi->queue_num; } - /* This must before vsi_setup, or else no queue can be alloced */ + /* Only one dynamic-queue vsi can be enabled at a time, so reserve the largest. 
*/ + total_queue_num += dynamic_queue_max; + + /* The total queue count must be set before vsi setup */ (*net_dev)->total_queue_num = total_queue_num; (*net_dev)->kernel_queue_num = kernel_queue_num; (*net_dev)->user_queue_num = user_queue_num; @@ -2376,6 +3091,9 @@ static int nbl_dev_setup_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa if (!vsi) continue; + if (!vsi->in_kernel) + continue; + ret = vsi->ops->setup(dev_mgt, param, vsi); if (ret) { dev_err(NBL_DEV_MGT_TO_DEV(dev_mgt), "Vsi %d setup failed", vsi->index); @@ -2383,21 +3101,18 @@ static int nbl_dev_setup_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa } } - nbl_dev_register_net_irq(dev_mgt, kernel_queue_num); + nbl_dev_register_net_irq(dev_mgt, irq_queue_num); nbl_det_setup_net_dev_ops(dev_mgt, param); + callback.callback = nbl_dev_vsi_handle_netdev_event; + callback.callback_data = dev_mgt; + nbl_event_register(NBL_EVENT_NETDEV_STATE_CHANGE, &callback, NBL_COMMON_TO_VSI_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + return 0; vsi_setup_fail: - while (--i + 1) { - vsi = (*net_dev)->vsi_ctrl.vsi_list[i]; - - if (!vsi) - continue; - - vsi->ops->remove(dev_mgt, vsi); - } vsi_register_fail: nbl_dev_vsi_destroy(dev_mgt); vsi_build_fail: @@ -2416,7 +3131,6 @@ static void nbl_dev_remove_net_dev(struct nbl_adapter *adapter) if (!*net_dev) return; - for (i = 0; i < NBL_VSI_MAX; i++) { vsi = (*net_dev)->vsi_ctrl.vsi_list[i]; @@ -2427,7 +3141,7 @@ static void nbl_dev_remove_net_dev(struct nbl_adapter *adapter) } nbl_dev_vsi_destroy(dev_mgt); - nbl_dev_unregister_net(adapter); + nbl_dev_unregister_net(dev_mgt); devm_kfree(dev, *net_dev); *net_dev = NULL; @@ -2468,178 +3182,684 @@ static int nbl_dev_setup_ops(struct device *dev, struct nbl_dev_ops_tbl **dev_op return 0; } -int nbl_dev_init(void *p, struct nbl_init_param *param) +int nbl_dev_init(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_dev_mgt **dev_mgt = (struct nbl_dev_mgt **)&NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_ops_tbl **dev_ops_tbl = &NBL_ADAPTER_TO_DEV_OPS_TBL(adapter); + struct nbl_service_ops_tbl *serv_ops_tbl = NBL_ADAPTER_TO_SERV_OPS_TBL(adapter); + struct nbl_channel_ops_tbl *chan_ops_tbl = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + int ret = 0; + + ret = nbl_dev_setup_dev_mgt(common, dev_mgt); + if (ret) + goto setup_mgt_fail; + + NBL_DEV_MGT_TO_SERV_OPS_TBL(*dev_mgt) = serv_ops_tbl; + NBL_DEV_MGT_TO_CHAN_OPS_TBL(*dev_mgt) = chan_ops_tbl; + + ret = nbl_dev_setup_common_dev(adapter, param); + if (ret) + goto setup_common_dev_fail; + + if (param->caps.has_ctrl) { + ret = nbl_dev_setup_ctrl_dev(adapter, param); + if (ret) + goto setup_ctrl_dev_fail; + } + + ret = nbl_dev_setup_net_dev(adapter, param, NULL); + if (ret) + goto setup_net_dev_fail; + + ret = nbl_dev_setup_rdma_dev(adapter, param); + if (ret) + goto setup_rdma_dev_fail; + ret = nbl_dev_setup_ops(dev, dev_ops_tbl, adapter); + if (ret) + goto setup_ops_fail; + + return 0; + +setup_ops_fail: + nbl_dev_remove_rdma_dev(adapter); +setup_rdma_dev_fail: + nbl_dev_remove_net_dev(adapter); +setup_net_dev_fail: + nbl_dev_remove_ctrl_dev(adapter); +setup_ctrl_dev_fail: + nbl_dev_remove_common_dev(adapter); +setup_common_dev_fail: + nbl_dev_remove_dev_mgt(common, dev_mgt); +setup_mgt_fail: + return ret; +} + +void nbl_dev_remove(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev = 
NBL_ADAPTER_TO_DEV(adapter); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_dev_mgt **dev_mgt = (struct nbl_dev_mgt **)&NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_ops_tbl **dev_ops_tbl = &NBL_ADAPTER_TO_DEV_OPS_TBL(adapter); + + nbl_dev_remove_ops(dev, dev_ops_tbl); + + nbl_dev_remove_rdma_dev(adapter); + nbl_dev_remove_net_dev(adapter); + nbl_dev_remove_ctrl_dev(adapter); + nbl_dev_remove_common_dev(adapter); + + nbl_dev_remove_dev_mgt(common, dev_mgt); +} + +static void nbl_dev_notify_dev_prepare_reset(struct nbl_dev_mgt *dev_mgt, + enum nbl_reset_event event) +{ + int func_num = 0; + unsigned long cur_func = 0; + unsigned long next_func = 0; + unsigned long *func_bitmap; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_chan_send_info chan_send; + + func_bitmap = devm_kcalloc(NBL_COMMON_TO_DEV(common), BITS_TO_LONGS(NBL_MAX_FUNC), + sizeof(long), GFP_KERNEL); + if (!func_bitmap) + return; + + serv_ops->get_active_func_bitmaps(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), func_bitmap, + NBL_MAX_FUNC); + memset(dev_mgt->ctrl_dev->task_info.reset_status, 0, + sizeof(dev_mgt->ctrl_dev->task_info.reset_status)); + /* Clear the ctrl dev's func_id from the bitmap; it is handled last */ + clear_bit(NBL_COMMON_TO_MGT_PF(common), func_bitmap); + + cur_func = NBL_COMMON_TO_MGT_PF(common); + while (1) { + next_func = find_next_bit(func_bitmap, NBL_MAX_FUNC, cur_func + 1); + if (next_func >= NBL_MAX_FUNC) + break; + + cur_func = next_func; + dev_mgt->ctrl_dev->task_info.reset_status[cur_func] = NBL_RESET_SEND; + NBL_CHAN_SEND(chan_send, cur_func, NBL_CHAN_MSG_NOTIFY_RESET_EVENT, &event, + sizeof(event), NULL, 0, 0); + chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + func_num++; + if (func_num >= NBL_DEV_BATCH_RESET_FUNC_NUM) { + usleep_range(NBL_DEV_BATCH_RESET_USEC, NBL_DEV_BATCH_RESET_USEC * 2); + func_num = 0; + } + } + + if (func_num) + usleep_range(NBL_DEV_BATCH_RESET_USEC, NBL_DEV_BATCH_RESET_USEC * 2); + + /* The ctrl dev must be processed last, because its reset task will close the mailbox */ + dev_mgt->ctrl_dev->task_info.reset_status[NBL_COMMON_TO_MGT_PF(common)] = NBL_RESET_SEND; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_NOTIFY_RESET_EVENT, + NULL, 0, NULL, 0, 0); + chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + usleep_range(NBL_DEV_BATCH_RESET_USEC, NBL_DEV_BATCH_RESET_USEC * 2); + + cur_func = NBL_COMMON_TO_MGT_PF(common); + while (1) { + if (dev_mgt->ctrl_dev->task_info.reset_status[cur_func] == NBL_RESET_SEND) + nbl_info(common, NBL_DEBUG_MAIN, "func %ld reset failed", cur_func); + + next_func = find_next_bit(func_bitmap, NBL_MAX_FUNC, cur_func + 1); + if (next_func >= NBL_MAX_FUNC) + break; + + cur_func = next_func; + } + + devm_kfree(NBL_COMMON_TO_DEV(common), func_bitmap); +} + +static void nbl_dev_handle_fatal_err(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_chan_param_notify_fw_reset_info fw_reset = {0}; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev_mgt->net_dev->netdev); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_chan_send_info chan_send; + + if (test_and_set_bit(NBL_FATAL_ERR, adapter->state)) { + nbl_info(common, NBL_DEBUG_MAIN, "dev in fatal_err status already."); + 
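/* Fatal-error handling is already in progress; nothing more to do. */ + 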
return; + } + + nbl_dev_disable_abnormal_irq(dev_mgt); + nbl_dev_ctrl_task_stop(dev_mgt); + nbl_dev_notify_dev_prepare_reset(dev_mgt, NBL_HW_FATAL_ERR_EVENT); + + /* Notify the EMP firmware to shut the device down */ + fw_reset.type = NBL_FW_HIGH_TEMP_RESET; + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_NOTITY_FW_RESET, &fw_reset, sizeof(fw_reset), NULL, 0, 0); + chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_ABNORMAL, + NBL_CHAN_TYPE_ADMINQ, true); + serv_ops->set_hw_status(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_HW_FATAL_ERR); + nbl_info(common, NBL_DEBUG_MAIN, "dev in fatal_err status."); +} + +/* ---------- Dev start process ---------- */ +static int nbl_dev_start_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + int err = 0; + + err = nbl_dev_request_abnormal_irq(dev_mgt); + if (err) + goto abnormal_request_irq_err; + + err = nbl_dev_enable_abnormal_irq(dev_mgt); + if (err) + goto enable_abnormal_irq_err; + + err = nbl_dev_request_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); + if (err) + goto request_adminq_irq_err; + + err = nbl_dev_enable_adminq_irq(dev_mgt); + if (err) + goto enable_adminq_irq_err; + + nbl_dev_health_init(dev_mgt); + + nbl_dev_get_port_attributes(dev_mgt); + nbl_dev_init_port(dev_mgt); + nbl_dev_enable_port(dev_mgt, true); + nbl_dev_ctrl_task_start(dev_mgt); + + return 0; + +enable_adminq_irq_err: + nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); +request_adminq_irq_err: + nbl_dev_disable_abnormal_irq(dev_mgt); +enable_abnormal_irq_err: + nbl_dev_free_abnormal_irq(dev_mgt); +abnormal_request_irq_err: + return err; +} + +static void nbl_dev_stop_ctrl_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + + if (!NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) + return; + + nbl_dev_ctrl_task_stop(dev_mgt); + nbl_dev_enable_port(dev_mgt, false); + nbl_dev_disable_adminq_irq(dev_mgt); + nbl_dev_destroy_health(dev_mgt); + nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); + nbl_dev_disable_abnormal_irq(dev_mgt); + nbl_dev_free_abnormal_irq(dev_mgt); +} + +static void nbl_dev_chan_notify_link_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct net_device *netdev = (struct net_device *)priv; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_chan_param_notify_link_state *link_info; + + link_info = (struct nbl_chan_param_notify_link_state *)data; + + serv_ops->set_netdev_carrier_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + netdev, link_info->link_state); +} + +static void nbl_dev_register_link_state_chan_msg(struct nbl_dev_mgt *dev_mgt, + struct net_device *netdev) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_NOTIFY_LINK_STATE, + nbl_dev_chan_notify_link_state_resp, netdev); +} + +static void nbl_dev_chan_notify_reset_event_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dev_mgt *dev_mgt = 
(struct nbl_dev_mgt *)priv; + enum nbl_reset_event event = *(enum nbl_reset_event *)data; + + dev_mgt->common_dev->reset_task.event = event; + nbl_common_queue_work(&dev_mgt->common_dev->reset_task.task, false, false); +} + +static void nbl_dev_chan_ack_reset_event_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; + + WRITE_ONCE(dev_mgt->ctrl_dev->task_info.reset_status[src_id], NBL_RESET_DONE); +} + +static void nbl_dev_register_reset_event_chan_msg(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_NOTIFY_RESET_EVENT, + nbl_dev_chan_notify_reset_event_resp, dev_mgt); + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_ACK_RESET_EVENT, + nbl_dev_chan_ack_reset_event_resp, dev_mgt); +} + +static int nbl_dev_setup_rep_netdev(struct nbl_adapter *adapter, struct nbl_init_param *param, + struct nbl_rep_data *rep) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct net_device *netdev; + struct nbl_netdev_priv *net_priv; + struct nbl_register_net_result register_result = { 0 }; + u16 tx_queue_num = 1, rx_queue_num = 1; + int ret = 0; + + nbl_dev_get_rep_feature(adapter, &register_result); + + netdev = alloc_etherdev_mqs(sizeof(struct nbl_netdev_priv), tx_queue_num, rx_queue_num); + if (!netdev) { + dev_err(dev, "Alloc net device failed\n"); + ret = -ENOMEM; + goto alloc_fail; + } + + net_priv = netdev_priv(netdev); + net_priv->adapter = adapter; + rep->netdev = netdev; + net_priv->rep = rep; + net_priv->netdev = netdev; + + SET_NETDEV_DEV(netdev, dev); + ret = nbl_dev_cfg_netdev(netdev, dev_mgt, param, &register_result); + if (ret) { + dev_err(dev, "Cfg net device failed, ret=%d\n", ret); + goto cfg_netdev_fail; + } + + netif_carrier_off(netdev); + ret = register_netdev(netdev); + if (ret) { + dev_err(dev, "Register netdev failed, ret=%d\n", ret); + goto register_netdev_fail; + } + return 0; + +register_netdev_fail: +cfg_netdev_fail: + free_netdev(netdev); + rep->netdev = NULL; +alloc_fail: + return ret; +} + +static int nbl_dev_eswitch_load_rep(struct nbl_adapter *adapter, int num_vfs) +{ + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_init_param param; + struct nbl_dev_rep *rep_dev; + int i, ret = 0; + u16 vf_base_vsi_id; + char net_dev_name[IFNAMSIZ]; + + rep_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_rep), GFP_KERNEL); + if (!rep_dev) + return -ENOMEM; + + memset(&param, 0, sizeof(param)); + + NBL_DEV_MGT_TO_REP_DEV(dev_mgt) = rep_dev; + rep_dev->num_vfs = num_vfs; + param.is_rep = true; + param.pci_using_dac = NBL_COMMON_TO_PCI_USING_DAC(common); + rep_dev->rep = devm_kzalloc(dev, num_vfs * sizeof(struct nbl_rep_data), GFP_KERNEL); + if (!rep_dev->rep) + return -ENOMEM; + + vf_base_vsi_id = serv_ops->get_vf_base_vsi_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + common->mgt_pf); + ret = serv_ops->alloc_rep_data(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), num_vfs, vf_base_vsi_id); + + for (i = 0; i < num_vfs; i++) { + 
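/* Representor VSI ids are contiguous, starting at the VF base VSI id. */ + 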
rep_dev->rep[i].rep_vsi_id = vf_base_vsi_id + i; + ret = nbl_dev_setup_rep_netdev(adapter, &param, &rep_dev->rep[i]); + if (ret) + return ret; + nbl_dev_get_rep_queue_num(adapter, &rep_dev->rep[i].base_queue_id, + &rep_dev->rep[i].rep_queue_num); + + /* add dev_name sysfs here */ + snprintf(net_dev_name, IFNAMSIZ, "%s_%d", net_dev->netdev->name, i); + nbl_net_add_name_attr(&rep_dev->rep[i].dev_name_attr, net_dev_name); + ret = sysfs_create_file(&rep_dev->rep[i].netdev->dev.kobj, + &rep_dev->rep[i].dev_name_attr.attr); + if (ret) { + dev_err(dev, "nbl rep add rep_id net-fs failed"); + return ret; + } + serv_ops->set_rep_netdev_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &rep_dev->rep[i]); + } + + dev_info(dev, "nbl dev switch load rep success\n"); + return 0; +} + +static int nbl_dev_eswitch_unload_rep(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_rep *rep_dev = NBL_DEV_MGT_TO_REP_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_rep_data *rep_data = NULL; + struct device *dev; + struct net_device *netdev; + int i; + + if (!rep_dev) + return -ENODEV; + + dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + rep_data = rep_dev->rep; + if (!rep_data) { + devm_kfree(dev, rep_dev); + NBL_DEV_MGT_TO_REP_DEV(dev_mgt) = NULL; + return -ENODEV; + } + + serv_ops->unset_rep_netdev_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + for (i = 0; i < rep_dev->num_vfs; i++) { + netdev = rep_data[i].netdev; + if (!netdev) + continue; + sysfs_remove_file(&netdev->dev.kobj, &rep_data[i].dev_name_attr.attr); + unregister_netdev(netdev); + nbl_dev_reset_netdev(netdev); + free_netdev(netdev); + } + serv_ops->free_rep_data(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + devm_kfree(dev, rep_data); + devm_kfree(dev, rep_dev); + NBL_DEV_MGT_TO_REP_DEV(dev_mgt) = NULL; + + return 0; +} + +static int nbl_dev_eswitch_mode_to_devlink(u16 cur_eswitch_mode, u16 *devlink_eswitch_mode) +{ + switch (cur_eswitch_mode) { + case NBL_ESWITCH_LEGACY: + *devlink_eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + break; + case NBL_ESWITCH_OFFLOADS: + *devlink_eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + break; + default: + *devlink_eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + } + return 0; +} + +static int nbl_dev_eswitch_mode_from_devlink(u16 devlink_eswitch_mode, u16 *cfg_eswitch_mode) +{ + switch (devlink_eswitch_mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + *cfg_eswitch_mode = NBL_ESWITCH_LEGACY; + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + *cfg_eswitch_mode = NBL_ESWITCH_OFFLOADS; + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +int nbl_dev_destroy_rep(void *p) { struct nbl_adapter *adapter = (struct nbl_adapter *)p; - struct device *dev = NBL_ADAPTER_TO_DEV(adapter); - struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); - struct nbl_dev_mgt **dev_mgt = (struct nbl_dev_mgt **)&NBL_ADAPTER_TO_DEV_MGT(adapter); - struct nbl_dev_ops_tbl **dev_ops_tbl = &NBL_ADAPTER_TO_DEV_OPS_TBL(adapter); - struct nbl_service_ops_tbl *serv_ops_tbl = NBL_ADAPTER_TO_SERV_OPS_TBL(adapter); - struct nbl_channel_ops_tbl *chan_ops_tbl = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + u16 eswitch_mode = 0; int ret = 0; - ret = nbl_dev_setup_dev_mgt(common, dev_mgt); - if (ret) - goto setup_mgt_fail; - - NBL_DEV_MGT_TO_SERV_OPS_TBL(*dev_mgt) = serv_ops_tbl; - NBL_DEV_MGT_TO_CHAN_OPS_TBL(*dev_mgt) = chan_ops_tbl; - - ret = nbl_dev_setup_common_dev(adapter, param); - if (ret) - goto 
setup_common_dev_fail; - - if (param->caps.has_ctrl) { - ret = nbl_dev_setup_ctrl_dev(adapter, param); + eswitch_mode = serv_ops->get_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (eswitch_mode == NBL_ESWITCH_OFFLOADS) { + ret = nbl_dev_eswitch_unload_rep(dev_mgt); if (ret) - goto setup_ctrl_dev_fail; + return ret; + ret = serv_ops->free_rep_queue_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); } - ret = nbl_dev_setup_net_dev(adapter, param); - if (ret) - goto setup_net_dev_fail; + return ret; +} - ret = nbl_dev_setup_ops(dev, dev_ops_tbl, adapter); - if (ret) - goto setup_ops_fail; +int nbl_dev_create_rep(void *p, int num_vfs) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct net_device *netdev = net_dev->netdev; + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + u16 eswitch_mode = 0; + int ret = 0; - return 0; + eswitch_mode = serv_ops->get_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + dev_info(dev, "dev create rep num_vfs:%d, eswitch_mode:%d\n", num_vfs, eswitch_mode); + if (eswitch_mode == NBL_ESWITCH_OFFLOADS) { + ret = nbl_dev_eswitch_load_rep(adapter, num_vfs); + if (ret) { + nbl_dev_eswitch_unload_rep(dev_mgt); + return ret; + } + ret = serv_ops->alloc_rep_queue_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev); + } -setup_ops_fail: - nbl_dev_remove_net_dev(adapter); -setup_net_dev_fail: - nbl_dev_remove_ctrl_dev(adapter); -setup_ctrl_dev_fail: - nbl_dev_remove_common_dev(adapter); -setup_common_dev_fail: - nbl_dev_remove_dev_mgt(common, dev_mgt); -setup_mgt_fail: return ret; } -void nbl_dev_remove(void *p) +int nbl_dev_setup_vf_config(void *p, int num_vfs) { struct nbl_adapter *adapter = (struct nbl_adapter *)p; - struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->setup_vf_config(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), num_vfs, false); +} + +void nbl_dev_register_dev_name(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); - struct nbl_dev_mgt **dev_mgt = (struct nbl_dev_mgt **)&NBL_ADAPTER_TO_DEV_MGT(adapter); - struct nbl_dev_ops_tbl **dev_ops_tbl = &NBL_ADAPTER_TO_DEV_OPS_TBL(adapter); - nbl_dev_remove_ops(dev, dev_ops_tbl); + /* get pf_name then register it to AF */ + serv_ops->register_dev_name(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + common->vsi_id, net_dev->netdev->name); +} - nbl_dev_remove_net_dev(adapter); - nbl_dev_remove_ctrl_dev(adapter); - nbl_dev_remove_common_dev(adapter); +void nbl_dev_get_dev_name(void *p, char *dev_name) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); - nbl_dev_remove_dev_mgt(common, dev_mgt); + serv_ops->get_dev_name(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->vsi_id, dev_name); } -/* ---------- Dev start process ---------- */ -static int nbl_dev_start_ctrl_dev(struct nbl_adapter *adapter, 
struct nbl_init_param *param) +void nbl_dev_remove_vf_config(void *p) { - struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); - int err = 0; + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - err = nbl_dev_request_abnormal_irq(dev_mgt); - if (err) - goto abnormal_request_irq_err; + return serv_ops->remove_vf_config(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} - err = nbl_dev_enable_abnormal_irq(dev_mgt); - if (err) - goto enable_abnormal_irq_err; +static int nbl_dev_init_offload_mode(struct nbl_dev_mgt *dev_mgt, u16 vsi_id) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + int ret = 0; - err = nbl_dev_request_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); - if (err) - goto request_adminq_irq_err; + ret = serv_ops->disable_phy_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); + if (ret) + return ret; + serv_ops->init_acl(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + ret = serv_ops->set_upcall_rule(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id, vsi_id); + if (ret) + goto fail_set_upcall_rule; - err = nbl_dev_enable_adminq_irq(dev_mgt); - if (err) - goto enable_adminq_irq_err; + /* eswitch mode set, start CMDQ or add reference */ + ret = serv_ops->switchdev_init_cmdq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (ret < 0 || ret >= NBL_TC_FLOW_INST_COUNT) + goto fail_init_cmdq; + common->tc_inst_id = ret; - nbl_dev_ctrl_register_flr_chan_msg(dev_mgt); + ret = serv_ops->set_tc_flow_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (ret) + goto fail_set_tc_flow_info; - nbl_dev_get_port_attributes(dev_mgt); - nbl_dev_enable_port(dev_mgt, true); - nbl_dev_ctrl_task_start(dev_mgt); + ret = serv_ops->get_tc_flow_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (ret) + goto fail_get_tc_flow_info; return 0; -enable_adminq_irq_err: - nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); -request_adminq_irq_err: - nbl_dev_disable_abnormal_irq(dev_mgt); -enable_abnormal_irq_err: - nbl_dev_free_abnormal_irq(dev_mgt); -abnormal_request_irq_err: - return err; +fail_get_tc_flow_info: + serv_ops->unset_tc_flow_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +fail_set_tc_flow_info: + serv_ops->switchdev_deinit_cmdq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +fail_init_cmdq: + serv_ops->unset_upcall_rule(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); +fail_set_upcall_rule: + serv_ops->uninit_acl(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + serv_ops->enable_phy_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); + return ret; } -static void nbl_dev_stop_ctrl_dev(struct nbl_adapter *adapter) +static int nbl_dev_uninit_offload_mode(struct nbl_dev_mgt *dev_mgt) { - struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + int ret = 0; - if (!NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) - return; + ret = serv_ops->enable_phy_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); + if (ret) + return ret; + ret = serv_ops->unset_upcall_rule(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); + if (ret) + goto fail_unset_upcall_rule; + serv_ops->uninit_acl(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); - nbl_dev_ctrl_task_stop(dev_mgt); - nbl_dev_enable_port(dev_mgt, false); - 
nbl_dev_disable_adminq_irq(dev_mgt); - nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); - nbl_dev_disable_abnormal_irq(dev_mgt); - nbl_dev_free_abnormal_irq(dev_mgt); + return 0; + +fail_unset_upcall_rule: + serv_ops->disable_phy_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); + return ret; } -static void nbl_dev_chan_notify_link_state_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_dev_destroy_flow_res(struct nbl_dev_mgt *dev_mgt) { - struct net_device *netdev = (struct net_device *)priv; - struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); - struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - struct nbl_chan_param_notify_link_state *link_info; - link_info = (struct nbl_chan_param_notify_link_state *)data; + /* unset tc flow info */ + serv_ops->unset_tc_flow_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + serv_ops->get_tc_flow_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); - serv_ops->set_netdev_carrier_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - netdev, link_info->link_state); + /* stop CMDQ or reduce its reference */ + serv_ops->switchdev_deinit_cmdq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); } -static void nbl_dev_register_link_state_chan_msg(struct nbl_dev_mgt *dev_mgt, - struct net_device *netdev) +static void nbl_dev_remove_rep_res(struct nbl_dev_mgt *dev_mgt) { - struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); - - if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), - NBL_CHAN_TYPE_MAILBOX)) - return; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_vsi *vsi = dev_mgt->net_dev->vsi_ctrl.vsi_list[NBL_VSI_CTRL]; + u16 cur_eswitch_mode = NBL_ESWITCH_NONE; + + cur_eswitch_mode = serv_ops->get_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (cur_eswitch_mode == NBL_ESWITCH_OFFLOADS) { + nbl_dev_eswitch_unload_rep(dev_mgt); + serv_ops->free_rep_queue_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + nbl_dev_uninit_offload_mode(dev_mgt); + serv_ops->set_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_ESWITCH_NONE); + nbl_dev_destroy_flow_res(dev_mgt); + vsi->ops->stop(dev_mgt, vsi); + } +} - chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), - NBL_CHAN_MSG_NOTIFY_LINK_STATE, - nbl_dev_chan_notify_link_state_resp, netdev); +static int nbl_dev_setup_devlink_port(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, + struct nbl_init_param *param) +{ + return 0; } +static void nbl_dev_remove_devlink_port(struct nbl_dev_mgt *dev_mgt) +{ +} static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) { struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); +#ifdef CONFIG_PCI_ATS + struct pci_dev *pdev = NBL_COMMON_TO_PDEV(common); +#endif struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct net_device *netdev = net_dev->netdev; struct nbl_netdev_priv *net_priv; struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); struct nbl_dev_vsi *vsi; - struct nbl_event_callback callback = {0}; - u16 net_vector_id; + struct nbl_dev_vsi *user_vsi; + struct nbl_dev_vsi *xdp_vsi; + struct nbl_ring_param ring_param = {0}; + u16 net_vector_id, 
queue_num, xdp_queue_num = 0; int ret; + char dev_name[IFNAMSIZ] = {0}; - vsi = nbl_dev_vsi_select(dev_mgt); + vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_DATA); if (!vsi) return -EFAULT; - netdev = alloc_etherdev_mqs(sizeof(struct nbl_netdev_priv), vsi->queue_num, vsi->queue_num); + user_vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_USER); + queue_num = vsi->queue_num; + netdev = alloc_etherdev_mqs(sizeof(struct nbl_netdev_priv), queue_num, queue_num); if (!netdev) { dev_err(dev, "Alloc net device failed\n"); ret = -ENOMEM; @@ -2649,23 +3869,49 @@ static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa SET_NETDEV_DEV(netdev, dev); net_priv = netdev_priv(netdev); net_priv->adapter = adapter; - nbl_dev_set_netdev_priv(netdev, vsi); + nbl_dev_set_netdev_priv(netdev, vsi, user_vsi); net_dev->netdev = netdev; common->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); - serv_ops->set_mask_en(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), 1); + serv_ops->set_mask_en(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), net_msix_mask_en); /* Alloc all queues. * One problem is we now must use the queue_size of data_vsi for all queues. */ - ret = serv_ops->alloc_rings(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, - net_dev->kernel_queue_num, net_dev->kernel_queue_num, - net_priv->queue_size); + xdp_vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_XDP); + if (xdp_vsi) + xdp_queue_num = xdp_vsi->queue_num; + + ring_param.tx_ring_num = net_dev->kernel_queue_num; + ring_param.rx_ring_num = net_dev->kernel_queue_num; + ring_param.xdp_ring_offset = net_dev->kernel_queue_num - xdp_queue_num; + ring_param.queue_size = net_priv->queue_size; + ret = serv_ops->alloc_rings(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, &ring_param); if (ret) { dev_err(dev, "Alloc rings failed\n"); goto alloc_rings_fail; } + serv_ops->cpu_affinity_init(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->queue_num); + ret = serv_ops->setup_net_resource_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, + vsi->register_result.vlan_proto, + vsi->register_result.vlan_tci, + vsi->register_result.rate); + if (ret) { + dev_err(dev, "setup net mgt failed\n"); + goto setup_net_mgt_fail; + } + + /* The netdev build must happen before setup_txrx_queues: the MAC snoop check trusts the + * MAC when the PF has configured it for a VF via "ip link", and we judge that case, denying + * the VF's MAC change, according to whether its queues have already been allocated. 
+ */ + ret = vsi->ops->netdev_build(dev_mgt, param, netdev, vsi); + if (ret) { + dev_err(dev, "Build netdev failed, selected vsi %d\n", vsi->index); + goto build_netdev_fail; + } + net_vector_id = msix_info->serv_info[NBL_MSIX_NET_TYPE].base_vector_id; ret = serv_ops->setup_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id, net_dev->total_queue_num, net_vector_id); @@ -2674,20 +3920,21 @@ static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa goto set_queue_fail; } - ret = serv_ops->setup_net_resource_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev); + ret = serv_ops->init_hw_stats(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); if (ret) { - dev_err(dev, "setup net mgt failed\n"); - goto setup_net_mgt_fail; + dev_err(dev, "init hw stats failed\n"); + goto init_hw_stats_fail; } - nbl_dev_register_link_state_chan_msg(dev_mgt, netdev); - - ret = vsi->ops->netdev_build(dev_mgt, param, netdev, vsi); + ret = nbl_init_lag(dev_mgt, param); if (ret) { - dev_err(dev, "Build netdev failed, selected vsi %d\n", vsi->index); - goto build_netdev_fail; + dev_err(dev, "init bond failed\n"); + goto enable_bond_fail; } + nbl_dev_register_link_state_chan_msg(dev_mgt, netdev); + nbl_dev_register_reset_event_chan_msg(dev_mgt); + ret = vsi->ops->start(dev_mgt, netdev, vsi); if (ret) { dev_err(dev, "Start vsi failed, selected vsi %d\n", vsi->index); @@ -2701,6 +3948,13 @@ static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa } netif_carrier_off(netdev); + + ret = nbl_dev_setup_devlink_port(dev_mgt, netdev, param); + if (ret) { + dev_err(dev, "Setup devlink_port failed\n"); + goto setup_devlink_port_fail; + } + ret = register_netdev(netdev); if (ret) { dev_err(dev, "Register netdev failed\n"); @@ -2708,27 +3962,66 @@ static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa if (!param->caps.is_vf) { - callback.callback = nbl_dev_vsi_handle_switch_event; - callback.callback_data = dev_mgt; - nbl_event_register(NBL_EVENT_DEV_MODE_SWITCH, &callback, - NBL_COMMON_TO_ETH_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_MIRROR_SYSFS_CAP)) + nbl_netdev_add_mirror_sysfs(netdev, net_dev); + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_QOS_SYSFS_CAP)) + nbl_netdev_add_sysfs(netdev, net_dev); + if (net_dev->total_vfs) { + ret = serv_ops->setup_vf_resource(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + net_dev->total_vfs); + if (ret) + goto setup_vf_res_fail; + } + } else { + /* The VF device uses the PF's name as its base name */ + nbl_net_add_name_attr(&net_dev->dev_attr.dev_name_attr, dev_name); +#ifdef CONFIG_PCI_ATS + if (pdev->physfn) { + nbl_dev_get_dev_name(adapter, dev_name); + memcpy(net_dev->dev_attr.dev_name_attr.net_dev_name, dev_name, IFNAMSIZ); + ret = sysfs_create_file(&netdev->dev.kobj, + &net_dev->dev_attr.dev_name_attr.attr); + if (ret) { + dev_err(dev, "nbl vf device add dev_name:%s net-fs failed", + dev_name); + goto add_vf_sys_attr_fail; + } + dev_dbg(dev, "nbl vf device get dev_name:%s", dev_name); + } else { + dev_dbg(dev, "nbl vf device no need change name"); + } +#endif } set_bit(NBL_DOWN, adapter->state); return 0; - +setup_vf_res_fail: + nbl_netdev_remove_sysfs(net_dev); + nbl_netdev_remove_mirror_sysfs(net_dev); +#ifdef CONFIG_PCI_ATS +add_vf_sys_attr_fail: +#endif + unregister_netdev(netdev); register_netdev_fail: + nbl_dev_remove_devlink_port(dev_mgt); +setup_devlink_port_fail: nbl_dev_free_net_irq(dev_mgt); request_irq_fail: 
vsi->ops->stop(dev_mgt, vsi); start_vsi_fail: + nbl_deinit_lag(dev_mgt); +enable_bond_fail: + serv_ops->remove_hw_stats(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +init_hw_stats_fail: + serv_ops->remove_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +set_queue_fail: vsi->ops->netdev_destroy(dev_mgt, vsi); build_netdev_fail: serv_ops->remove_net_resource_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); setup_net_mgt_fail: - serv_ops->remove_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); -set_queue_fail: serv_ops->free_rings(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); alloc_rings_fail: free_netdev(netdev); @@ -2742,9 +4035,10 @@ static void nbl_dev_stop_net_dev(struct nbl_adapter *adapter) struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - struct nbl_event_callback callback = {0}; + struct nbl_event_callback netdev_callback = {0}; struct nbl_dev_vsi *vsi; struct net_device *netdev; + char dev_name[IFNAMSIZ] = {0}; if (!net_dev) return; @@ -2756,19 +4050,38 @@ static void nbl_dev_stop_net_dev(struct nbl_adapter *adapter) return; if (!common->is_vf) { - callback.callback = nbl_dev_vsi_handle_switch_event; - callback.callback_data = dev_mgt; - nbl_event_unregister(NBL_EVENT_DEV_MODE_SWITCH, &callback, - NBL_COMMON_TO_ETH_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + serv_ops->remove_vf_resource(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + nbl_netdev_remove_sysfs(net_dev); + nbl_netdev_remove_mirror_sysfs(net_dev); + } else { + /* remove vf dev_name attr */ + if (memcmp(net_dev->dev_attr.dev_name_attr.net_dev_name, dev_name, IFNAMSIZ)) + nbl_net_remove_dev_attr(net_dev); } + nbl_dev_remove_rep_res(dev_mgt); + serv_ops->change_mtu(netdev, 0); unregister_netdev(netdev); + rtnl_lock(); + netif_device_detach(netdev); + rtnl_unlock(); + + nbl_dev_remove_devlink_port(dev_mgt); + + netdev_callback.callback = nbl_dev_vsi_handle_netdev_event; + netdev_callback.callback_data = dev_mgt; + nbl_event_unregister(NBL_EVENT_NETDEV_STATE_CHANGE, &netdev_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); vsi->ops->netdev_destroy(dev_mgt, vsi); vsi->ops->stop(dev_mgt, vsi); nbl_dev_free_net_irq(dev_mgt); + nbl_deinit_lag(dev_mgt); + + serv_ops->remove_hw_stats(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + serv_ops->remove_net_resource_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); serv_ops->remove_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); serv_ops->free_rings(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); @@ -2811,6 +4124,114 @@ static void nbl_dev_suspend_net_dev(struct nbl_adapter *adapter) nbl_dev_free_net_irq(dev_mgt); } +static int nbl_dev_get_devlink_eswitch_mode(struct devlink *devlink, u16 *mode) +{ + struct nbl_devlink_priv *priv = devlink_priv(devlink); + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv->dev_mgt; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct pci_dev *pdev = NBL_COMMON_TO_PDEV(common); + struct nbl_adapter *adapter = NULL; + u16 cur_eswitch_mode = NBL_ESWITCH_NONE; + + adapter = pci_get_drvdata(pdev); + if (!adapter) + return -EINVAL; + + cur_eswitch_mode = serv_ops->get_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + return nbl_dev_eswitch_mode_to_devlink(cur_eswitch_mode, mode); +} + +static int nbl_dev_set_devlink_eswitch_mode(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct 
nbl_devlink_priv *priv = devlink_priv(devlink);
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv->dev_mgt;
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct pci_dev *pdev = NBL_COMMON_TO_PDEV(common);
+	struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt);
+	struct nbl_adapter *adapter = NULL;
+	struct nbl_dev_vsi *vsi = dev_mgt->net_dev->vsi_ctrl.vsi_list[NBL_VSI_CTRL];
+	struct nbl_event_offload_status_data event_data = {0};
+	int num_vfs = 0;
+	u16 cfg_eswitch_mode = NBL_ESWITCH_NONE;
+	u16 cur_eswitch_mode = NBL_ESWITCH_NONE;
+	int ret = 0;
+
+	num_vfs = pci_num_vf(pdev);
+	adapter = pci_get_drvdata(pdev);
+	if (!adapter)
+		return -EINVAL;
+	ret = nbl_dev_eswitch_mode_from_devlink(mode, &cfg_eswitch_mode);
+	if (ret)
+		return ret;
+	cur_eswitch_mode = serv_ops->get_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	if (cur_eswitch_mode == cfg_eswitch_mode)
+		return 0;
+
+	if (!vsi)
+		return -ENOENT;
+
+	if (cfg_eswitch_mode == NBL_ESWITCH_OFFLOADS) {
+		ret = vsi->ops->start(dev_mgt, dev_mgt->net_dev->netdev, vsi);
+		if (ret)
+			return ret;
+
+		ret = nbl_dev_init_offload_mode(dev_mgt, vsi->vsi_id);
+		if (ret) {
+			dev_err(dev, "failed to init offload mode\n");
+			return -EBUSY;
+		}
+		serv_ops->set_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), cfg_eswitch_mode);
+		if (num_vfs) {
+			ret = nbl_dev_create_rep(adapter, num_vfs);
+			if (ret)
+				goto fail_cfg_rep;
+		}
+
+		event_data.pf_vsi_id = NBL_COMMON_TO_VSI_ID(common);
+		event_data.status = true;
+		nbl_event_notify(NBL_EVENT_OFFLOAD_STATUS_CHANGED, &event_data,
+				 NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+	} else if (cur_eswitch_mode == NBL_ESWITCH_OFFLOADS) {
+		ret = nbl_dev_uninit_offload_mode(dev_mgt);
+		if (ret) {
+			dev_err(dev, "failed to uninit offload mode\n");
+			return -EBUSY;
+		}
+		if (num_vfs) {
+			ret = nbl_dev_destroy_rep(adapter);
+			if (ret)
+				goto fail_cfg_rep;
+		}
+		serv_ops->set_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), cfg_eswitch_mode);
+
+		nbl_dev_destroy_flow_res(dev_mgt);
+
+		vsi->ops->stop(dev_mgt, vsi);
+
+		event_data.pf_vsi_id = NBL_COMMON_TO_VSI_ID(common);
+		event_data.status = false;
+		nbl_event_notify(NBL_EVENT_OFFLOAD_STATUS_CHANGED, &event_data,
+				 NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+	}
+	return 0;
+
+fail_cfg_rep:
+	if (cfg_eswitch_mode == NBL_ESWITCH_OFFLOADS) {
+		serv_ops->set_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), cur_eswitch_mode);
+		vsi->ops->stop(dev_mgt, vsi);
+		ret = nbl_dev_uninit_offload_mode(dev_mgt);
+		if (ret)
+			dev_err(dev, "failed to uninit offload mode after rep create failed\n");
+	} else if (cur_eswitch_mode == NBL_ESWITCH_OFFLOADS) {
+		ret = nbl_dev_init_offload_mode(dev_mgt, vsi->vsi_id);
+		if (ret)
+			dev_err(dev, "failed to init offload mode after rep destroy failed\n");
+	}
+	return -EBUSY;
+}
 /* ---------- Devlink config ---------- */
 static void nbl_dev_devlink_free(void *devlink_ptr)
 {
@@ -2834,10 +4255,14 @@ static int nbl_dev_setup_devlink(struct nbl_dev_mgt *dev_mgt, struct nbl_init_pa
 	if (!devlink_ops)
 		return -ENOMEM;
 
-	devlink_ops->info_get = serv_ops->get_devlink_info;
+	if (!param->caps.is_vf) {
+		devlink_ops->eswitch_mode_set = nbl_dev_set_devlink_eswitch_mode;
+		devlink_ops->eswitch_mode_get = nbl_dev_get_devlink_eswitch_mode;
+		devlink_ops->info_get = serv_ops->get_devlink_info;
 
-	if (param->caps.has_ctrl)
-		devlink_ops->flash_update = serv_ops->update_devlink_flash;
+		if (param->caps.has_ctrl)
+			devlink_ops->flash_update =
serv_ops->update_devlink_flash; + } devlink = devlink_alloc(devlink_ops, sizeof(*priv), dev); @@ -2904,8 +4329,7 @@ static int nbl_dev_start_common_dev(struct nbl_adapter *adapter, struct nbl_init goto setup_hwmon_err; } - if (nbl_dev_should_chan_keepalive(dev_mgt)) - nbl_dev_setup_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); + nbl_dev_setup_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); return 0; @@ -2927,9 +4351,7 @@ static void nbl_dev_stop_common_dev(struct nbl_adapter *adapter) { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); - if (nbl_dev_should_chan_keepalive(dev_mgt)) - nbl_dev_remove_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); - + nbl_dev_remove_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); nbl_dev_remove_hwmon(adapter); nbl_dev_remove_devlink(dev_mgt); nbl_dev_free_mailbox_irq(dev_mgt); @@ -2947,8 +4369,7 @@ static int nbl_dev_resume_common_dev(struct nbl_adapter *adapter, struct nbl_ini if (ret) return ret; - if (nbl_dev_should_chan_keepalive(dev_mgt)) - nbl_dev_setup_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); + nbl_dev_setup_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); return 0; } @@ -2957,9 +4378,7 @@ static void nbl_dev_suspend_common_dev(struct nbl_adapter *adapter) { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); - if (nbl_dev_should_chan_keepalive(dev_mgt)) - nbl_dev_remove_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); - + nbl_dev_remove_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); nbl_dev_free_mailbox_irq(dev_mgt); } @@ -2982,11 +4401,16 @@ int nbl_dev_start(void *p, struct nbl_init_param *param) if (ret) goto start_net_dev_fail; + ret = nbl_dev_start_rdma_dev(adapter); + if (ret) + goto start_rdma_dev_fail; if (param->caps.has_user) nbl_dev_start_user_dev(adapter); return 0; +start_rdma_dev_fail: + nbl_dev_stop_net_dev(adapter); start_net_dev_fail: nbl_dev_stop_ctrl_dev(adapter); start_ctrl_dev_fail: @@ -3000,6 +4424,7 @@ void nbl_dev_stop(void *p) struct nbl_adapter *adapter = (struct nbl_adapter *)p; nbl_dev_stop_user_dev(adapter); + nbl_dev_stop_rdma_dev(adapter); nbl_dev_stop_ctrl_dev(adapter); nbl_dev_stop_net_dev(adapter); nbl_dev_stop_common_dev(adapter); @@ -3025,8 +4450,14 @@ int nbl_dev_resume(void *p) if (ret) goto start_net_dev_fail; + ret = nbl_dev_resume_rdma_dev(adapter); + if (ret) + goto start_rdma_dev_fail; + return 0; +start_rdma_dev_fail: + nbl_dev_stop_net_dev(adapter); start_net_dev_fail: nbl_dev_stop_ctrl_dev(adapter); start_ctrl_dev_fail: @@ -3038,10 +4469,16 @@ int nbl_dev_resume(void *p) int nbl_dev_suspend(void *p) { struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + nbl_dev_suspend_rdma_dev(adapter); nbl_dev_stop_ctrl_dev(adapter); nbl_dev_suspend_net_dev(adapter); nbl_dev_suspend_common_dev(adapter); + pci_save_state(adapter->pdev); + pci_wake_from_d3(adapter->pdev, common->wol_ena); + pci_set_power_state(adapter->pdev, PCI_D3hot); + return 0; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h index 5f9a8a658a651a80d5c6f6437113c78653e3d541..03d40c347f55d30a7aed523eb36258568f53f3cb 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h @@ -8,16 +8,22 @@ #define _NBL_DEV_H_ #include "nbl_core.h" +#include "nbl_export_rdma.h" #include "nbl_dev_user.h" +#include "nbl_sysfs.h" #define NBL_DEV_MGT_TO_COMMON(dev_mgt) 
((dev_mgt)->common) #define NBL_DEV_MGT_TO_DEV(dev_mgt) NBL_COMMON_TO_DEV(NBL_DEV_MGT_TO_COMMON(dev_mgt)) #define NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) ((dev_mgt)->common_dev) #define NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) ((dev_mgt)->ctrl_dev) #define NBL_DEV_MGT_TO_NET_DEV(dev_mgt) ((dev_mgt)->net_dev) +#define NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt) ((dev_mgt)->rdma_dev) #define NBL_DEV_MGT_TO_USER_DEV(dev_mgt) ((dev_mgt)->user_dev) +#define NBL_DEV_MGT_TO_REP_DEV(dev_mgt) ((dev_mgt)->rep_dev) #define NBL_DEV_COMMON_TO_MSIX_INFO(dev_common) (&(dev_common)->msix_info) #define NBL_DEV_CTRL_TO_TASK_INFO(dev_ctrl) (&(dev_ctrl)->task_info) +#define NBL_DEV_FACTORY_TO_TASK_INFO(dev_factory) (&(dev_factory)->task_info) +#define NBL_DEV_MGT_TO_EMP_CONSOLE(dev_mgt) ((dev_mgt)->emp_console) #define NBL_DEV_MGT_TO_NETDEV_OPS(dev_mgt) ((dev_mgt)->net_dev->ops) #define NBL_DEV_MGT_TO_SERV_OPS_TBL(dev_mgt) ((dev_mgt)->serv_ops_tbl) @@ -38,21 +44,37 @@ #define NBL_KEEPALIVE_TIME_CYCLE (10 * HZ) -enum nbl_dev_mode_switch_op { - NBL_DEV_KERNEL_TO_USER, - NBL_DEV_USER_TO_KERNEL, +#define NBL_DEV_BATCH_RESET_FUNC_NUM (32) +#define NBL_DEV_BATCH_RESET_USEC (1000000) + +#define NBL_TIME_LEN (32) +#define NBL_SAVED_TRACES_NUM (16) + +#define NBL_DEV_FW_RESET_WAIT_TIME (3500) + +enum nbl_reset_status { + NBL_RESET_INIT, + NBL_RESET_SEND, + NBL_RESET_DONE, + NBL_RESET_STATUS_MAX }; struct nbl_task_info { struct nbl_adapter *adapter; struct nbl_dev_mgt *dev_mgt; + struct work_struct offload_network_task; struct work_struct fw_hb_task; struct delayed_work fw_reset_task; struct work_struct clean_adminq_task; + struct work_struct ipsec_task; struct work_struct adapt_desc_gother_task; struct work_struct clean_abnormal_irq_task; struct work_struct recovery_abnormal_task; - + struct work_struct report_temp_task; + struct work_struct report_reboot_task; + struct work_struct reset_task; + enum nbl_reset_event reset_event; + enum nbl_reset_status reset_status[NBL_MAX_FUNC]; struct timer_list serv_timer; unsigned long serv_timer_period; @@ -60,6 +82,11 @@ struct nbl_task_info { bool timer_setup; }; +struct nbl_reset_task_info { + struct work_struct task; + enum nbl_reset_event event; +}; + enum nbl_msix_serv_type { /* virtio_dev has a config vector_id, and the vector_id need is 0 */ NBL_MSIX_VIRTIO_TYPE = 0, @@ -73,6 +100,7 @@ enum nbl_msix_serv_type { }; struct nbl_msix_serv_info { + char irq_name[NBL_STRING_NAME_LEN]; u16 num; u16 base_vector_id; /* true: hw report msix, hw need to mask actively */ @@ -94,10 +122,84 @@ struct nbl_dev_common { struct devlink_ops *devlink_ops; struct devlink *devlink; + struct nbl_reset_task_info reset_task; +}; + +struct nbl_dev_factory { + struct nbl_task_info task_info; +}; + +enum nbl_dev_temp_status { + NBL_TEMP_STATUS_NORMAL = 0, + NBL_TEMP_STATUS_WARNING, + NBL_TEMP_STATUS_CRIT, + NBL_TEMP_STATUS_EMERG, + NBL_TEMP_STATUS_MAX +}; + +enum nbl_emp_log_level { + NBL_EMP_ALERT_LOG_FATAL = 0, + NBL_EMP_ALERT_LOG_ERROR = 1, + NBL_EMP_ALERT_LOG_WARNING = 2, + NBL_EMP_ALERT_LOG_INFO = 3, +}; + +struct nbl_fw_reporter_ctx { + u64 timestamp; + u32 temp_num; + char reboot_report_time[NBL_TIME_LEN]; +}; + +struct nbl_fw_temp_trace_data { + u64 timestamp; + u32 temp_num; +}; + +struct nbl_fw_reboot_trace_data { + char local_time[NBL_TIME_LEN]; +}; + +struct nbl_health_reporters { + struct { + struct nbl_fw_temp_trace_data trace_data[NBL_SAVED_TRACES_NUM]; + u8 saved_traces_index; + struct mutex lock; /* protect reading data of temp_trace_data*/ + } temp_st_arr; + + struct { + struct nbl_fw_reboot_trace_data 
trace_data[NBL_SAVED_TRACES_NUM]; + u8 saved_traces_index; + struct mutex lock; /* protect reading data of reboot_trace_data*/ + } reboot_st_arr; + + struct nbl_fw_reporter_ctx reporter_ctx; + struct devlink_health_reporter *fw_temp_reporter; + struct devlink_health_reporter *fw_reboot_reporter; }; struct nbl_dev_ctrl { struct nbl_task_info task_info; + enum nbl_dev_temp_status temp_status; + struct nbl_health_reporters health_reporters; +}; + +enum nbl_dev_emp_alert_event { + NBL_EMP_EVENT_TEMP_ALERT = 1, + NBL_EMP_EVENT_LOG_ALERT = 2, + NBL_EMP_EVENT_MAX +}; + +enum nbl_dev_temp_threshold { + NBL_TEMP_NOMAL_THRESHOLD = 85, + NBL_TEMP_WARNING_THRESHOLD = 105, + NBL_TEMP_CRIT_THRESHOLD = 115, + NBL_TEMP_EMERG_THRESHOLD = 120, +}; + +struct nbl_dev_temp_alarm_info { + int logvel; +#define NBL_TEMP_ALARM_STR_LEN 128 + char alarm_info[NBL_TEMP_ALARM_STR_LEN]; }; struct nbl_dev_vsi_controller { @@ -111,17 +213,81 @@ struct nbl_dev_net_ops { struct nbl_init_param *param); int (*setup_ethtool_ops)(void *priv, struct net_device *netdev, struct nbl_init_param *param); + int (*setup_dcbnl_ops)(void *priv, struct net_device *netdev, + struct nbl_init_param *param); +}; + +struct nbl_dev_attr_info { + struct nbl_netdev_name_attr dev_name_attr; }; struct nbl_dev_net { struct net_device *netdev; + struct nbl_dev_attr_info dev_attr; + struct nbl_lag_member *lag_mem; struct nbl_dev_net_ops *ops; + u8 lag_inited; + u8 eth_id; struct nbl_dev_vsi_controller vsi_ctrl; u16 total_queue_num; u16 kernel_queue_num; u16 user_queue_num; - u8 eth_id; - u8 resv; + u16 total_vfs; + struct nbl_net_qos qos_config; + struct nbl_net_mirror mirror_config; +}; + +struct nbl_dev_virtio { + u8 device_msix; +}; + +struct nbl_dev_rdma_event_data { + struct list_head node; + /* Lag event will be processed async, so we need to fully store the param in case it is + * released by caller. + * + * callback_data will always be dev_mgt, which will not be released, so don't bother. 
+ */ + struct nbl_event_param event_data; + void *callback_data; + u16 type; +}; + +struct nbl_dev_rdma { + struct auxiliary_device *adev; + struct auxiliary_device *grc_adev; + struct auxiliary_device *bond_adev; + + struct work_struct abnormal_event_task; + + struct work_struct event_task; + struct list_head event_param_list; + struct mutex event_lock; /* Protect event_param_list */ + + int adev_index; + u32 mem_type; + bool has_rdma; + bool has_grc; + u16 func_id; + u16 lag_id; + bool bond_registered; + bool bond_shaping_configed; + + bool is_halting; + bool event_ready; + bool mirror_enable; + bool has_abnormal_event_task; + atomic_t adev_busy; +}; + +struct nbl_dev_emp_console { + struct nbl_dev_mgt *dev_mgt; + unsigned int id; + atomic_t opened; + wait_queue_head_t wait; + struct cdev cdev; + struct kfifo rx_fifo; + struct ktermios termios; }; struct nbl_dev_user_iommu_group { @@ -131,22 +297,40 @@ struct nbl_dev_user_iommu_group { struct rb_root dma_tree; struct iommu_group *iommu_group; struct device *dev; + struct device *mdev; struct vfio_device *vdev; }; struct nbl_dev_user { - struct vfio_device vdev; - struct device *mdev; + struct vfio_device *vdev; + + struct device mdev; struct notifier_block iommu_notifier; struct device *dev; struct nbl_adapter *adapter; struct nbl_dev_user_iommu_group *group; void *shm_msg_ring; + u64 dma_limit; + atomic_t open_cnt; int minor; + int network_type; bool iommu_status; bool remap_status; - int network_type; - atomic_t open_cnt; + bool user_promisc_mode; + bool user_mcast_mode; + u16 user_vsi; +}; + +struct nbl_vfio_device { + struct vfio_device vdev; + struct nbl_dev_user *user; +}; + +#define NBL_USERDEV_TO_VFIO_DEV(user) ((user)->vdev) +#define NBL_VFIO_DEV_TO_USERDEV(vdev) (*(struct nbl_dev_user **)((vdev) + 1)) +struct nbl_dev_rep { + struct nbl_rep_data *rep; + int num_vfs; }; struct nbl_dev_mgt { @@ -156,6 +340,9 @@ struct nbl_dev_mgt { struct nbl_dev_common *common_dev; struct nbl_dev_ctrl *ctrl_dev; struct nbl_dev_net *net_dev; + struct nbl_dev_rdma *rdma_dev; + struct nbl_dev_emp_console *emp_console; + struct nbl_dev_rep *rep_dev; struct nbl_dev_user *user_dev; }; @@ -171,8 +358,8 @@ struct nbl_dev_vsi_ops { int (*setup)(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, void *vsi_data); void (*remove)(struct nbl_dev_mgt *dev_mgt, void *vsi_data); - int (*start)(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, void *vsi_data); - void (*stop)(struct nbl_dev_mgt *dev_mgt, void *vsi_data); + int (*start)(void *dev_priv, struct net_device *netdev, void *vsi_data); + void (*stop)(void *dev_priv, void *vsi_data); int (*netdev_build)(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, struct net_device *netdev, void *vsi_data); void (*netdev_destroy)(struct nbl_dev_mgt *dev_mgt, void *vsi_data); @@ -191,6 +378,8 @@ struct nbl_dev_vsi { u16 in_kernel; u8 index; bool enable; + bool use_independ_irq; + bool static_queue; }; struct nbl_dev_vsi_tbl { @@ -198,11 +387,13 @@ struct nbl_dev_vsi_tbl { bool vf_support; bool only_nic_support; u16 in_kernel; + bool use_independ_irq; + bool static_queue; }; #define NBL_DEV_BOARD_ID_MAX NBL_DRIVER_DEV_MAX struct nbl_dev_board_id_entry { - u16 bus; + u32 board_key; /* domain << 16 | bus_id */ u8 refcount; bool valid; }; @@ -211,7 +402,28 @@ struct nbl_dev_board_id_table { struct nbl_dev_board_id_entry entry[NBL_DEV_BOARD_ID_MAX]; }; +int nbl_dev_setup_rdma_dev(struct nbl_adapter *adapter, struct nbl_init_param *param); +void nbl_dev_remove_rdma_dev(struct nbl_adapter *adapter); +int 
nbl_dev_start_rdma_dev(struct nbl_adapter *adapter);
+void nbl_dev_stop_rdma_dev(struct nbl_adapter *adapter);
+int nbl_dev_resume_rdma_dev(struct nbl_adapter *adapter);
+int nbl_dev_suspend_rdma_dev(struct nbl_adapter *adapter);
+void nbl_dev_rdma_process_abnormal_event(struct nbl_dev_rdma *rdma_dev);
+void nbl_dev_rdma_process_flr_event(struct nbl_dev_rdma *rdma_dev, u16 vsi_id);
+ssize_t nbl_dev_rdma_qos_cfg_store(struct nbl_dev_mgt *dev_mgt, int offset,
+				   const char *buf, size_t count);
+ssize_t nbl_dev_rdma_qos_cfg_show(struct nbl_dev_mgt *dev_mgt, int offset, char *buf);
+
+int nbl_dev_init_emp_console(struct nbl_adapter *adapter);
+void nbl_dev_destroy_emp_console(struct nbl_adapter *adapter);
 int nbl_dev_setup_hwmon(struct nbl_adapter *adapter);
 void nbl_dev_remove_hwmon(struct nbl_adapter *adapter);
-
+struct nbl_dev_vsi *nbl_dev_vsi_select(struct nbl_dev_mgt *dev_mgt, u8 vsi_index);
+
+int nbl_netdev_add_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev);
+int nbl_netdev_add_mirror_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev);
+void nbl_netdev_remove_sysfs(struct nbl_dev_net *net_dev);
+void nbl_netdev_remove_mirror_sysfs(struct nbl_dev_net *net_dev);
+void nbl_net_add_name_attr(struct nbl_netdev_name_attr *dev_name_attr, char *rep_name);
+void nbl_net_remove_dev_attr(struct nbl_dev_net *net_dev);
 #endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.c
new file mode 100644
index 0000000000000000000000000000000000000000..96b961a9b8459f5fdd28c5bf434f3bc96a73b06d
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.c
@@ -0,0 +1,1107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: + */ + +#include "nbl_dev_rdma.h" + +static int nbl_dev_create_rdma_aux_dev(struct nbl_dev_mgt *dev_mgt, u8 type, + struct nbl_core_dev_lag_info *lag_info); +static void nbl_dev_destroy_rdma_aux_dev(struct nbl_dev_rdma *rdma_dev, + struct auxiliary_device **adev); + +static void nbl_dev_rdma_pending_and_flush_event_task(struct nbl_dev_rdma *rdma_dev) +{ + atomic_inc(&rdma_dev->adev_busy); + nbl_common_flush_task(&rdma_dev->event_task); +} + +static void nbl_dev_rdma_resume_event_task(struct nbl_dev_rdma *rdma_dev) +{ + atomic_dec(&rdma_dev->adev_busy); +} + +static int nbl_dev_rdma_bond_active_num(struct nbl_core_dev_info *cdev_info) +{ + int i, count = 0; + + if (!cdev_info->is_lag) + return 0; + + for (i = 0; i < NBL_RDMA_LAG_MAX_PORTS; i++) + if (cdev_info->lag_info.lag_mem[i].active) + count++; + + return count; +} + +static void nbl_dev_rdma_cfg_bond(struct nbl_dev_mgt *dev_mgt, struct nbl_core_dev_info *cdev_info, + bool enable) +{ + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + int other_eth_id = -1, i; + + /* TODO: if we need to support bond with more than two ports, need to modify here */ + for (i = 0; i < NBL_LAG_MAX_PORTS; i++) + if (cdev_info->lag_info.lag_mem[i].eth_id != NBL_COMMON_TO_ETH_ID(common)) + other_eth_id = cdev_info->lag_info.lag_mem[i].eth_id; + + if (other_eth_id == -1) { + nbl_warn(common, NBL_DEBUG_MAIN, "Fail to find bond other eth id, rdma cfg abort"); + return; + } + + serv_ops->cfg_bond_shaping(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), enable); + serv_ops->cfg_bgid_back_pressure(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), other_eth_id, enable); + + rdma_dev->bond_shaping_configed = enable; +} + +static int nbl_dev_chan_grc_process_req(void *priv, u8 *req_args, u8 req_len, + void *resp, u16 resp_len) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_chan_rdma_resp param = {0}; + struct nbl_chan_rdma_resp result = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int ret = 0; + + if (!chan_ops) + return 0; + + memcpy(param.resp_data, req_args, req_len); + param.data_len = req_len; + + common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GRC_PROCESS, ¶m, sizeof(param), + &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + if (ret) + return ret; + + resp_len = min(resp_len, result.data_len); + memcpy(resp, result.resp_data, resp_len); + + return 0; +} + +static void nbl_dev_chan_grc_process_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_chan_rdma_resp *param; + struct nbl_chan_rdma_resp result = {0}; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + struct nbl_aux_dev *dev_link = container_of(rdma_dev->grc_adev, struct nbl_aux_dev, adev); + + param = (struct nbl_chan_rdma_resp *)data; + + if (!dev_link->recv) { + err = NBL_CHAN_RESP_ERR; + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GRC_PROCESS, + msg_id, err, &result, sizeof(result)); + 
chan_ops->send_ack(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_ack);
+		return;
+	}
+
+	dev_link->recv(rdma_dev->grc_adev, param->resp_data, param->data_len, &result);
+
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GRC_PROCESS,
+		     msg_id, err, &result, sizeof(result));
+	chan_ops->send_ack(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_ack);
+}
+
+static int nbl_dev_grc_process_send(struct pci_dev *pdev, u8 *req_args, u8 req_len,
+				    void *resp, u16 resp_len)
+{
+	struct nbl_adapter *adapter;
+	struct nbl_dev_mgt *dev_mgt;
+	struct nbl_chan_rdma_resp chan_resp = {0};
+	struct nbl_dev_rdma *rdma_dev;
+	struct nbl_aux_dev *dev_link;
+
+	if (!pdev || !req_args || !resp)
+		return -EINVAL;
+
+	adapter = pci_get_drvdata(pdev);
+	dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+
+	if (rdma_dev->has_grc) {
+		int ret = 0;
+
+		/* grc_adev is only valid in this branch, so derive dev_link here */
+		dev_link = container_of(rdma_dev->grc_adev, struct nbl_aux_dev, adev);
+		if (dev_link->recv) {
+			dev_link->recv(rdma_dev->grc_adev, req_args, req_len, &chan_resp);
+		} else {
+			chan_resp.data_len = 1;
+			chan_resp.resp_data[0] = 1;
+			ret = -EINVAL;
+		}
+		resp_len = min(chan_resp.data_len, resp_len);
+		memcpy(resp, chan_resp.resp_data, resp_len);
+		return ret;
+	} else {
+		return nbl_dev_chan_grc_process_req(dev_mgt, req_args, req_len, resp, resp_len);
+	}
+}
+
+static void nbl_dev_rdma_handle_abnormal_event_task(struct work_struct *work)
+{
+	struct nbl_dev_rdma *rdma_dev = container_of(work, struct nbl_dev_rdma,
+						     abnormal_event_task);
+	struct nbl_aux_dev *dev_link = NULL;
+
+	if (rdma_dev->is_halting)
+		return;
+
+	if (rdma_dev->grc_adev)
+		dev_link = container_of(rdma_dev->grc_adev, struct nbl_aux_dev, adev);
+	else if (rdma_dev->adev)
+		dev_link = container_of(rdma_dev->adev, struct nbl_aux_dev, adev);
+	else if (rdma_dev->bond_adev)
+		dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev);
+	else
+		return;
+
+	if (dev_link->abnormal_event_process)
+		dev_link->abnormal_event_process(&dev_link->adev);
+}
+
+void nbl_dev_rdma_process_abnormal_event(struct nbl_dev_rdma *rdma_dev)
+{
+	if (rdma_dev && !rdma_dev->is_halting)
+		nbl_common_queue_work_rdma(&rdma_dev->abnormal_event_task, false);
+}
+
+void nbl_dev_rdma_process_flr_event(struct nbl_dev_rdma *rdma_dev, u16 vsi_id)
+{
+	struct nbl_aux_dev *dev_link;
+
+	/* Validate rdma_dev and grc_adev before deriving dev_link from them */
+	if (!rdma_dev || !rdma_dev->grc_adev)
+		return;
+
+	dev_link = container_of(rdma_dev->grc_adev, struct nbl_aux_dev, adev);
+	if (dev_link->process_flr_event)
+		dev_link->process_flr_event(rdma_dev->grc_adev, vsi_id);
+}
+
+static int nbl_dev_rdma_register_bond(struct pci_dev *pdev, bool enable)
+{
+	struct nbl_adapter *adapter;
+	struct nbl_dev_mgt *dev_mgt;
+	struct nbl_dev_rdma *rdma_dev;
+	struct nbl_aux_dev *dev_link;
+	struct nbl_common_info *common;
+	struct nbl_service_ops *serv_ops;
+
+	if (!pdev)
+		return -EINVAL;
+
+	adapter = pci_get_drvdata(pdev);
+	dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+
+	if (!rdma_dev->bond_adev)
+		return -EINVAL;
+
+	dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev);
+
+	rdma_dev->bond_registered = enable;
+
+	if (rdma_dev->bond_registered && nbl_dev_rdma_bond_active_num(dev_link->cdev_info) > 1 &&
+	    !rdma_dev->bond_shaping_configed)
+		nbl_dev_rdma_cfg_bond(dev_mgt, dev_link->cdev_info, true);
+	else if (!rdma_dev->bond_registered && rdma_dev->bond_shaping_configed)
nbl_dev_rdma_cfg_bond(dev_mgt, dev_link->cdev_info, false); + + return 0; +} + +static void nbl_dev_rdma_form_lag_info(struct nbl_core_dev_lag_info *lag_info, + struct nbl_lag_member_list_param *list_param, + struct nbl_common_info *common) +{ + int i; + + lag_info->lag_num = list_param->lag_num; + lag_info->lag_id = list_param->lag_id; + nbl_debug(common, NBL_DEBUG_MAIN, "update lag id %u, lag num %u.", + list_param->lag_id, list_param->lag_num); + + for (i = 0; i < NBL_RDMA_LAG_MAX_PORTS; i++) { + nbl_debug(common, NBL_DEBUG_MAIN, "update lag member %u, eth_id %u, vsi_id %u, active %u.", + i, list_param->member_list[i].eth_id, + list_param->member_list[i].vsi_id, list_param->member_list[i].active); + lag_info->lag_mem[i].vsi_id = list_param->member_list[i].vsi_id; + lag_info->lag_mem[i].eth_id = list_param->member_list[i].eth_id; + lag_info->lag_mem[i].active = list_param->member_list[i].active; + } +} + +static void nbl_dev_rdma_update_bond_member(struct nbl_dev_mgt *dev_mgt, + struct nbl_lag_member_list_param *list_param) +{ + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_core_dev_lag_info lag_info = {0}; + struct nbl_aux_dev *dev_link; + + if (!rdma_dev->bond_adev) { + nbl_err(common, NBL_DEBUG_MAIN, "Something wrong, lag adev err"); + return; + } + + dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev); + rdma_dev->lag_id = list_param->lag_id; + + nbl_dev_rdma_form_lag_info(&lag_info, list_param, common); + + memcpy(&dev_link->cdev_info->lag_info, &lag_info, sizeof(lag_info)); + + if (dev_link->cdev_info->lag_mem_notify) + dev_link->cdev_info->lag_mem_notify(rdma_dev->bond_adev, &lag_info); + + if (rdma_dev->bond_registered && nbl_dev_rdma_bond_active_num(dev_link->cdev_info) > 1 && + !rdma_dev->bond_shaping_configed) + nbl_dev_rdma_cfg_bond(dev_mgt, dev_link->cdev_info, true); + else if (nbl_dev_rdma_bond_active_num(dev_link->cdev_info) < 2 && + rdma_dev->bond_shaping_configed) + nbl_dev_rdma_cfg_bond(dev_mgt, dev_link->cdev_info, false); +} + +static int nbl_dev_rdma_update_adev_mtu(struct nbl_dev_mgt *dev_mgt, + struct nbl_event_param *event_param) +{ + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + int new_mtu = event_param->mtu; + struct nbl_aux_dev *dev_link = NULL; + + if (rdma_dev && rdma_dev->grc_adev) + dev_link = container_of(rdma_dev->grc_adev, struct nbl_aux_dev, adev); + else if (rdma_dev && rdma_dev->adev) + dev_link = container_of(rdma_dev->adev, struct nbl_aux_dev, adev); + else if (rdma_dev && rdma_dev->bond_adev) + dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev); + else + return 0; + + if (dev_link && dev_link->cdev_info && dev_link->cdev_info->change_mtu_notify) + dev_link->cdev_info->change_mtu_notify(&dev_link->adev, new_mtu); + + return 0; +} + +static int nbl_dev_rdma_handle_bond_event(u16 type, void *event_data, void *callback_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_dev_rdma_event_data *data = NULL; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + memcpy(&data->event_data, event_data, sizeof(data->event_data)); + data->type = type; + data->callback_data = callback_data; + + /* Why we need a list here? + * + * First, we have to make sure we don't lose any notify. 
If we queue_work while a previous
+	 * work is still being processed, that notify must not be dropped, e.g.
+	 *
+	 * CONTEXT_0: add_slave0 -> notify_0(lag_num=1) -> add_slave1 -> notify_1(lag_num=2)
+	 * CONTEXT_1:	| -- process notify_0 -------> |
+	 *
+	 * Then why not simply use a single variable to store it? e.g.
+	 *
+	 * CONTEXT_0: add_slave0 -> notify_0 -> add_slave1 -> notify_1
+	 * CONTEXT_1:		| -- process notify_1 -> |
+	 * VARIABLE:  | -- lag_num = 0 -- | | -- lag_num = 1 --|
+	 *
+	 * or
+	 *
+	 * CONTEXT_0: add_slave0 -> notify_0 -> add_slave1 -> notify_1
+	 * CONTEXT_1:	| process notify_0 | | process notify_1 |
+	 * VARIABLE:  | -- lag_num = 0 -- | | -- lag_num = 1 --|
+	 *
+	 * This makes sure that we always use the latest param, which is functionally correct.
+	 *
+	 * But it would require the task function (nbl_dev_rdma_process_event_task) to hold the
+	 * lock around its whole body, because once we fetch a param we must keep using it until
+	 * all processing is finished; otherwise we could end up using different params within a
+	 * single pass.
+	 *
+	 * And this requirement cannot be fulfilled. Consider this situation:
+	 * CONTEXT_0: rtnl_lock -> add_slave0 -> notify_0 -> add_slave1 -> notify_1 -> event_lock
+	 * CONTEXT_1:	| --notify_0 -> event_lock -> ib_func -> rtnl_lock -- |
+	 *
+	 * At this moment, CONTEXT_0 holds rtnl_lock but needs event_lock, while CONTEXT_1 holds
+	 * event_lock but needs rtnl_lock, thus deadlock.
+	 *
+	 * Based on all of the above, we need a list. Each time we want to queue work, we add a
+	 * new entry to the list, and each run of the task dequeues one entry. The lock then only
+	 * protects the list itself (rather than the whole aux_dev processing), so the deadlock
+	 * cannot happen.
+	 */
+	mutex_lock(&rdma_dev->event_lock);
+	/* Always add_tail and dequeue the first, to preserve notify order */
+	list_add_tail(&data->node, &rdma_dev->event_param_list);
+	mutex_unlock(&rdma_dev->event_lock);
+
+	if (rdma_dev->event_ready)
+		nbl_common_queue_work_rdma(&rdma_dev->event_task, true);
+
+	return 0;
+}
+
+static int
+nbl_dev_rdma_handle_mirror_outputport_event(u16 type, void *event_data, void *callback_data)
+{
+	bool mirror_enable = *(bool *)event_data;
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data;
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_dev_rdma_event_data *data = NULL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->type = type;
+	data->callback_data = callback_data;
+	if (mirror_enable)
+		data->event_data.subevent = NBL_SUBEVENT_RELEASE_ADEV;
+	else
+		data->event_data.subevent = NBL_SUBEVENT_CREATE_ADEV;
+
+	mutex_lock(&rdma_dev->event_lock);
+	/* Always add_tail and dequeue the first, to preserve notify order */
+	list_add_tail(&data->node, &rdma_dev->event_param_list);
+	mutex_unlock(&rdma_dev->event_lock);
+
+	if (rdma_dev->event_ready)
+		nbl_common_queue_work_rdma(&rdma_dev->event_task, true);
+
+	return 0;
+}
+
+static int
+nbl_dev_rdma_handle_mirror_selectport_event(u16 type, void *event_data, void *callback_data)
+{
+	bool mirror_enable = *(bool *)event_data;
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data;
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_aux_dev *dev_link;
+	struct auxiliary_device *adev;
+
+	nbl_dev_rdma_pending_and_flush_event_task(rdma_dev);
+
+	adev = rdma_dev->adev ?
rdma_dev->adev : rdma_dev->bond_adev; + if (!adev) + goto resume_event_task; + + if (rdma_dev->mirror_enable == mirror_enable) + goto resume_event_task; + + rdma_dev->mirror_enable = mirror_enable; + dev_link = container_of(adev, struct nbl_aux_dev, adev); + if (!dev_link->cdev_info) + goto resume_event_task; + + dev_link->cdev_info->mirror_enable = mirror_enable; + if (dev_link->mirror_enable_notify) + dev_link->mirror_enable_notify(adev, mirror_enable); + +resume_event_task: + nbl_dev_rdma_resume_event_task(rdma_dev); + return 0; +} + +static int nbl_dev_rdma_handle_offload_status(u16 type, void *event_data, void *callback_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_event_offload_status_data *data = + (struct nbl_event_offload_status_data *)event_data; + struct nbl_aux_dev *dev_link; + + nbl_dev_rdma_pending_and_flush_event_task(rdma_dev); + if (!rdma_dev->bond_adev) + goto resume_event_task; + + if (data->pf_vsi_id != NBL_COMMON_TO_VSI_ID(NBL_DEV_MGT_TO_COMMON(dev_mgt))) + goto resume_event_task; + + dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev); + if (dev_link->cdev_info && dev_link->cdev_info->offload_status_notify) + dev_link->cdev_info->offload_status_notify(rdma_dev->bond_adev, data->status); + +resume_event_task: + nbl_dev_rdma_resume_event_task(rdma_dev); + return 0; +} + +static int nbl_dev_rdma_process_adev_event(void *event_data, void *callback_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_event_param *event = (struct nbl_event_param *)event_data; + struct nbl_lag_member_list_param *list_param = &event->param; + struct nbl_rdma_register_param register_param = {0}; + struct nbl_core_dev_lag_info lag_info = {0}; + + switch (event->subevent) { + case NBL_SUBEVENT_CREATE_ADEV: + if (!rdma_dev->adev) { + serv_ops->register_rdma(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_VSI_ID(common), ®ister_param); + if (register_param.has_rdma) + nbl_dev_create_rdma_aux_dev(dev_mgt, NBL_AUX_DEV_ROCE, NULL); + } + break; + case NBL_SUBEVENT_RELEASE_ADEV: + if (rdma_dev->adev) { + nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->adev); + serv_ops->unregister_rdma(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_VSI_ID(common)); + } + break; + case NBL_SUBEVENT_CREATE_BOND_ADEV: + if (!rdma_dev->bond_adev) { + serv_ops->register_rdma_bond(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + list_param, ®ister_param); + + nbl_dev_rdma_form_lag_info(&lag_info, list_param, common); + + if (register_param.has_rdma) { + rdma_dev->lag_id = list_param->lag_id; + nbl_dev_create_rdma_aux_dev(dev_mgt, NBL_AUX_DEV_BOND, &lag_info); + } + } + break; + case NBL_SUBEVENT_RELEASE_BOND_ADEV: + if (rdma_dev->bond_adev) { + nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->bond_adev); + serv_ops->unregister_rdma_bond(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + rdma_dev->lag_id); + } + break; + default: + break; + } + + return 0; +} + +static int nbl_dev_rdma_process_event_task(struct work_struct *work) +{ + struct nbl_dev_rdma *rdma_dev = container_of(work, struct nbl_dev_rdma, event_task); + struct nbl_dev_mgt *dev_mgt; + struct nbl_common_info *common; + struct nbl_lag_member_list_param *list_param; + struct nbl_dev_rdma_event_data *data = 
NULL;
+	struct nbl_event_param *event_param = NULL;
+
+	if (atomic_read(&rdma_dev->adev_busy)) {
+		msleep(20);
+		goto queue_rework;
+	}
+
+	mutex_lock(&rdma_dev->event_lock);
+
+	if (!nbl_list_empty(&rdma_dev->event_param_list)) {
+		data = list_first_entry(&rdma_dev->event_param_list,
+					struct nbl_dev_rdma_event_data, node);
+		list_del(&data->node);
+	}
+
+	mutex_unlock(&rdma_dev->event_lock);
+
+	if (!data)
+		return 0;
+
+	dev_mgt = (struct nbl_dev_mgt *)data->callback_data;
+	common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	event_param = &data->event_data;
+	list_param = &event_param->param;
+
+	nbl_info(common, NBL_DEBUG_MAIN, "process rdma lag subevent %u.", event_param->subevent);
+
+	switch (event_param->subevent) {
+	case NBL_SUBEVENT_UPDATE_BOND_MEMBER:
+		nbl_dev_rdma_update_bond_member(dev_mgt, list_param);
+		break;
+	case NBL_SUBEVENT_UPDATE_MTU:
+		nbl_dev_rdma_update_adev_mtu(dev_mgt, event_param);
+		break;
+	default:
+		nbl_dev_rdma_process_adev_event(event_param, dev_mgt);
+		break;
+	}
+
+	kfree(data);
+
+queue_rework:
+	/* Always queue it again, since we don't know whether another param still needs processing */
+	nbl_common_queue_work_rdma(&rdma_dev->event_task, true);
+
+	return 0;
+}
+
+static int nbl_dev_rdma_handle_reset_event(u16 type, void *event_data, void *callback_data)
+{
+	struct nbl_dev_rdma *rdma_dev = (struct nbl_dev_rdma *)callback_data;
+	enum nbl_core_reset_event event = *(enum nbl_core_reset_event *)event_data;
+	struct nbl_aux_dev *dev_link;
+	struct auxiliary_device *adev;
+
+	nbl_dev_rdma_pending_and_flush_event_task(rdma_dev);
+
+	adev = rdma_dev->adev ? rdma_dev->adev : rdma_dev->bond_adev;
+	if (!adev)
+		goto resume_event_task;
+
+	dev_link = container_of(adev, struct nbl_aux_dev, adev);
+	if (dev_link->reset_event_notify)
+		dev_link->reset_event_notify(adev, event);
+
+	if (rdma_dev->has_grc && rdma_dev->grc_adev) {
+		adev = rdma_dev->grc_adev;
+		dev_link = container_of(adev, struct nbl_aux_dev, adev);
+		if (dev_link->reset_event_notify)
+			dev_link->reset_event_notify(adev, event);
+	}
+
+resume_event_task:
+	nbl_dev_rdma_resume_event_task(rdma_dev);
+	return 0;
+}
+
+static int nbl_dev_rdma_handle_change_mtu_event(u16 type, void *event_data, void *callback_data)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data;
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	int new_mtu = *(int *)event_data;
+	struct nbl_dev_rdma_event_data *data = NULL;
+
+	/* Move the mtu update into the adev event task instead of flushing that task here.
+	 * The OS already holds the rtnl_lock before calling the driver's set_mtu op, and the
+	 * adev driver's probe may also need the rtnl_lock, so flushing the adev task from
+	 * this context could deadlock.
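+	 *
+	 * An illustration, in the same notation as the lag notify comment above (added for
+	 * clarity; the probe path is one example of the adev work that may take rtnl_lock):
+	 * CONTEXT_0: rtnl_lock -> ndo_change_mtu -> flush event_task
+	 * CONTEXT_1: event_task -> adev probe -> rtnl_lock
+	 * CONTEXT_0 would wait for the event_task that CONTEXT_1 is running, while CONTEXT_1
+	 * waits for the rtnl_lock that CONTEXT_0 holds; queuing instead of flushing breaks
+	 * the cycle.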
+	 */
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->type = type;
+	data->callback_data = callback_data;
+	data->event_data.mtu = new_mtu;
+	data->event_data.subevent = NBL_SUBEVENT_UPDATE_MTU;
+
+	mutex_lock(&rdma_dev->event_lock);
+	/* Always add_tail and dequeue the first, to preserve notify order */
+	list_add_tail(&data->node, &rdma_dev->event_param_list);
+	mutex_unlock(&rdma_dev->event_lock);
+
+	if (rdma_dev->event_ready)
+		nbl_common_queue_work_rdma(&rdma_dev->event_task, true);
+
+	return 0;
+}
+
+ssize_t nbl_dev_rdma_qos_cfg_store(struct nbl_dev_mgt *dev_mgt, int offset,
+				   const char *buf, size_t count)
+{
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_aux_dev *dev_link;
+	struct auxiliary_device *adev;
+
+	if (rdma_dev->bond_adev) {
+		adev = rdma_dev->bond_adev;
+		dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev);
+	} else if (rdma_dev->adev) {
+		adev = rdma_dev->adev;
+		dev_link = container_of(adev, struct nbl_aux_dev, adev);
+	} else {
+		return -EINVAL;
+	}
+
+	if (dev_link->qos_cfg_store)
+		return dev_link->qos_cfg_store(adev, offset, buf, count);
+
+	return -EINVAL;
+}
+
+ssize_t nbl_dev_rdma_qos_cfg_show(struct nbl_dev_mgt *dev_mgt, int offset, char *buf)
+{
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_aux_dev *dev_link;
+	struct auxiliary_device *adev;
+
+	if (rdma_dev->bond_adev) {
+		adev = rdma_dev->bond_adev;
+		dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev);
+	} else if (rdma_dev->adev) {
+		adev = rdma_dev->adev;
+		dev_link = container_of(adev, struct nbl_aux_dev, adev);
+	} else {
+		return -EINVAL;
+	}
+
+	if (dev_link->qos_cfg_show)
+		return dev_link->qos_cfg_show(adev, offset, buf);
+
+	return -EINVAL;
+}
+
+static struct nbl_core_dev_info *
+nbl_dev_rdma_setup_cdev_info(struct nbl_dev_mgt *dev_mgt, u8 type,
+			     struct nbl_core_dev_lag_info *lag_info)
+{
+	struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(common_dev);
+	struct nbl_core_dev_info *cdev_info = NULL;
+	u16 base_vector_id = msix_info->serv_info[NBL_MSIX_RDMA_TYPE].base_vector_id;
+	int irq_num, i;
+
+	cdev_info = kzalloc(sizeof(*cdev_info), GFP_KERNEL);
+	if (!cdev_info)
+		goto malloc_cdev_info_err;
+
+	cdev_info->dma_dev = NBL_COMMON_TO_DMA_DEV(common);
+	cdev_info->pdev = NBL_COMMON_TO_PDEV(common);
+	cdev_info->hw_addr = serv_ops->get_hw_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NULL);
+	cdev_info->real_hw_addr = serv_ops->get_real_hw_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+							     NBL_COMMON_TO_VSI_ID(common));
+	cdev_info->function_id = serv_ops->get_function_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+							   NBL_COMMON_TO_VSI_ID(common));
+	cdev_info->netdev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt)->netdev;
+
+	cdev_info->vsi_id = NBL_COMMON_TO_VSI_ID(common);
+	cdev_info->eth_mode = NBL_COMMON_TO_ETH_MODE(common);
+	cdev_info->eth_id = NBL_COMMON_TO_ETH_ID(common);
+	cdev_info->mem_type = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt)->mem_type;
+
+	if (type == NBL_AUX_DEV_GRC)
+		cdev_info->rdma_cap_num =
+			serv_ops->get_rdma_cap_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+
+	serv_ops->get_real_bdf(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_COMMON_TO_VSI_ID(common),
+			       &cdev_info->real_bus, &cdev_info->real_dev,
+			       &cdev_info->real_function);
+
+	cdev_info->send =
nbl_dev_grc_process_send; + + /* grc aux dev needs no interrupt */ + if (type == NBL_AUX_DEV_GRC) + goto out; + + irq_num = msix_info->serv_info[NBL_MSIX_RDMA_TYPE].num; + cdev_info->msix_entries = kcalloc(irq_num, sizeof(*cdev_info->msix_entries), GFP_KERNEL); + if (!cdev_info->msix_entries) + goto malloc_msix_entries_err; + + cdev_info->global_vector_id = kcalloc(irq_num, sizeof(*cdev_info->global_vector_id), + GFP_KERNEL); + if (!cdev_info->global_vector_id) + goto malloc_global_vector_id_err; + + for (i = 0; i < irq_num; i++) { + memcpy(&cdev_info->msix_entries[i], &msix_info->msix_entries[i + base_vector_id], + sizeof(cdev_info->msix_entries[i])); + cdev_info->global_vector_id[i] = + serv_ops->get_global_vector(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + i + base_vector_id); + } + cdev_info->msix_count = irq_num; + + if (type == NBL_AUX_DEV_BOND && lag_info) { + memcpy(&cdev_info->lag_info, lag_info, sizeof(cdev_info->lag_info)); + cdev_info->is_lag = true; + cdev_info->register_bond = nbl_dev_rdma_register_bond; + } + +out: + return cdev_info; + +malloc_global_vector_id_err: + kfree(cdev_info->msix_entries); +malloc_msix_entries_err: + kfree(cdev_info); +malloc_cdev_info_err: + return NULL; +} + +static void nbl_dev_rdma_remove_cdev_info(struct nbl_core_dev_info *cdev_info) +{ + kfree(cdev_info->msix_entries); + kfree(cdev_info->global_vector_id); + kfree(cdev_info); +} + +static void nbl_dev_adev_release(struct device *dev) +{ + struct nbl_aux_dev *dev_link; + + dev_link = container_of(dev, struct nbl_aux_dev, adev.dev); + nbl_dev_rdma_remove_cdev_info(dev_link->cdev_info); + kfree(dev_link); +} + +static int nbl_dev_create_rdma_aux_dev(struct nbl_dev_mgt *dev_mgt, u8 type, + struct nbl_core_dev_lag_info *lag_info) +{ + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_aux_dev *dev_link; + struct auxiliary_device *adev, **temp_adev = NULL; + bool is_grc = false; + int ret = 0; + + dev_link = kzalloc(sizeof(*dev_link), GFP_KERNEL); + if (!dev_link) + return -ENOMEM; + + adev = &dev_link->adev; + + adev->id = type == NBL_AUX_DEV_GRC ? NBL_COMMON_TO_BOARD_ID(common) : rdma_dev->adev_index; + adev->dev.parent = dev; + adev->dev.release = nbl_dev_adev_release; + + switch (type) { + case NBL_AUX_DEV_GRC: + rdma_dev->grc_adev = adev; + adev->name = "nbl.roce_grc"; + temp_adev = &rdma_dev->grc_adev; + is_grc = true; + break; + case NBL_AUX_DEV_ROCE: + rdma_dev->adev = adev; + adev->name = "nbl.roce"; + temp_adev = &rdma_dev->adev; + break; + case NBL_AUX_DEV_BOND: + rdma_dev->bond_adev = adev; + adev->name = "nbl.roce_bond"; + temp_adev = &rdma_dev->bond_adev; + break; + default: + goto unknown_type_err; + } + + dev_link->cdev_info = nbl_dev_rdma_setup_cdev_info(dev_mgt, type, lag_info); + if (!dev_link->cdev_info) { + ret = -ENOMEM; + goto malloc_cdev_info_err; + } + + dev_link->cdev_info->mirror_enable = rdma_dev->mirror_enable; + ret = auxiliary_device_init(adev); + if (ret) { + dev_err(dev, "auxiliary_device_init fail ret= %d", ret); + goto aux_dev_init_err; + } + + ret = __auxiliary_device_add(adev, "nbl"); + if (ret) { + dev_err(dev, "__auxiliary_device_add fail ret= %d", ret); + goto aux_dev_add_err; + } + + dev_info(dev, "nbl plug %d auxiliary device OK", type); + return 0; + +aux_dev_add_err: + /* When uninit, it will call nbl_dev_adev_release, which will free dev_link. + * So just return. 
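+	 * In other words, once auxiliary_device_init() has succeeded, dev_link and the
+	 * cdev_info it holds are owned by the driver core: the final put_device() invokes
+	 * nbl_dev_adev_release(), which frees both. Freeing them again on this path would
+	 * be a double free.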
+	 */
+	auxiliary_device_uninit(adev);
+	if (temp_adev)
+		*temp_adev = NULL;
+	return ret;
+aux_dev_init_err:
+	nbl_dev_rdma_remove_cdev_info(dev_link->cdev_info);
+malloc_cdev_info_err:
+unknown_type_err:
+	kfree(dev_link);
+	if (temp_adev)
+		*temp_adev = NULL;
+	return ret;
+}
+
+static void nbl_dev_destroy_rdma_aux_dev(struct nbl_dev_rdma *rdma_dev,
+					 struct auxiliary_device **adev)
+{
+	if (!adev || !*adev)
+		return;
+
+	/* Set is_halting only after the adev check, so an early return cannot leave it stuck */
+	rdma_dev->is_halting = true;
+
+	if (rdma_dev->has_abnormal_event_task)
+		nbl_common_flush_task(&rdma_dev->abnormal_event_task);
+
+	auxiliary_device_delete(*adev);
+	auxiliary_device_uninit(*adev);
+
+	*adev = NULL;
+
+	rdma_dev->is_halting = false;
+}
+
+int nbl_dev_setup_rdma_dev(struct nbl_adapter *adapter, struct nbl_init_param *param)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_dev_rdma *rdma_dev;
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(common_dev);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct nbl_rdma_register_param register_param = {0};
+	struct nbl_event_callback event_callback = {0};
+	bool has_grc = false;
+
+	/* This must be performed after ctrl dev setup */
+	if (param->caps.has_ctrl)
+		serv_ops->setup_rdma_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+
+	serv_ops->register_rdma(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+				NBL_COMMON_TO_VSI_ID(common), &register_param);
+
+	if (param->caps.has_grc)
+		has_grc = true;
+
+	if (!register_param.has_rdma && !has_grc)
+		return 0;
+
+	rdma_dev = devm_kzalloc(NBL_ADAPTER_TO_DEV(adapter),
+				sizeof(struct nbl_dev_rdma), GFP_KERNEL);
+	if (!rdma_dev)
+		return -ENOMEM;
+
+	rdma_dev->has_rdma = register_param.has_rdma;
+	rdma_dev->has_grc = has_grc;
+	rdma_dev->mem_type = register_param.mem_type;
+	rdma_dev->adev_index = register_param.id;
+	msix_info->serv_info[NBL_MSIX_RDMA_TYPE].num += register_param.intr_num;
+
+	nbl_common_alloc_task(&rdma_dev->event_task, (void *)nbl_dev_rdma_process_event_task);
+	INIT_LIST_HEAD(&rdma_dev->event_param_list);
+	mutex_init(&rdma_dev->event_lock);
+	if (!NBL_COMMON_TO_VF_CAP(common)) {
+		event_callback.callback_data = dev_mgt;
+		event_callback.callback = nbl_dev_rdma_handle_bond_event;
+		nbl_event_register(NBL_EVENT_RDMA_BOND_UPDATE, &event_callback,
+				   NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+		event_callback.callback_data = dev_mgt;
+		event_callback.callback = nbl_dev_rdma_handle_mirror_selectport_event;
+		nbl_event_register(NBL_EVENT_MIRROR_SELECTPORT, &event_callback,
+				   NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+	} else {
+		event_callback.callback_data = dev_mgt;
+		event_callback.callback = nbl_dev_rdma_handle_mirror_outputport_event;
+		nbl_event_register(NBL_EVENT_MIRROR_OUTPUTPORT_DEVLAYER, &event_callback,
+				   NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+	}
+
+	NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt) = rdma_dev;
+
+	return 0;
+}
+
+void nbl_dev_remove_rdma_dev(struct nbl_adapter *adapter)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct nbl_dev_rdma_event_data *data, *data_safe;
+	struct nbl_event_callback event_callback = {0};
+
+	if
(!rdma_dev) + return; + + if (!NBL_COMMON_TO_VF_CAP(common)) { + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_bond_event; + nbl_event_unregister(NBL_EVENT_RDMA_BOND_UPDATE, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_mirror_selectport_event; + nbl_event_unregister(NBL_EVENT_MIRROR_SELECTPORT, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } else { + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_mirror_outputport_event; + nbl_event_unregister(NBL_EVENT_MIRROR_OUTPUTPORT_DEVLAYER, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } + + mutex_lock(&rdma_dev->event_lock); + list_for_each_entry_safe(data, data_safe, &rdma_dev->event_param_list, node) { + list_del(&data->node); + kfree(data); + } + + mutex_unlock(&rdma_dev->event_lock); + nbl_common_release_task(&rdma_dev->event_task); + + if (rdma_dev->has_rdma) + serv_ops->unregister_rdma(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_VSI_ID(common)); + + if (NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) + serv_ops->remove_rdma_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + + devm_kfree(NBL_ADAPTER_TO_DEV(adapter), rdma_dev); + NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt) = NULL; +} + +int nbl_dev_start_rdma_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_event_callback event_callback = {0}; + int ret; + + if (!rdma_dev || (!rdma_dev->has_rdma && !rdma_dev->has_grc)) + return 0; + + if (!!NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) { + nbl_common_alloc_task(&rdma_dev->abnormal_event_task, + nbl_dev_rdma_handle_abnormal_event_task); + rdma_dev->has_abnormal_event_task = true; + } + + if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX)) + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_MSG_GRC_PROCESS, + nbl_dev_chan_grc_process_resp, dev_mgt); + + if (rdma_dev->has_grc) { + ret = nbl_dev_create_rdma_aux_dev(dev_mgt, NBL_AUX_DEV_GRC, NULL); + if (ret) + return ret; + } + + if (rdma_dev->has_rdma) { + ret = nbl_dev_create_rdma_aux_dev(dev_mgt, NBL_AUX_DEV_ROCE, NULL); + if (ret) + goto create_rdma_aux_err; + } + + if (!NBL_COMMON_TO_VF_CAP(common)) { + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_offload_status; + nbl_event_register(NBL_EVENT_OFFLOAD_STATUS_CHANGED, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } + + event_callback.callback_data = rdma_dev; + event_callback.callback = nbl_dev_rdma_handle_reset_event; + nbl_event_register(NBL_EVENT_RESET_EVENT, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_change_mtu_event; + nbl_event_register(NBL_EVENT_CHANGE_MTU, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + rdma_dev->event_ready = true; + nbl_common_queue_work_rdma(&rdma_dev->event_task, true); + + return 0; + +create_rdma_aux_err: + nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->grc_adev); + return ret; +} + +void 
nbl_dev_stop_rdma_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_event_callback event_callback = {0}; + + if (!rdma_dev) + return; + + if (!NBL_COMMON_TO_VF_CAP(common)) { + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_offload_status; + nbl_event_unregister(NBL_EVENT_OFFLOAD_STATUS_CHANGED, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } + + rdma_dev->event_ready = false; + nbl_common_flush_task(&rdma_dev->event_task); + + event_callback.callback_data = rdma_dev; + event_callback.callback = nbl_dev_rdma_handle_reset_event; + nbl_event_unregister(NBL_EVENT_RESET_EVENT, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_change_mtu_event; + nbl_event_unregister(NBL_EVENT_CHANGE_MTU, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->bond_adev); + nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->adev); + nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->grc_adev); + + if (rdma_dev->has_abnormal_event_task) + nbl_common_release_task(&rdma_dev->abnormal_event_task); +} + +int nbl_dev_resume_rdma_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + + if (!rdma_dev || (!rdma_dev->has_rdma && !rdma_dev->has_grc)) + return 0; + + if (rdma_dev->has_abnormal_event_task) + nbl_common_alloc_task(&rdma_dev->abnormal_event_task, + nbl_dev_rdma_handle_abnormal_event_task); + + nbl_common_alloc_task(&rdma_dev->event_task, nbl_dev_rdma_process_event_task); + + return 0; +} + +int nbl_dev_suspend_rdma_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + + if (!rdma_dev) + return 0; + + nbl_common_release_task(&rdma_dev->event_task); + + if (rdma_dev->has_abnormal_event_task) + nbl_common_release_task(&rdma_dev->abnormal_event_task); + + return 0; +} + diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.h new file mode 100644 index 0000000000000000000000000000000000000000..45f7547c129aa4a6d32932588fd062729d30d40b --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_DEV_RDMA_H_ +#define _NBL_DEV_RDMA_H_ + +#include "nbl_dev.h" +#include "nbl_export_rdma.h" + +enum { + NBL_AUX_DEV_GRC = 0, + NBL_AUX_DEV_ROCE, + NBL_AUX_DEV_BOND, +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c index 9b5e1bd9fc07ae638f49ae833f312bfef2b6d8b2..d20651907cb98dfcbc4b0b4c821b8319665d136d 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c @@ -7,6 +7,9 @@ #include "nbl_service.h" extern int device_driver_attach(struct device_driver *drv, struct device *dev); +#define VENDOR_PHYTIUM 0x70 +#define VENDOR_MASK 0xFF +#define VENDOR_OFFSET 24 static struct nbl_userdev { struct cdev cdev; @@ -33,7 +36,6 @@ struct nbl_userdev_dma { unsigned long vaddr; size_t size; unsigned long pfn; - unsigned int ref_cnt; }; bool nbl_dma_iommu_status(struct pci_dev *pdev) @@ -46,15 +48,20 @@ bool nbl_dma_iommu_status(struct pci_dev *pdev) return 0; } -bool nbl_dma_remap_status(struct pci_dev *pdev) +bool nbl_dma_remap_status(struct pci_dev *pdev, u64 *dma_limit) { struct device *dev = &pdev->dev; struct iommu_domain *domain; + dma_addr_t dma_mask = dma_get_mask(dev); + *dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit); domain = iommu_get_domain_for_dev(dev); if (!domain) return 0; + if (domain->geometry.force_aperture) + *dma_limit = min_t(u64, *dma_limit, domain->geometry.aperture_end); + if (domain->type & IOMMU_DOMAIN_IDENTITY) return 0; @@ -70,55 +77,48 @@ static void nbl_user_change_kernel_network(struct nbl_dev_user *user) { struct nbl_adapter *adapter = user->adapter; struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); - struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - struct nbl_event_dev_mode_switch_data data = {0}; struct net_device *netdev = net_dev->netdev; + int ret; if (user->network_type == NBL_KERNEL_NETWORK) return; - rtnl_lock(); - clear_bit(NBL_USER, adapter->state); - - data.op = NBL_DEV_USER_TO_KERNEL; - nbl_event_notify(NBL_EVENT_DEV_MODE_SWITCH, &data, NBL_COMMON_TO_ETH_ID(common), - NBL_COMMON_TO_BOARD_ID(common)); - if (data.ret) - goto unlock; + ret = serv_ops->switch_traffic_default_dest(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_DEV_USER_TO_KERNEL); + if (ret) { + netdev_err(netdev, "network changes to kernel space failed %d\n", ret); + return; + } user->network_type = NBL_KERNEL_NETWORK; netdev_info(netdev, "network changes to kernel space\n"); - -unlock: - rtnl_unlock(); } static int nbl_user_change_user_network(struct nbl_dev_user *user) { struct nbl_adapter *adapter = user->adapter; struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); - struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct net_device *netdev = net_dev->netdev; - struct nbl_event_dev_mode_switch_data data = {0}; int ret = 0; - rtnl_lock(); + if (user->network_type == NBL_USER_NETWORK) + return 0; - data.op = NBL_DEV_KERNEL_TO_USER; - nbl_event_notify(NBL_EVENT_DEV_MODE_SWITCH, &data, NBL_COMMON_TO_ETH_ID(common), - NBL_COMMON_TO_BOARD_ID(common)); - if (data.ret) - goto unlock; + ret = 
serv_ops->switch_traffic_default_dest(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_DEV_KERNEL_TO_USER); + + if (ret) { + netdev_err(netdev, "network changes to user space failed %d\n", ret); + return ret; + } - set_bit(NBL_USER, adapter->state); user->network_type = NBL_USER_NETWORK; netdev_info(netdev, "network changes to user space\n"); -unlock: - rtnl_unlock(); - return ret; } @@ -136,12 +136,25 @@ static int nbl_cdev_open(struct inode *inode, struct file *filep) if (!p) return -ENODEV; + if (test_bit(NBL_FATAL_ERR, p->state)) + return -EIO; + dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(p); user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); opened = atomic_cmpxchg(&user->open_cnt, 0, 1); if (opened) return -EBUSY; + rtnl_lock(); + if (test_bit(NBL_XDP, p->state)) { + atomic_set(&user->open_cnt, 0); + rtnl_unlock(); + return -EIO; + } + + set_bit(NBL_USER, p->state); + rtnl_unlock(); + filep->private_data = p; return 0; @@ -152,11 +165,17 @@ static int nbl_cdev_release(struct inode *inode, struct file *filp) { struct nbl_adapter *adapter = filp->private_data; struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); chan_ops->clear_listener_info(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt)); nbl_user_change_kernel_network(user); + serv_ops->config_fd_flow_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_CHAN_FDIR_RULE_ISOLATE, NBL_FD_STATE_FLUSH); + serv_ops->clear_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), user->user_vsi); atomic_set(&user->open_cnt, 0); + user->user_promisc_mode = 0; + clear_bit(NBL_USER, adapter->state); return 0; } @@ -212,8 +231,8 @@ static int nbl_userdev_common_mmap(struct nbl_adapter *adapter, struct vm_area_s return -EINVAL; if (index == NBL_DEV_SHM_MSG_RING_INDEX) { - struct page *page = virt_to_page((void *)((unsigned long)user->shm_msg_ring + - (pgoff << PAGE_SHIFT))); + struct page *page = virt_to_page((unsigned long)user->shm_msg_ring + + (pgoff << PAGE_SHIFT)); vma->vm_pgoff = pgoff; ret = remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), req_len, vma->vm_page_prot); @@ -237,24 +256,34 @@ static int nbl_cdev_mmap(struct file *filep, struct vm_area_struct *vma) return nbl_userdev_common_mmap(adapter, vma); } -static int nbl_userdev_register_net(struct nbl_adapter *adapter, void *resp) +static int nbl_userdev_register_net(struct nbl_adapter *adapter, void *resp, + struct nbl_chan_send_info *chan_send) { struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct nbl_register_net_result *result = (struct nbl_register_net_result *)resp; struct nbl_dev_vsi *vsi; + int ret = 0; vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + memset(result, 0, sizeof(*result)); result->tx_queue_num = vsi->queue_num; result->rx_queue_num = vsi->queue_num; result->rdma_enable = 0; result->queue_offset = vsi->queue_offset; + result->trusted = 1; - return 0; + if (vsi->queue_num == 0) + ret = -ENOSPC; + + chan_send->ack_len = sizeof(struct nbl_register_net_result); + + return ret; } -static int nbl_userdev_alloc_txrx_queues(struct nbl_adapter *adapter, void *resp) +static int nbl_userdev_alloc_txrx_queues(struct nbl_adapter *adapter, void *resp, + struct nbl_chan_send_info *chan_send) { struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt);
@@ -264,11 +293,15 @@ static int nbl_userdev_alloc_txrx_queues(struct nbl_adapter *adapter, void *resp vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; result = (struct nbl_chan_param_alloc_txrx_queues *)resp; result->queue_num = vsi->queue_num; + result->vsi_id = vsi->vsi_id; + + chan_send->ack_len = sizeof(struct nbl_chan_param_alloc_txrx_queues); return 0; } -static int nbl_userdev_get_vsi_id(struct nbl_adapter *adapter, void *resp) +static int nbl_userdev_get_vsi_id(struct nbl_adapter *adapter, void *resp, + struct nbl_chan_send_info *chan_send) { struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); @@ -279,6 +312,8 @@ static int nbl_userdev_get_vsi_id(struct nbl_adapter *adapter, void *resp) result = (struct nbl_chan_param_get_vsi_id *)resp; result->vsi_id = vsi->vsi_id; + chan_send->ack_len = sizeof(struct nbl_chan_param_get_vsi_id); + return 0; } @@ -322,18 +357,14 @@ static long nbl_userdev_channel_ioctl(struct nbl_adapter *adapter, unsigned long switch (msg->msg_type) { case NBL_CHAN_MSG_REGISTER_NET: - ret = nbl_userdev_register_net(adapter, resp); + ret = nbl_userdev_register_net(adapter, resp, &chan_send); break; case NBL_CHAN_MSG_ALLOC_TXRX_QUEUES: - ret = nbl_userdev_alloc_txrx_queues(adapter, resp); + ret = nbl_userdev_alloc_txrx_queues(adapter, resp, &chan_send); break; case NBL_CHAN_MSG_GET_VSI_ID: - ret = nbl_userdev_get_vsi_id(adapter, resp); - break; - case NBL_CHAN_MSG_ADD_MACVLAN: - WARN_ON(1); + ret = nbl_userdev_get_vsi_id(adapter, resp, &chan_send); break; - case NBL_CHAN_MSG_DEL_MACVLAN: case NBL_CHAN_MSG_UNREGISTER_NET: case NBL_CHAN_MSG_ADD_MULTI_RULE: case NBL_CHAN_MSG_DEL_MULTI_RULE: @@ -353,6 +384,7 @@ static long nbl_userdev_channel_ioctl(struct nbl_adapter *adapter, unsigned long break; } + msg->ack_length = chan_send.ack_len; msg->ack_err = ret; ret = copy_to_user((void __user *)arg, msg, sizeof(*msg)); @@ -364,7 +396,10 @@ static long nbl_userdev_channel_ioctl(struct nbl_adapter *adapter, unsigned long static long nbl_userdev_switch_network(struct nbl_adapter *adapter, unsigned long arg) { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_dev_vsi *vsi; int timeout = 50; int type; @@ -388,10 +423,16 @@ static long nbl_userdev_switch_network(struct nbl_adapter *adapter, unsigned lon } /* todolist: concurreny about adapter->state */ - if (type == NBL_USER_NETWORK) + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + if (type == NBL_USER_NETWORK) { nbl_user_change_user_network(user); - else + serv_ops->set_promisc_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + vsi->vsi_id, user->user_promisc_mode); + serv_ops->cfg_multi_mcast(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + vsi->vsi_id, user->user_mcast_mode); + } else { nbl_user_change_kernel_network(user); + } return 0; } @@ -481,10 +522,81 @@ static long nbl_userdev_get_bar_size(struct nbl_adapter *adapter, unsigned long return ret; } +static long nbl_userdev_get_dma_limit(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + + /** + * The Linux kernel prefers 32-bit IOVAs and attempts the high address space only once the 32-bit range has been used up; allocation therefore proceeds from high addresses down to low ones.
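+ * (Illustration, assuming a 48-bit IOVA space: the kernel allocator hands out
+ * IOVAs downward from the top of the space, while a DPDK process allocates
+ * upward from its 4 GB base, so the two ranges only collide once the space is
+ * nearly exhausted.)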
+ * + * DPDK sets its base address at 4 GB (see eal_get_baseaddr), so DPDK IOVAs almost never conflict with the kernel's. + * Like heap versus stack, the kernel allocates IOVAs from high to low while DPDK allocates from low to high. + * + * The problematic case: the kernel IOMMU is configured for passthrough, + * the nbl device has been switched to DMA mode via sysfs, + * and a concurrent DPDK process attaches a device through UIO, so DPDK uses the PA as the IOVA. + * That PA may lie below 4 GB, and the resulting IOMMU mapping (iova(pa) -> pa) can conflict with the kernel's own mappings. + * + * DPDK's remap policy is therefore: when DPDK uses a real IOVA, it does not set the IOVA MSB; + * when it uses the PA as the IOVA, it sets the MSB. + * The best fix would be to call reserve_iova to keep DPDK and the kernel consistent, + * but struct iommu_dma_cookie is not exported, so we cannot reach the iova_domain + * via iommu_domain->iova_cookie->iovad without redefining struct iommu_dma_cookie + * in driver code. + */ + + return copy_to_user((void __user *)arg, &user->dma_limit, sizeof(user->dma_limit)); +} + +static long nbl_userdev_set_multi_mode(struct nbl_adapter *adapter, unsigned int cmd, + unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_dev_vsi *vsi; + u16 user_multi_mode; + int ret = 0; + + if (get_user(user_multi_mode, (unsigned long __user *)arg)) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), + "set promisc/mcast mode: failed to get mode from user\n"); + return -EFAULT; + } + + if (cmd == NBL_DEV_USER_SET_PROMISC_MODE && user_multi_mode == user->user_promisc_mode) + return 0; + + if (cmd == NBL_DEV_USER_SET_MCAST_MODE && user_multi_mode == user->user_mcast_mode) + return 0; + + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + if (user->network_type == NBL_USER_NETWORK) { + if (cmd == NBL_DEV_USER_SET_PROMISC_MODE) + ret = serv_ops->set_promisc_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + vsi->vsi_id, user_multi_mode); + else + ret = serv_ops->cfg_multi_mcast(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + vsi->vsi_id, user_multi_mode); + } + + if (cmd == NBL_DEV_USER_SET_PROMISC_MODE) + user->user_promisc_mode = user_multi_mode; + else + user->user_mcast_mode = user_multi_mode; + + return ret; +} + static long nbl_userdev_common_ioctl(struct nbl_adapter *adapter, unsigned int cmd, unsigned long arg) { - int ret = 0; + int ret = -EINVAL; switch (cmd) { case NBL_DEV_USER_CHANNEL: @@ -511,6 +623,13 @@ static long nbl_userdev_common_ioctl(struct nbl_adapter *adapter, unsigned int c case NBL_DEV_USER_GET_BAR_SIZE: ret = nbl_userdev_get_bar_size(adapter, arg); break; + case NBL_DEV_USER_GET_DMA_LIMIT: + ret = nbl_userdev_get_dma_limit(adapter, arg); + break; + case NBL_DEV_USER_SET_PROMISC_MODE: + case NBL_DEV_USER_SET_MCAST_MODE: + ret = nbl_userdev_set_multi_mode(adapter, cmd, arg); + break; default: break; } @@ -595,6 +714,95 @@ static struct nbl_userdev_dma *nbl_userdev_find_dma(struct nbl_dev_user_iommu_gr return NULL; } +static struct rb_node *nbl_userdev_find_dma_first_node(struct nbl_dev_user_iommu_group *group, + dma_addr_t start, size_t size) +{ + struct rb_node *res = NULL; + struct rb_node *node = group->dma_tree.rb_node; + struct nbl_userdev_dma *dma_res = NULL; + + while (node) { + struct nbl_userdev_dma *dma = rb_entry(node, struct nbl_userdev_dma, node); +
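/*
 * A minimal user-space sketch of the new NBL_DEV_USER_GET_DMA_LIMIT ioctl,
 * assuming a file descriptor to the nbl user device node (the exact /dev
 * path is installation specific and hypothetical here):
 *
 *   #include <stdint.h>
 *   #include <sys/ioctl.h>
 *
 *   uint64_t dma_limit = 0;
 *
 *   // The handler copy_to_user()s the u64 computed by nbl_dma_remap_status().
 *   if (!ioctl(fd, NBL_DEV_USER_GET_DMA_LIMIT, &dma_limit))
 *       use_as_iova_upper_bound(dma_limit);  // hypothetical consumer
 */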
+ if (start < dma->vaddr + dma->size) { + res = node; + dma_res = dma; + if (start >= dma->vaddr) + break; + node = node->rb_left; + } else { + node = node->rb_right; + } + } + if (res && size && dma_res->vaddr >= start + size) + res = NULL; + return res; +} + +/** + * Check for DMA conflicts when multiple devices share one iommu group, i.e. when ACS is not supported. + * Return -1 if the devices' mappings conflict. + * Return 1 if the mapping already exists and does not conflict. + * Return 0 if the mapping does not exist yet. + */ +static int nbl_userdev_check_dma_conflict(struct nbl_dev_user *user, + unsigned long vaddr, dma_addr_t iova, size_t size) +{ + struct nbl_dev_user_iommu_group *group = user->group; + struct nbl_userdev_dma *dma; + struct rb_node *n; + struct page *h_page; + size_t unmapped = 0; + unsigned long vfn, pfn, vaddr_new; + dma_addr_t iova_new; + int ret; + + dma = nbl_userdev_find_dma(group, vaddr, 1); + if (dma && dma->vaddr != vaddr) + return -1; + + dma = nbl_userdev_find_dma(group, vaddr + size - 1, 0); + if (dma && dma->vaddr + dma->size != vaddr + size) + return -1; + + if (!nbl_userdev_find_dma(group, vaddr, size)) + return 0; + n = nbl_userdev_find_dma_first_node(group, vaddr, size); + vaddr_new = vaddr; + iova_new = iova; + while (n) { + dma = rb_entry(n, struct nbl_userdev_dma, node); + if (dma->iova >= iova + size) + break; + + if (dma->vaddr >= vaddr + size) + break; + + if (dma->vaddr != vaddr_new || dma->iova != iova_new) + break; + + vfn = vaddr_new >> PAGE_SHIFT; + ret = vfio_pin_pages(NBL_USERDEV_TO_VFIO_DEV(user), + vaddr_new, 1, IOMMU_READ | IOMMU_WRITE, &h_page); + if (ret <= 0) + break; + pfn = page_to_pfn(h_page); + vfio_unpin_pages(NBL_USERDEV_TO_VFIO_DEV(user), vaddr_new, 1); + if (pfn != dma->pfn) + break; + + n = rb_next(n); + unmapped += dma->size; + vaddr_new += dma->size; + iova_new += dma->size; + } + + if (unmapped != size) + return -1; + + return 1; +} + static void nbl_userdev_link_dma(struct nbl_dev_user_iommu_group *group, struct nbl_userdev_dma *new) { @@ -615,10 +823,24 @@ static void nbl_userdev_link_dma(struct nbl_dev_user_iommu_group *group, rb_insert_color(&new->node, &group->dma_tree); } +#ifdef CONFIG_ARM64 +static int check_phytium_cpu(void) +{ + u32 midr = read_cpuid_id(); + u32 vendor = (midr >> VENDOR_OFFSET) & VENDOR_MASK; + + if (vendor == VENDOR_PHYTIUM) + return 1; + + return 0; +} +#endif + static void nbl_userdev_remove_dma(struct nbl_dev_user_iommu_group *group, struct nbl_userdev_dma *dma) { struct nbl_vfio_batch batch; + size_t unmapped; long npage, batch_pages; unsigned long vaddr; int ret, caps; @@ -627,7 +849,16 @@ static void nbl_userdev_remove_dma(struct nbl_dev_user_iommu_group *group, dev_dbg(group->dev, "dma remove: vaddr 0x%lx, iova 0x%llx, size 0x%lx\n", dma->vaddr, dma->iova, dma->size); - iommu_unmap(iommu_get_domain_for_dev(group->dev), dma->iova, dma->size); + unmapped = iommu_unmap(iommu_get_domain_for_dev(group->dev), dma->iova, dma->size); + WARN_ON(unmapped != dma->size); + /* + * On Kylin with Phytium (FT) servers, stale DMA contents have been observed + * while the SMMU is in translate mode; forcing a full IOTLB flush avoids the problem.
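+ * (check_phytium_cpu() above decodes MIDR_EL1, read via read_cpuid_id():
+ * bits [31:24] hold the CPU implementer ID, and 0x70 is Phytium's
+ * ARM-assigned implementer code, hence VENDOR_OFFSET, VENDOR_MASK and
+ * VENDOR_PHYTIUM.)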
+ */ +#ifdef CONFIG_ARM64 + if (check_phytium_cpu()) + iommu_flush_iotlb_all(iommu_get_domain_for_dev(group->dev)); +#endif ret = nbl_vfio_batch_init(&batch); if (ret) { @@ -671,9 +902,8 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a struct device *dev = &pdev->dev; struct nbl_vfio_batch batch; struct nbl_userdev_dma *dma; - struct page *h_page; unsigned long minsz, pfn_base = 0, pfn; - unsigned long vaddr, vfn; + unsigned long vaddr; dma_addr_t iova; u32 mask = NBL_DEV_USER_DMA_MAP_FLAG_READ | NBL_DEV_USER_DMA_MAP_FLAG_WRITE; size_t size; @@ -697,33 +927,15 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a return ret; mutex_lock(&user->group->dma_tree_lock); - /* rb-tree find */ - dma = nbl_userdev_find_dma(user->group, vaddr, map.size); - if (dma && dma->iova == iova && dma->size == map.size) { - vfn = vaddr >> PAGE_SHIFT; - ret = vfio_pin_pages(&user->vdev, vaddr, 1, IOMMU_READ | IOMMU_WRITE, &h_page); - if (ret <= 0) { - dev_err(dev, "vfio_pin_pages failed %d\n", ret); - goto mutext_unlock; - } + ret = nbl_userdev_check_dma_conflict(user, vaddr, iova, map.size); + if (ret < 0) { + dev_err(dev, "multiple dma not equal\n"); + ret = -EINVAL; + goto mutext_unlock; + } - pfn = page_to_pfn(h_page); + if (ret) { ret = 0; - vfio_unpin_pages(&user->vdev, vaddr, 1); - - if (pfn != dma->pfn) { - dev_err(dev, "multiple dma pfn not equal, new pfn %lu, dma pfn %lu\n", - pfn, dma->pfn); - ret = -EINVAL; - goto mutext_unlock; - } - - dev_info(dev, "existing dma info, ref_cnt++\n"); - dma->ref_cnt++; - goto mutext_unlock; - } else if (dma) { - dev_info(dev, "multiple dma not equal\n"); - ret = -EINVAL; goto mutext_unlock; } @@ -749,7 +961,7 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a for (i = 1; i < batch_pages; i++) batch.pages_in[i] = batch.pages_in[i - 1] + 1; - ret = vfio_pin_pages(&user->vdev, vaddr, batch_pages, + ret = vfio_pin_pages(NBL_USERDEV_TO_VFIO_DEV(user), vaddr, batch_pages, IOMMU_READ | IOMMU_WRITE, batch.h_page); dev_dbg(dev, "page %ld pages, return %d\n", batch_pages, batch.size); @@ -817,7 +1029,6 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a dma->iova = map.iova; dma->size = map.size; dma->vaddr = map.vaddr; - dma->ref_cnt = 1; nbl_userdev_link_dma(user->group, dma); dev_info(dev, "dma map info: vaddr=0x%llx, iova=0x%llx, size=0x%llx\n", @@ -831,7 +1042,7 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a iommu_unmap(iommu_get_domain_for_dev(dev), map.iova, iova - map.iova); if (batch.size) - vfio_unpin_pages(&user->vdev, vaddr, batch.size); + vfio_unpin_pages(NBL_USERDEV_TO_VFIO_DEV(user), vaddr, batch.size); npage = (vaddr - map.vaddr) >> PAGE_SHIFT; vaddr = map.vaddr; @@ -846,7 +1057,7 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a for (i = 1; i < batch_pages; i++) batch.pages_in[i] = batch.pages_in[i - 1] + 1; - vfio_unpin_pages(&user->vdev, vaddr, batch_pages); + vfio_unpin_pages(NBL_USERDEV_TO_VFIO_DEV(user), vaddr, batch_pages); npage -= batch_pages; vaddr += (batch_pages << PAGE_SHIFT); } @@ -866,6 +1077,8 @@ static long nbl_userdev_dma_unmap_ioctl(struct nbl_dev_user *user, unsigned long struct nbl_dev_user_dma_unmap unmap; struct nbl_userdev_dma *dma; unsigned long minsz; + size_t unmapped = 0; + struct rb_node *n; minsz = offsetofend(struct nbl_dev_user_dma_unmap, size); @@ -879,19 +1092,28 @@ static long nbl_userdev_dma_unmap_ioctl(struct nbl_dev_user 
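/*
 * A user-space sketch of the DMA map/unmap pair handled here. Field names
 * are taken from the kernel handlers; the argsz member and the unmap
 * companion command name follow the VFIO convention and are assumptions:
 *
 *   struct nbl_dev_user_dma_map map = {
 *       .argsz = sizeof(map),                // assumed VFIO-style header
 *       .flags = NBL_DEV_USER_DMA_MAP_FLAG_READ |
 *                NBL_DEV_USER_DMA_MAP_FLAG_WRITE,
 *       .vaddr = (uintptr_t)buf,             // page-aligned user buffer
 *       .iova  = iova,                       // kept below the advertised dma_limit
 *       .size  = len,                        // whole pages
 *   };
 *   ioctl(fd, NBL_DEV_USER_MAP_DMA, &map);
 *
 *   struct nbl_dev_user_dma_unmap unmap = {
 *       .argsz = sizeof(unmap),
 *       .vaddr = (uintptr_t)buf, .iova = iova, .size = len,
 *   };
 *   ioctl(fd, NBL_DEV_USER_UNMAP_DMA, &unmap); // assumed command name
 */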
*user, unsigned long (u64)unmap.vaddr, (u64)unmap.iova, (u64)unmap.size); mutex_lock(&user->group->dma_tree_lock); - dma = nbl_userdev_find_dma(user->group, unmap.vaddr, unmap.size); - /* unmmap pages: rb-tree lock */ - if (dma) { - if (dma->vaddr != unmap.vaddr || dma->iova != unmap.iova || dma->size != unmap.size) - dev_err(dev, "dma unmap not equal, unmap vaddr 0x%llx, iova 0x%llx,\n" - "size 0x%llx, dma rbtree vaddr 0x%lx, iova 0x%llx, size 0x%lx\n", - unmap.vaddr, unmap.iova, unmap.size, - dma->vaddr, dma->iova, dma->size); - dma->ref_cnt--; - if (!dma->ref_cnt) - nbl_userdev_remove_dma(user->group, dma); + user->group->vdev = NBL_USERDEV_TO_VFIO_DEV(user); + dma = nbl_userdev_find_dma(user->group, unmap.vaddr, 1); + if (dma && dma->vaddr != unmap.vaddr) { + mutex_unlock(&user->group->dma_tree_lock); + return -EINVAL; + } + + dma = nbl_userdev_find_dma(user->group, unmap.vaddr + unmap.size - 1, 0); + if (dma && dma->vaddr + dma->size != unmap.vaddr + unmap.size) + goto unlock; + + n = nbl_userdev_find_dma_first_node(user->group, unmap.vaddr, unmap.size); + while (n) { + dma = rb_entry(n, struct nbl_userdev_dma, node); + if (dma->vaddr >= unmap.vaddr + unmap.size) + break; + + n = rb_next(n); + nbl_userdev_remove_dma(user->group, dma); + unmapped += dma->size; } +unlock: mutex_unlock(&user->group->dma_tree_lock); + unmap.size = unmapped; return 0; } @@ -901,7 +1123,7 @@ static long nbl_vfio_ioctl(struct vfio_device *vdev, unsigned int cmd, unsigned struct nbl_dev_user *user; long ret; - user = container_of(vdev, struct nbl_dev_user, vdev); + user = NBL_VFIO_DEV_TO_USERDEV(vdev); switch (cmd) { case NBL_DEV_USER_MAP_DMA: ret = nbl_userdev_dma_map_ioctl(user, arg); @@ -921,19 +1143,20 @@ static int nbl_vfio_mmap(struct vfio_device *vdev, struct vm_area_struct *vma) { struct nbl_dev_user *user; - user = container_of(vdev, struct nbl_dev_user, vdev); + user = NBL_VFIO_DEV_TO_USERDEV(vdev); return nbl_userdev_common_mmap(user->adapter, vma); } static void nbl_vfio_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length) { - struct nbl_dev_user *user = container_of(vdev, struct nbl_dev_user, vdev); + struct nbl_dev_user *user = NBL_VFIO_DEV_TO_USERDEV(vdev); struct nbl_userdev_dma *dma; dev_info(user->group->dev, "vdev notifyier iova 0x%llx, size 0x%llx\n", iova, length); mutex_lock(&user->group->dma_tree_lock); + user->group->vdev = vdev; dma = nbl_userdev_find_dma(user->group, (dma_addr_t)iova, (size_t)length); if (dma) nbl_userdev_remove_dma(user->group, dma); @@ -953,15 +1176,18 @@ static void nbl_userdev_release_group(struct kref *kref) group = container_of(kref, struct nbl_dev_user_iommu_group, kref); list_del(&group->group_next); mutex_unlock(&nbl_userdev.glock); + mutex_lock(&group->dma_tree_lock); while ((node = rb_first(&group->dma_tree))) nbl_userdev_remove_dma(group, rb_entry(node, struct nbl_userdev_dma, node)); iommu_group_put(group->iommu_group); + mutex_unlock(&group->dma_tree_lock); kfree(group); } -static void nbl_userdev_group_put(struct nbl_dev_user_iommu_group *group) +static void nbl_userdev_group_put(struct nbl_dev_user *user, struct nbl_dev_user_iommu_group *group) { + group->vdev = NBL_USERDEV_TO_VFIO_DEV(user); kref_put_mutex(&group->kref, nbl_userdev_release_group, &nbl_userdev.glock); } @@ -1027,14 +1253,27 @@ static int nbl_vfio_open(struct vfio_device *vdev) struct pci_dev *pdev; int ret = 0, opened; - user = container_of(vdev, struct nbl_dev_user, vdev); + user = NBL_VFIO_DEV_TO_USERDEV(vdev); adapter = user->adapter; pdev = adapter->pdev; + if (test_bit(NBL_FATAL_ERR, adapter->state)) + return -EIO; + opened =
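/*
 * Note on the gate below (it mirrors nbl_cdev_open() earlier in this file):
 * open_cnt enforces a single opener, and rtnl_lock() is taken so that the
 * NBL_XDP test cannot race with XDP setup, which also runs under rtnl;
 * NBL_USER is only set once XDP is known to be inactive.
 */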
atomic_cmpxchg(&user->open_cnt, 0, 1); if (opened) return -EBUSY; + rtnl_lock(); + if (test_bit(NBL_XDP, adapter->state)) { + atomic_set(&user->open_cnt, 0); + rtnl_unlock(); + return -EIO; + } + + set_bit(NBL_USER, adapter->state); + rtnl_unlock(); + /* add iommu group list */ iommu_group = iommu_group_get(&pdev->dev); if (!iommu_group) { @@ -1063,6 +1302,8 @@ static int nbl_vfio_open(struct vfio_device *vdev) clear_open_cnt: atomic_set(&user->open_cnt, 0); + clear_bit(NBL_USER, adapter->state); + return ret; } @@ -1072,29 +1313,48 @@ static void nbl_vfio_close(struct vfio_device *vdev) struct nbl_adapter *adapter; struct pci_dev *pdev; struct nbl_dev_mgt *dev_mgt; + struct nbl_dev_net *net_dev; struct nbl_channel_ops *chan_ops; + struct nbl_service_ops *serv_ops; - user = container_of(vdev, struct nbl_dev_user, vdev); + user = NBL_VFIO_DEV_TO_USERDEV(vdev); adapter = user->adapter; pdev = adapter->pdev; dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); if (user->group) - nbl_userdev_group_put(user->group); + nbl_userdev_group_put(user, user->group); user->group = NULL; chan_ops->clear_listener_info(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt)); nbl_user_change_kernel_network(user); + serv_ops->config_fd_flow_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_CHAN_FDIR_RULE_ISOLATE, NBL_FD_STATE_FLUSH); + serv_ops->clear_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), user->user_vsi); atomic_set(&user->open_cnt, 0); + clear_bit(NBL_USER, adapter->state); + user->user_promisc_mode = 0; dev_info(&pdev->dev, "nbl vfio close\n"); } +static void nbl_vfio_release(struct vfio_device *vdev) +{ +} + +static int nbl_vfio_init(struct vfio_device *vdev) +{ + return 0; +} static const struct vfio_device_ops nbl_vfio_dev_ops = { .name = "vfio-nbl", .open_device = nbl_vfio_open, .close_device = nbl_vfio_close, + .init = nbl_vfio_init, + .release = nbl_vfio_release, .read = nbl_vfio_read, .write = nbl_vfio_write, .ioctl = nbl_vfio_ioctl, @@ -1115,32 +1375,33 @@ static const struct file_operations nbl_cdev_fops = { .mmap = nbl_cdev_mmap, }; -static struct mdev_driver nbl_mdev_driver = { - .device_api = VFIO_DEVICE_API_PCI_STRING, - .driver = { - .name = "nbl_mdev", - .owner = THIS_MODULE, - .mod_name = KBUILD_MODNAME, - }, -}; - static int nbl_bus_probe(struct device *dev) { - struct mdev_driver *drv = - container_of(dev->driver, struct mdev_driver, driver); + struct nbl_dev_user *user = container_of(dev, struct nbl_dev_user, mdev); + struct nbl_vfio_device *vdev; + int ret; + + vdev = vfio_alloc_device(nbl_vfio_device, vdev, dev, &nbl_vfio_dev_ops); + if (IS_ERR(vdev)) + return PTR_ERR(vdev); + user->vdev = &vdev->vdev; + vdev->user = user; + + ret = vfio_register_emulated_iommu_dev(NBL_USERDEV_TO_VFIO_DEV(user)); + if (ret) { + dev_err(dev, "vfio register iommu failed, ret %d\n", ret); + vfio_put_device(NBL_USERDEV_TO_VFIO_DEV(user)); + } - if (!drv->probe) - return 0; - return drv->probe(to_mdev_device(dev)); + return ret; } static void nbl_bus_remove(struct device *dev) { - struct mdev_driver *drv = - container_of(dev->driver, struct mdev_driver, driver); + struct nbl_dev_user *user = container_of(dev, struct nbl_dev_user, mdev); - if (drv->remove) - drv->remove(to_mdev_device(dev)); + vfio_unregister_group_dev(NBL_USERDEV_TO_VFIO_DEV(user)); + vfio_put_device(NBL_USERDEV_TO_VFIO_DEV(user)); } static int nbl_bus_match(struct device *dev, struct device_driver *drv) @@ -1155,6 
+1416,13 @@ static struct bus_type nbl_bus_type = { .match = nbl_bus_match, }; +static struct device_driver nbl_userdev_driver = { + .bus = &nbl_bus_type, + .name = "nbl_userdev", + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, +}; + static void nbl_mdev_device_release(struct device *dev) { dev_info(dev, "nbl mdev device release\n"); @@ -1168,7 +1436,10 @@ void nbl_dev_start_user_dev(struct nbl_adapter *adapter) struct device *cdev = NULL, *mdev; struct pci_dev *pdev = NBL_COMMON_TO_PDEV(common); struct nbl_dev_user *user; + struct device_driver *drv; void *shm_msg_ring; + struct nbl_dev_vsi *user_vsi, *xdp_vsi; + u64 dma_limit; bool iommu_status = 0, remap_status = 0; int minor = 0, ret; @@ -1178,19 +1449,24 @@ void nbl_dev_start_user_dev(struct nbl_adapter *adapter) if (!dev_is_dma_coherent(dev)) return; - if (dma_get_mask(dev) != DMA_BIT_MASK(64)) - return; - - iommu_status = nbl_dma_iommu_status(pdev); - remap_status = nbl_dma_remap_status(pdev); - - /* iommu passthrough */ - if (iommu_status && !remap_status) { - if (common->dma_dev == common->dev) + /* xdp and user vsi share same queue range */ + user_vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_USER); + xdp_vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_XDP); + if (xdp_vsi) { + user_vsi->queue_offset = xdp_vsi->queue_offset; + } else { + ret = user_vsi->ops->setup(dev_mgt, &adapter->init_param, user_vsi); + if (ret) { + dev_err(NBL_DEV_MGT_TO_DEV(dev_mgt), "User-vsi setup failed"); return; - remap_status = 1; + } } + iommu_status = nbl_dma_iommu_status(pdev); + remap_status = nbl_dma_remap_status(pdev, &dma_limit); + /* 39bits with 3-level paging, 48bits with 4-level paging, 57bits with 5-level paging */ + WARN_ON(fls64(dma_limit) < 39); + /* iommu passthrough must keep dpdk iova pa mode */ shm_msg_ring = kzalloc(NBL_USER_DEV_SHMMSGRING_SIZE, GFP_KERNEL); if (!shm_msg_ring) return; @@ -1203,17 +1479,12 @@ void nbl_dev_start_user_dev(struct nbl_adapter *adapter) if (remap_status) { /* mdev init */ - mdev = devm_kzalloc(dev, sizeof(struct device), GFP_KERNEL); - if (!mdev) { - kfree(shm_msg_ring); - return; - } + mdev = &user->mdev; + mdev->bus = &nbl_bus_type; + drv = &nbl_userdev_driver; device_initialize(mdev); mdev->parent = dev; - - mdev->bus = &nbl_bus_type; - mdev->release = nbl_mdev_device_release; ret = dev_set_name(mdev, pci_name(pdev)); @@ -1229,23 +1500,14 @@ void nbl_dev_start_user_dev(struct nbl_adapter *adapter) } dev_info(dev, "MDEV: created\n"); - devm_kfree(dev, user); + ret = device_driver_attach(drv, mdev); - user = vfio_alloc_device(nbl_dev_user, vdev, mdev, &nbl_vfio_dev_ops); - if (IS_ERR(user)) { - device_del(mdev); - goto free_dev; - } - - ret = vfio_register_emulated_iommu_dev(&user->vdev); if (ret) { - vfio_put_device(&user->vdev); + dev_err(dev, "driver attach failed %d\n", ret); device_del(mdev); + put_device(mdev); goto free_dev; } - - user->mdev = mdev; - mdev->driver = &nbl_mdev_driver.driver; } else { mutex_lock(&nbl_userdev.clock); minor = idr_alloc(&nbl_userdev.cidr, adapter, 1, MINORMASK + 1, GFP_KERNEL); @@ -1272,15 +1534,19 @@ void nbl_dev_start_user_dev(struct nbl_adapter *adapter) user->adapter = adapter; user->iommu_status = iommu_status; user->remap_status = remap_status; + user->dma_limit = dma_limit; atomic_set(&user->open_cnt, 0); user->network_type = NBL_KERNEL_NETWORK; + user->user_promisc_mode = 0; + user->user_mcast_mode = 0; + user->user_vsi = user_vsi->vsi_id; NBL_DEV_MGT_TO_USER_DEV(dev_mgt) = user; return; free_dev: - devm_kfree(dev, mdev); + devm_kfree(dev, user); 
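/*
 * Decision summary for nbl_dev_start_user_dev() (a sketch of the logic
 * above, not new behaviour):
 *
 *   if (remap_status)   // IOMMU translating: IOVAs are remapped
 *       create a device on the private nbl_bus_type, attach
 *       nbl_userdev_driver, and register it as an emulated-IOMMU vfio dev;
 *   else                // passthrough or no IOMMU: DPDK must use PA as IOVA
 *       expose a plain character device via the "nbl_userdev" class.
 *
 * The WARN_ON(fls64(dma_limit) < 39) flags apertures smaller than even
 * 3-level paging can cover (39/48/57 bits map to 3/4/5 page-table levels).
 */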
kfree(shm_msg_ring); } @@ -1303,12 +1569,10 @@ void nbl_dev_stop_user_dev(struct nbl_adapter *adapter) kfree(user->shm_msg_ring); if (user->remap_status) { - mdev = user->mdev; - vfio_unregister_group_dev(&user->vdev); - vfio_put_device(&user->vdev); - mdev->driver = NULL; + mdev = &user->mdev; device_del(mdev); - devm_kfree(dev, mdev); + put_device(mdev); + devm_kfree(dev, user); } else if (user->dev) { mutex_lock(&nbl_userdev.clock); device_destroy(nbl_userdev.cls, MKDEV(MAJOR(nbl_userdev.cdevt), user->minor)); @@ -1329,6 +1593,17 @@ void nbl_dev_user_module_init(void) mutex_init(&nbl_userdev.glock); INIT_LIST_HEAD(&nbl_userdev.glist); + ret = bus_register(&nbl_bus_type); + if (ret) { + pr_err("nbl bus type register failed\n"); + return; + } + ret = driver_register(&nbl_userdev_driver); + if (ret) { + pr_err("nbl userdev driver register failed\n"); + bus_unregister(&nbl_bus_type); + return; + } nbl_userdev.cls = class_create("nbl_userdev"); if (IS_ERR(nbl_userdev.cls)) { pr_err("nbl_userdev class alloc failed\n"); @@ -1361,6 +1636,8 @@ void nbl_dev_user_module_init(void) class_destroy(nbl_userdev.cls); nbl_userdev.cls = NULL; err_create_cls: + driver_unregister(&nbl_userdev_driver); + bus_unregister(&nbl_bus_type); return; } @@ -1372,6 +1649,8 @@ void nbl_dev_user_module_destroy(void) unregister_chrdev_region(nbl_userdev.cdevt, MINORMASK + 1); class_destroy(nbl_userdev.cls); nbl_userdev.cls = NULL; + driver_unregister(&nbl_userdev_driver); + bus_unregister(&nbl_bus_type); nbl_userdev.success = 0; } } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h index 8e757fd1b1569144594c8d30b017bfd25d909a57..9fa7eeceb5845ba0ba7d703113899ded5e6d1be7 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h @@ -64,6 +64,12 @@ struct nbl_dev_user_dma_unmap { #define NBL_DEV_USER_GET_BAR_SIZE _IO(NBL_DEV_USER_TYPE, 8) +#define NBL_DEV_USER_GET_DMA_LIMIT _IO(NBL_DEV_USER_TYPE, 9) + +#define NBL_DEV_USER_SET_PROMISC_MODE _IO(NBL_DEV_USER_TYPE, 10) + +#define NBL_DEV_USER_SET_MCAST_MODE _IO(NBL_DEV_USER_TYPE, 11) + void nbl_dev_start_user_dev(struct nbl_adapter *adapter); void nbl_dev_stop_user_dev(struct nbl_adapter *adapter); diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c index 396f8a306832b5199ba419172bebe249938188c8..f30d2ccf2949d35517188cb8840d4854e23df155 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c @@ -133,6 +133,7 @@ static void nbl_disp_chan_add_multi_rule_resp(void *priv, u16 src_id, u16 msg_id struct nbl_resource_ops *res_ops; struct nbl_channel_ops *chan_ops; struct nbl_chan_ack_info chan_ack; + u8 broadcast_mac[ETH_ALEN]; int err = NBL_CHAN_RESP_OK; int ret = 0; u16 vsi_id; @@ -141,8 +142,10 @@ static void nbl_disp_chan_add_multi_rule_resp(void *priv, u16 src_id, u16 msg_id chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); vsi_id = *(u16 *)data; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_rule, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + memset(broadcast_mac, 0xFF, ETH_ALEN); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), broadcast_mac, 0, vsi_id); if (ret) err = NBL_CHAN_RESP_ERR; @@ -175,6 +178,7 @@ static void 
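/*
 * nbl_dev_user_module_init() registers bus -> driver -> class -> chrdev
 * region, and both its error paths and nbl_dev_user_module_destroy()
 * unwind in strict LIFO order, e.g. (sketch):
 *
 *   bus_register(&nbl_bus_type);
 *   driver_register(&nbl_userdev_driver);  // on failure: bus_unregister()
 *   class_create("nbl_userdev");           // on failure: driver_unregister() + bus_unregister()
 */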
nbl_disp_chan_del_multi_rule_resp(void *priv, u16 src_id, u16 msg_id struct nbl_resource_ops *res_ops; struct nbl_channel_ops *chan_ops; struct nbl_chan_ack_info chan_ack; + u8 broadcast_mac[ETH_ALEN]; int err = NBL_CHAN_RESP_OK; u16 vsi_id; @@ -182,11 +186,75 @@ static void nbl_disp_chan_del_multi_rule_resp(void *priv, u16 src_id, u16 msg_id chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); vsi_id = *(u16 *)data; + memset(broadcast_mac, 0xFF, ETH_ALEN); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), broadcast_mac, 0, vsi_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_MULTI_RULE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_rule, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +static int nbl_disp_cfg_multi_mcast(void *priv, u16 vsi, u16 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_MULTI_RULE, msg_id, err, NULL, 0); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + if (enable) + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_mcast, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + else + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_mcast, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + return ret; +} + +static int nbl_disp_chan_cfg_multi_mcast_req(void *priv, u16 vsi_id, u16 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + struct nbl_chan_param_cfg_multi_mcast mcast; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + mcast.vsi = vsi_id; + mcast.enable = enable; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_MULTI_MCAST_RULE, + &mcast, sizeof(mcast), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_multi_mcast_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_multi_mcast *mcast; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + mcast = (struct nbl_chan_param_cfg_multi_mcast *)data; + + if (mcast->enable) + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_mcast, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mcast->vsi); + else + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_mcast, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mcast->vsi); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_MULTI_MCAST_RULE, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } @@ -285,25 +353,30 @@ static void nbl_disp_chan_register_net_resp(void *priv, u16 src_id, u16 msg_id, struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_resource_ops *res_ops; struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_register_net_info *param; + struct nbl_chan_param_register_net_info param; struct nbl_register_net_result result = {0}; struct nbl_register_net_param register_param = {0}; struct 
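/*
 * The multicast plumbing above follows this file's request/response
 * pattern: the requester sends NBL_CHAN_MSG_CFG_MULTI_MCAST_RULE to the
 * managing PF, whose resp handler applies the resource op and acks.
 * In sketch form:
 *
 *   // requester side
 *   NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_MULTI_MCAST_RULE,
 *                 &mcast, sizeof(mcast), NULL, 0, 1);
 *   chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
 *
 *   // responder side
 *   ret = res_ops->add_multi_mcast(priv, mcast->vsi); // or del_multi_mcast()
 *   NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_MULTI_MCAST_RULE,
 *                msg_id, err, NULL, 0);
 *   chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
 */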
nbl_chan_ack_info chan_ack; + int copy_len; int err = NBL_CHAN_RESP_OK; int ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - param = (struct nbl_chan_param_register_net_info *)data; + memset(¶m, 0, sizeof(struct nbl_chan_param_register_net_info)); + copy_len = data_len < sizeof(struct nbl_chan_param_register_net_info) ? + data_len : sizeof(struct nbl_chan_param_register_net_info); + memcpy(¶m, data, copy_len); - register_param.pf_bar_start = param->pf_bar_start; - register_param.pf_bdf = param->pf_bdf; - register_param.vf_bar_start = param->vf_bar_start; - register_param.vf_bar_size = param->vf_bar_size; - register_param.total_vfs = param->total_vfs; - register_param.offset = param->offset; - register_param.stride = param->stride; + register_param.pf_bar_start = param.pf_bar_start; + register_param.pf_bdf = param.pf_bdf; + register_param.vf_bar_start = param.vf_bar_start; + register_param.vf_bar_size = param.vf_bar_size; + register_param.total_vfs = param.total_vfs; + register_param.offset = param.offset; + register_param.stride = param.stride; + register_param.is_vdpa = param.is_vdpa; NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_net, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, ®ister_param, &result); @@ -386,9 +459,7 @@ static int nbl_disp_chan_alloc_txrx_queues_req(void *priv, u16 vsi_id, u16 queue NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, ¶m, sizeof(param), &result, sizeof(result), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - - return 0; + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } static void nbl_disp_chan_alloc_txrx_queues_resp(void *priv, u16 src_id, u16 msg_id, @@ -408,8 +479,9 @@ static void nbl_disp_chan_alloc_txrx_queues_resp(void *priv, u16 src_id, u16 msg param = (struct nbl_chan_param_alloc_txrx_queues *)data; result.queue_num = param->queue_num; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->alloc_txrx_queues, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->queue_num); + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->alloc_txrx_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->queue_num); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, msg_id, err, &result, sizeof(result)); @@ -466,7 +538,6 @@ static int nbl_disp_chan_register_vsi2q_req(void *priv, u16 vsi_index, u16 vsi_i param.vsi_id = vsi_id; param.queue_offset = queue_offset; param.queue_num = queue_num; - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REGISTER_VSI2Q, ¶m, sizeof(param), NULL, 0, 1); return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); @@ -484,8 +555,10 @@ static void nbl_disp_chan_register_vsi2q_resp(void *priv, u16 src_id, u16 msg_id param = (struct nbl_chan_param_register_vsi2q *)data; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_vsi2q, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->vsi_index, param->vsi_id, param->queue_offset, param->queue_num); + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_vsi2q, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_index, param->vsi_id, + param->queue_offset, param->queue_num); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_VSI2Q, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); @@ -515,8 +588,8 @@ static void nbl_disp_chan_setup_q2vsi_resp(void *priv, u16 src_id, u16 msg_id, vsi_id = *(u16 *)data; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_q2vsi, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - vsi_id); + 
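/*
 * The memset + bounded memcpy above is the compatibility idiom this patch
 * adopts for channel payloads that gained new fields (here is_vdpa): an
 * older sender transmits a shorter struct, so the handler never reads past
 * data_len and new fields stay zeroed. Equivalent sketch:
 *
 *   memset(&param, 0, sizeof(param));
 *   memcpy(&param, data, min_t(u32, data_len, sizeof(param)));
 */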
err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_q2vsi, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_Q2VSI, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); @@ -576,8 +649,8 @@ static void nbl_disp_chan_setup_rss_resp(void *priv, u16 src_id, u16 msg_id, u16 vsi_id; vsi_id = *(u16 *)data; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_rss, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - vsi_id); + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_rss, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_RSS, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); @@ -649,13 +722,55 @@ static void nbl_disp_chan_setup_queue_resp(void *priv, u16 src_id, u16 msg_id, param = (struct nbl_chan_param_setup_queue *)data; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_queue, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ¶m->queue_param, param->is_tx); + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_queue, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + ¶m->queue_param, param->is_tx); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_QUEUE, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } +static int nbl_disp_chan_remove_queue_req(void *priv, struct nbl_txrx_queue_param *queue_param, + bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_queue param; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(¶m.queue_param, queue_param, sizeof(param.queue_param)); + param.is_tx = is_tx; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REMOVE_QUEUE, ¶m, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_queue_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_queue *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_setup_queue *)data; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_queue, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + ¶m->queue_param, param->is_tx); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_QUEUE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + static void nbl_disp_chan_remove_all_queues_req(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -686,7 +801,7 @@ static void nbl_disp_chan_remove_all_queues_resp(void *priv, u16 src_id, u16 msg vsi_id = *(u16 *)data; NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_all_queues, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_ALL_QUEUES, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); @@ -723,14 +838,27 @@ static void nbl_disp_chan_cfg_dsch_resp(void *priv, u16 src_id, u16 msg_id, param = (struct nbl_chan_param_cfg_dsch *)data; - NBL_OPS_CALL_LOCK(disp_mgt, 
res_ops->cfg_dsch, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->vld); + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_dsch, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->vld); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_DSCH, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_chan_setup_cqs_req(void *priv, u16 vsi_id, u16 real_qps) +static int nbl_disp_setup_cqs(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, real_qps, rss_indir_set); + return ret; +} + +static int nbl_disp_chan_setup_cqs_req(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops; @@ -743,6 +871,7 @@ static int nbl_disp_chan_setup_cqs_req(void *priv, u16 vsi_id, u16 real_qps) param.vsi_id = vsi_id; param.real_qps = real_qps; + param.rss_indir_set = rss_indir_set; NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SETUP_CQS, ¶m, sizeof(param), NULL, 0, 1); @@ -755,17 +884,23 @@ static void nbl_disp_chan_setup_cqs_resp(void *priv, u16 src_id, u16 msg_id, struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_setup_cqs *param; + struct nbl_chan_param_setup_cqs param; struct nbl_chan_ack_info chan_ack; + int copy_len; int err = NBL_CHAN_RESP_OK; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - param = (struct nbl_chan_param_setup_cqs *)data; + memset(¶m, 0, sizeof(struct nbl_chan_param_setup_cqs)); + param.rss_indir_set = true; + copy_len = data_len < sizeof(struct nbl_chan_param_setup_cqs) ? 
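/*
 * Note the ordering in the setup_cqs responder here: rss_indir_set is
 * defaulted to true before the bounded copy of the sender's payload, so an
 * older sender whose shorter payload lacks the flag keeps the historical
 * "always reprogram the RSS indirection table" behaviour; only a new
 * sender can clear it.
 */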
+ data_len : sizeof(struct nbl_chan_param_setup_cqs); + memcpy(¶m, data, copy_len); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_cqs, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->real_qps); + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param.vsi_id, param.real_qps, param.rss_indir_set); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_CQS, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); @@ -857,6 +992,38 @@ static void nbl_disp_chan_set_promisc_mode_resp(void *priv, u16 src_id, u16 msg_ chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } +static int nbl_disp_chan_cfg_qdisc_mqprio_req(void *priv, struct nbl_tc_qidsc_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_QDISC_MQPRIO, + param, sizeof(*param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_qdisc_mqprio_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_tc_qidsc_param *param = (struct nbl_tc_qidsc_param *)data; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_qdisc_mqprio, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_QDISC_MQPRIO, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + static int nbl_disp_chan_set_spoof_check_addr_req(void *priv, u16 vsi_id, u8 *mac) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -1076,8 +1243,66 @@ static void nbl_disp_chan_get_queue_err_stats_resp(void *priv, u16 src_id, u16 m ret, NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, src_id); } +static int nbl_disp_get_eth_abnormal_stats(void *priv, u8 eth_id, + struct nbl_eth_abnormal_stats *eth_abnormal_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_eth_abnormal_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, eth_abnormal_stats); +} + +static int +nbl_disp_chan_get_eth_abnormal_stats_req(void *priv, u8 eth_id, + struct nbl_eth_abnormal_stats *eth_abnormal_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_ABNORMAL_STATS, ð_id, + sizeof(eth_id), eth_abnormal_stats, sizeof(*eth_abnormal_stats), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_eth_abnormal_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct 
nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_eth_abnormal_stats eth_abnormal_stats = { 0 }; + int err = NBL_CHAN_RESP_OK; + int ret; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_eth_abnormal_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u8 *)data, + ð_abnormal_stats); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get eth abnormal stats resp failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_ABNORMAL_STATS, msg_id, err, + ð_abnormal_stats, sizeof(eth_abnormal_stats)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_ETH_ABNORMAL_STATS, src_id); +} + static void nbl_disp_chan_get_coalesce_req(void *priv, u16 vector_id, - struct ethtool_coalesce *ec) + struct nbl_chan_param_get_coalesce *ec) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops; @@ -1101,7 +1326,7 @@ static void nbl_disp_chan_get_coalesce_resp(void *priv, u16 src_id, u16 msg_id, struct nbl_channel_ops *chan_ops; struct nbl_chan_ack_info chan_ack; int ret = NBL_CHAN_RESP_OK; - struct ethtool_coalesce ec = { 0 }; + struct nbl_chan_param_get_coalesce ec = { 0 }; u16 vector_id; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); @@ -1281,7 +1506,7 @@ static void nbl_disp_chan_get_rxfh_rss_key_resp(void *priv, u16 src_id, u16 msg_ kfree(rss_key); } -static void nbl_disp_chan_get_rxfh_rss_alg_sel_req(void *priv, u8 *rss_alg_sel, u8 eth_id) +static void nbl_disp_chan_get_rxfh_rss_alg_sel_req(void *priv, u16 vsi_id, u8 *rss_alg_sel) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops; @@ -1291,9 +1516,9 @@ static void nbl_disp_chan_get_rxfh_rss_alg_sel_req(void *priv, u8 *rss_alg_sel, chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_SEND(chan_send, common->mgt_pf, - NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, ð_id, - sizeof(eth_id), rss_alg_sel, sizeof(u8), 1); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, &vsi_id, + sizeof(vsi_id), rss_alg_sel, sizeof(u8), 1); chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } @@ -1304,59 +1529,75 @@ static void nbl_disp_chan_get_rxfh_rss_alg_sel_resp(void *priv, u16 src_id, u16 struct nbl_resource_ops *res_ops; struct nbl_channel_ops *chan_ops; struct nbl_chan_ack_info chan_ack; - u8 rss_alg_sel, eth_id; + u16 vsi_id; + u8 rss_alg_sel; int ret = NBL_CHAN_RESP_OK; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - eth_id = *(u8 *)data; + vsi_id = *(u16 *)data; NBL_OPS_CALL(res_ops->get_rss_alg_sel, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &rss_alg_sel, eth_id)); + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, &rss_alg_sel)); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, msg_id, ret, &rss_alg_sel, sizeof(rss_alg_sel)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_chan_get_phy_caps_req(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps) +static int nbl_disp_set_rxfh_rss_alg_sel(void *priv, u16 vsi_id, u8 rss_alg_sel) +{ + struct 
nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->set_rss_alg_sel, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, rss_alg_sel)); + return ret; +} + +static int nbl_disp_chan_set_rxfh_rss_alg_sel_req(void *priv, u16 vsi_id, u8 rss_alg_sel) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_rxfh_rss_alg_sel param = {0}; struct nbl_chan_send_info chan_send = {0}; struct nbl_common_info *common; chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PHY_CAPS, ð_id, - sizeof(eth_id), phy_caps, sizeof(*phy_caps), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + param.vsi_id = vsi_id; + param.rss_alg_sel = rss_alg_sel; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_RXFH_RSS_ALG_SEL, ¶m, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_phy_caps_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_set_rxfh_rss_alg_sel_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_rxfh_rss_alg_sel *param; struct nbl_chan_ack_info chan_ack; - int ret = NBL_CHAN_RESP_OK; - struct nbl_phy_caps phy_caps = { 0 }; - u8 eth_id; + int err = NBL_CHAN_RESP_OK; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - eth_id = *(u8 *)data; - - NBL_OPS_CALL(res_ops->get_phy_caps, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &phy_caps)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PHY_CAPS, msg_id, ret, - &phy_caps, sizeof(phy_caps)); + param = (struct nbl_chan_param_set_rxfh_rss_alg_sel *)data; + err = NBL_OPS_CALL(res_ops->set_rss_alg_sel, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->rss_alg_sel)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_RXFH_RSS_ALG_SEL, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_chan_get_phy_state_req(void *priv, u8 eth_id, struct nbl_phy_state *phy_state) +static void nbl_disp_chan_get_phy_caps_req(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops; @@ -1366,30 +1607,30 @@ static void nbl_disp_chan_get_phy_state_req(void *priv, u8 eth_id, struct nbl_ph chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PHY_STATE, ð_id, - sizeof(eth_id), phy_state, sizeof(*phy_state), 1); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PHY_CAPS, ð_id, + sizeof(eth_id), phy_caps, sizeof(*phy_caps), 1); chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_phy_state_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_phy_caps_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops 
*res_ops; struct nbl_channel_ops *chan_ops; struct nbl_chan_ack_info chan_ack; int ret = NBL_CHAN_RESP_OK; - struct nbl_phy_state phy_state = { 0 }; + struct nbl_phy_caps phy_caps = { 0 }; u8 eth_id; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); eth_id = *(u8 *)data; - NBL_OPS_CALL(res_ops->get_phy_state, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &phy_state)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PHY_STATE, msg_id, ret, - &phy_state, sizeof(phy_state)); + NBL_OPS_CALL(res_ops->get_phy_caps, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &phy_caps)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PHY_CAPS, msg_id, ret, + &phy_caps, sizeof(phy_caps)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } @@ -1444,71 +1685,142 @@ static void nbl_disp_chan_set_sfp_state_resp(void *priv, u16 src_id, u16 msg_id, ret, NBL_CHAN_MSG_SET_SFP_STATE, src_id); } -static u64 nbl_disp_chan_get_real_hw_addr_req(void *priv, u16 vsi_id) +static void nbl_disp_chan_register_rdma_req(void *priv, u16 vsi_id, + struct nbl_rdma_register_param *param) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_common_info *common; - u64 addr = 0; - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_SEND(chan_send, common->mgt_pf, - NBL_CHAN_MSG_GET_REAL_HW_ADDR, &vsi_id, - sizeof(vsi_id), &addr, sizeof(addr), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - return addr; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_REGISTER_RDMA, + &vsi_id, sizeof(vsi_id), param, sizeof(*param), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_real_hw_addr_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_register_rdma_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_rdma_register_param result = {0}; struct nbl_chan_ack_info chan_ack; + u16 *vsi_id; int ret = NBL_CHAN_RESP_OK; - u16 vsi_id; - u64 addr; - vsi_id = *(u16 *)data; - addr = NBL_OPS_CALL(res_ops->get_real_hw_addr, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REAL_HW_ADDR, msg_id, - ret, &addr, sizeof(addr)); + vsi_id = (u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_rdma, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *vsi_id, &result); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_RDMA, + msg_id, ret, &result, sizeof(result)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static u16 nbl_disp_chan_get_function_id_req(void *priv, u16 vsi_id) +static void nbl_disp_chan_unregister_rdma_req(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops; struct nbl_chan_send_info chan_send = {0}; struct nbl_common_info *common; - u16 func_id = 0; + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); common = NBL_DISP_MGT_TO_COMMON(disp_mgt); NBL_CHAN_SEND(chan_send, 
common->mgt_pf, - NBL_CHAN_MSG_GET_FUNCTION_ID, &vsi_id, - sizeof(vsi_id), &func_id, sizeof(func_id), 1); + NBL_CHAN_MSG_UNREGISTER_RDMA, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - - return func_id; } -static void nbl_disp_chan_get_function_id_resp(void *priv, u16 src_id, u16 msg_id, +static void nbl_disp_chan_unregister_rdma_resp(void *priv, u16 src_id, u16 msg_id, void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; struct nbl_chan_ack_info chan_ack; int ret = NBL_CHAN_RESP_OK; - u16 vsi_id, func_id; + u16 *vsi_id; - vsi_id = *(u16 *)data; + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + vsi_id = (u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_rdma, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *vsi_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_RDMA, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u64 nbl_disp_chan_get_real_hw_addr_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + u64 addr = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_REAL_HW_ADDR, &vsi_id, + sizeof(vsi_id), &addr, sizeof(addr), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return addr; +} + +static void nbl_disp_chan_get_real_hw_addr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vsi_id; + u64 addr; + + vsi_id = *(u16 *)data; + addr = NBL_OPS_CALL(res_ops->get_real_hw_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REAL_HW_ADDR, msg_id, + ret, &addr, sizeof(addr)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_chan_get_function_id_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + u16 func_id = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_FUNCTION_ID, &vsi_id, + sizeof(vsi_id), &func_id, sizeof(func_id), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return func_id; +} + +static void nbl_disp_chan_get_function_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = 
NBL_CHAN_RESP_OK; + u16 vsi_id, func_id; + + vsi_id = *(u16 *)data; func_id = NBL_OPS_CALL(res_ops->get_function_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); @@ -1590,6 +1902,38 @@ static void nbl_disp_chan_get_mbx_irq_num_resp(void *priv, u16 src_id, u16 msg_i chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } +static void nbl_disp_chan_clear_accel_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CLEAR_ACCEL_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_clear_accel_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u16 *vsi_id = (u16 *)data; + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->clear_accel_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CLEAR_ACCEL_FLOW, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + static void nbl_disp_chan_clear_flow_req(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -1652,2970 +1996,8440 @@ static void nbl_disp_chan_clear_queues_resp(void *priv, u16 src_id, u16 msg_id, chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static u16 nbl_disp_chan_get_vsi_id_req(void *priv, u16 func_id, u16 type) +static int nbl_disp_chan_disable_phy_flow_req(void *priv, u8 eth_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_get_vsi_id param = {0}; - struct nbl_chan_param_get_vsi_id result = {0}; - struct nbl_chan_send_info chan_send; - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - - param.type = type; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_VSI_ID, ¶m, - sizeof(param), &result, sizeof(result), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - return result.vsi_id; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DISABLE_PHY_FLOW, ð_id, + sizeof(eth_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_vsi_id_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_disable_phy_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_get_vsi_id *param; - struct nbl_chan_param_get_vsi_id result; struct nbl_chan_ack_info chan_ack; + u8 
*eth_id = (u8 *)data; int err = NBL_CHAN_RESP_OK; - int ret = 0; - - param = (struct nbl_chan_param_get_vsi_id *)data; + int ret; - result.vsi_id = NBL_OPS_CALL(res_ops->get_vsi_id, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->type)); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->disable_phy_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *eth_id); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp disable phy flow resp failed with ret: %d\n", ret); + } - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VSI_ID, - msg_id, err, &result, sizeof(result)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DISABLE_PHY_FLOW, msg_id, err, NULL, 0); ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); if (ret) - dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_GET_VSI_ID); + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_DISABLE_PHY_FLOW, src_id); } -static void nbl_disp_chan_get_eth_id_req(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id) +static int nbl_disp_chan_enable_phy_flow_req(void *priv, u8 eth_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_get_eth_id param = {0}; - struct nbl_chan_param_get_eth_id result = {0}; - struct nbl_chan_send_info chan_send; + struct nbl_chan_send_info chan_send = {0}; struct nbl_common_info *common; common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - param.vsi_id = vsi_id; - - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_ID, ¶m, - sizeof(param), &result, sizeof(result), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - - *eth_mode = result.eth_mode; - *eth_id = result.eth_id; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ENABLE_PHY_FLOW, ð_id, + sizeof(eth_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_eth_id_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_enable_phy_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_get_eth_id *param; - struct nbl_chan_param_get_eth_id result = {0}; struct nbl_chan_ack_info chan_ack; + u8 *eth_id = (u8 *)data; int err = NBL_CHAN_RESP_OK; - int ret = 0; - - param = (struct nbl_chan_param_get_eth_id *)data; - - NBL_OPS_CALL(res_ops->get_eth_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, - &result.eth_mode, &result.eth_id)); + int ret; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_ID, - msg_id, err, &result, sizeof(result)); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_phy_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *eth_id); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp enable phy flow resp failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ENABLE_PHY_FLOW, msg_id, err, NULL, 0); ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); if (ret) - dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_GET_ETH_ID); + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, 
src_id: %d\n", + ret, NBL_CHAN_MSG_ENABLE_PHY_FLOW, src_id); } -static int nbl_disp_alloc_rings(void *priv, struct net_device *netdev, u16 tx_num, - u16 rx_num, u16 tx_desc_num, u16 rx_desc_num) +static void nbl_disp_chan_init_acl_req(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - int ret = 0; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL(res_ops->alloc_rings, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), netdev, tx_num, - rx_num, tx_desc_num, rx_desc_num)); - return ret; + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_INIT_ACL, NULL, 0, NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_remove_rings(void *priv) +static void nbl_disp_chan_init_acl_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - - if (!disp_mgt) - return; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->remove_rings, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_acl, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_ACL, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static dma_addr_t nbl_disp_start_tx_ring(void *priv, u8 ring_index) +static void nbl_disp_chan_uninit_acl_req(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - dma_addr_t addr = 0; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; - if (!disp_mgt) - return -EINVAL; + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - addr = NBL_OPS_CALL(res_ops->start_tx_ring, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); - return addr; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_UNINIT_ACL, NULL, 0, NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_stop_tx_ring(void *priv, u8 ring_index) +static void nbl_disp_chan_uninit_acl_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - - if (!disp_mgt) - return; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->stop_tx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->uninit_acl, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNINIT_ACL, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static dma_addr_t nbl_disp_start_rx_ring(void *priv, u8 
ring_index, bool use_napi) +static int nbl_disp_chan_set_upcall_rule_req(void *priv, u8 eth_id, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - dma_addr_t addr = 0; - - if (!disp_mgt) - return -EINVAL; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_upcall param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - addr = NBL_OPS_CALL(res_ops->start_rx_ring, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, use_napi)); + param.eth_id = eth_id; + param.vsi_id = vsi_id; - return addr; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_UPCALL_RULE, + ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_stop_rx_ring(void *priv, u8 ring_index) +static void nbl_disp_chan_set_upcall_rule_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_upcall *param; + int err = NBL_CHAN_RESP_OK; + int ret; - if (!disp_mgt) - return; + param = (struct nbl_chan_param_set_upcall *)data; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->stop_rx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + ret = NBL_OPS_CALL(res_ops->set_upcall_rule, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->vsi_id)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp set upcall rule resp failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_UPCALL_RULE, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_SET_UPCALL_RULE, src_id); } -static void nbl_disp_kick_rx_ring(void *priv, u16 index) +static int nbl_disp_chan_unset_upcall_rule_req(void *priv, u8 eth_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->kick_rx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index)); -} + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; -static int nbl_disp_dump_ring(void *priv, struct seq_file *m, bool is_tx, int index) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - int ret = 0; + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL(res_ops->dump_ring, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m, is_tx, index)); - return ret; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_UNSET_UPCALL_RULE, + ð_id, sizeof(eth_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static int nbl_disp_dump_ring_stats(void *priv, struct seq_file *m, bool is_tx, int index) +static void 
nbl_disp_chan_unset_upcall_rule_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - int ret = 0; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u8 *eth_id = (u8 *)data; + int err = NBL_CHAN_RESP_OK; + int ret; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL(res_ops->dump_ring_stats, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m, is_tx, index)); - return ret; + ret = NBL_OPS_CALL(res_ops->unset_upcall_rule, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *eth_id)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp unset upcall rule resp failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNSET_UPCALL_RULE, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_UNSET_UPCALL_RULE, src_id); } -static struct napi_struct *nbl_disp_get_vector_napi(void *priv, u16 index) +static void nbl_disp_chan_set_shaping_dport_vld_req(void *priv, u8 eth_id, bool vld) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->get_vector_napi, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index)); -} + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_func_vld param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); -static void nbl_disp_set_vector_info(void *priv, u8 *irq_enable_base, - u32 irq_data, u16 index, bool mask_en) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; + param.eth_id = eth_id; + param.vld = vld; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->set_vector_info, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - irq_enable_base, irq_data, index, mask_en)); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_SET_SHAPING_DPORT_VLD, + ¶m, sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_register_vsi_ring(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num) +static void nbl_disp_chan_set_shaping_dport_vld_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_func_vld *param; + int err = NBL_CHAN_RESP_OK; - NBL_OPS_CALL(res_ops->register_vsi_ring, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_index, ring_offset, ring_num)); + param = (struct nbl_chan_param_set_func_vld *)data; + + NBL_OPS_CALL(res_ops->set_shaping_dport_vld, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->vld)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_SHAPING_DPORT_VLD, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void 
nbl_disp_get_res_pt_ops(void *priv, struct nbl_resource_pt_ops *pt_ops) +static void nbl_disp_chan_set_dport_fc_th_vld_req(void *priv, u8 eth_id, bool vld) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_func_vld param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_resource_pt_ops, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), pt_ops)); + param.eth_id = eth_id; + param.vld = vld; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_SET_DPORT_FC_TH_VLD, + ¶m, sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static int nbl_disp_register_net(void *priv, struct nbl_register_net_param *register_param, - struct nbl_register_net_result *register_result) +static void nbl_disp_chan_set_dport_fc_th_vld_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - int ret = 0; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_func_vld *param; + int err = NBL_CHAN_RESP_OK; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_net, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, - register_param, register_result); - return ret; -} + param = (struct nbl_chan_param_set_func_vld *)data; -static int nbl_disp_alloc_txrx_queues(void *priv, u16 vsi_id, u16 queue_num) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - int ret = 0; + NBL_OPS_CALL(res_ops->set_dport_fc_th_vld, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->vld)); - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->alloc_txrx_queues, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, queue_num); - return ret; + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_DPORT_FC_TH_VLD, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_free_txrx_queues(void *priv, u16 vsi_id) +static u16 nbl_disp_chan_get_vsi_id_req(void *priv, u16 func_id, u16 type) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_vsi_id param = {0}; + struct nbl_chan_param_get_vsi_id result = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->free_txrx_queues, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); -} + param.type = type; -static int nbl_disp_register_vsi2q(void *priv, u16 vsi_index, u16 vsi_id, - u16 queue_offset, u16 queue_num) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_VSI_ID, ¶m, + sizeof(param), &result, sizeof(result), 1); + 
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_vsi2q, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_index, vsi_id, - queue_offset, queue_num); + return result.vsi_id; } -static int nbl_disp_setup_q2vsi(void *priv, u16 vsi_id) +static void nbl_disp_chan_get_vsi_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_vsi_id *param; + struct nbl_chan_param_get_vsi_id result; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_q2vsi, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); -} + param = (struct nbl_chan_param_get_vsi_id *)data; -static void nbl_disp_remove_q2vsi(void *priv, u16 vsi_id) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + result.vsi_id = NBL_OPS_CALL(res_ops->get_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->type)); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_q2vsi, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VSI_ID, + msg_id, err, &result, sizeof(result)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_GET_VSI_ID); } -static int nbl_disp_setup_rss(void *priv, u16 vsi_id) +static void +nbl_disp_chan_get_eth_id_req(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_eth_id param = {0}; + struct nbl_chan_param_get_eth_id result = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_rss, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_ID, ¶m, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + *eth_mode = result.eth_mode; + *eth_id = result.eth_id; + *logic_eth_id = result.logic_eth_id; } -static void nbl_disp_remove_rss(void *priv, u16 vsi_id) +static void nbl_disp_chan_get_eth_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_eth_id *param; + struct nbl_chan_param_get_eth_id result = {0}; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_rss, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + param = (struct nbl_chan_param_get_eth_id *)data; + + NBL_OPS_CALL(res_ops->get_eth_id, 
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, + &result.eth_mode, &result.eth_id, &result.logic_eth_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_ID, + msg_id, err, &result, sizeof(result)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_GET_ETH_ID); } -static int nbl_disp_setup_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx) +static int nbl_disp_alloc_rings(void *priv, struct net_device *netdev, + struct nbl_ring_param *ring_param) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; int ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_queue, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, is_tx); + ret = NBL_OPS_CALL(res_ops->alloc_rings, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), netdev, ring_param)); return ret; } -static void nbl_disp_remove_all_queues(void *priv, u16 vsi_id) +static void nbl_disp_remove_rings(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; + if (!disp_mgt) + return; + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_all_queues, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + NBL_OPS_CALL(res_ops->remove_rings, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); } -static int nbl_disp_cfg_dsch(void *priv, u16 vsi_id, bool vld) +static dma_addr_t nbl_disp_start_tx_ring(void *priv, u8 ring_index) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; - int ret = 0; + dma_addr_t addr = 0; + + if (!disp_mgt) + return -EINVAL; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_dsch, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vld); - return ret; + addr = NBL_OPS_CALL(res_ops->start_tx_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + return addr; } -static int nbl_disp_setup_cqs(void *priv, u16 vsi_id, u16 real_qps) +static void nbl_disp_stop_tx_ring(void *priv, u8 ring_index) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; - int ret = 0; + + if (!disp_mgt) + return; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_cqs, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, real_qps); - return ret; + NBL_OPS_CALL(res_ops->stop_tx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); } -static void nbl_disp_remove_cqs(void *priv, u16 vsi_id) +static dma_addr_t nbl_disp_start_rx_ring(void *priv, u8 ring_index, bool use_napi) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; + dma_addr_t addr = 0; + + if (!disp_mgt) + return -EINVAL; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_cqs, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + addr = NBL_OPS_CALL(res_ops->start_rx_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, use_napi)); + + return addr; } -static int nbl_disp_enable_msix_irq(void *priv, u16 global_vector_id) +static void nbl_disp_stop_rx_ring(void *priv, u8 ring_index) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; - int ret = 0; if (!disp_mgt) - return -EINVAL; + return; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = 
NBL_OPS_CALL(res_ops->enable_msix_irq, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), global_vector_id)); - return ret; + NBL_OPS_CALL(res_ops->stop_rx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); } -static u8 *nbl_disp_get_msix_irq_enable_info(void *priv, u16 global_vector_id, u32 *irq_data) +static void nbl_disp_kick_rx_ring(void *priv, u16 index) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; - if (!disp_mgt) - return NULL; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->get_msix_irq_enable_info, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), global_vector_id, irq_data)); + NBL_OPS_CALL(res_ops->kick_rx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index)); } -static int nbl_disp_add_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) +static int nbl_disp_dump_ring(void *priv, struct seq_file *m, bool is_tx, int index) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; int ret = 0; - if (!disp_mgt || !mac) - return -EINVAL; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_macvlan, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, vlan, vsi); + ret = NBL_OPS_CALL(res_ops->dump_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m, is_tx, index)); return ret; } -static void nbl_disp_del_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - - if (!disp_mgt || !mac) - return; - - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_macvlan, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, vlan, vsi); -} - -static int nbl_disp_add_multi_rule(void *priv, u16 vsi) +static int nbl_disp_dump_ring_stats(void *priv, struct seq_file *m, bool is_tx, int index) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; int ret = 0; - if (!disp_mgt) - return -EINVAL; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_rule, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + ret = NBL_OPS_CALL(res_ops->dump_ring_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m, is_tx, index)); return ret; } -static void nbl_disp_del_multi_rule(void *priv, u16 vsi) +static void nbl_disp_set_rings_xdp_prog(void *priv, void *prog) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; - if (!disp_mgt) - return; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_rule, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); -} - -static int nbl_disp_setup_multi_group(void *priv) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_multi_group, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); -} - -static void nbl_disp_remove_multi_group(void *priv) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_multi_group, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_OPS_CALL(res_ops->set_rings_xdp_prog, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), prog)); } -static void nbl_disp_get_net_stats(void *priv, struct nbl_stats *net_stats) +static int nbl_disp_register_xdp_rxq(void *priv, u8 ring_index) { 
struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; + int ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_net_stats, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), net_stats)); + ret = NBL_OPS_CALL(res_ops->register_xdp_rxq, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + return ret; } -static void nbl_disp_get_private_stat_len(void *priv, u32 *len) +static void nbl_disp_unregister_xdp_rxq(void *priv, u8 ring_index) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_private_stat_len, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), len); + NBL_OPS_CALL(res_ops->unregister_xdp_rxq, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); } -static void nbl_disp_get_private_stat_data(void *priv, u32 eth_id, u64 *data, u32 data_len) +static struct nbl_napi_struct *nbl_disp_get_vector_napi(void *priv, u16 index) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_private_stat_data, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, data); -} - -static void nbl_disp_get_private_stat_data_req(void *priv, u32 eth_id, u64 *data, u32 data_len) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_get_private_stat_data param = {0}; - struct nbl_common_info *common; - - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - - param.eth_id = eth_id; - param.data_len = data_len; - - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_STATS, ¶m, - sizeof(param), data, data_len, 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); -} - -static void nbl_disp_chan_get_private_stat_data_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_get_private_stat_data *param; - struct nbl_chan_ack_info chan_ack; - u64 *recv_data; - int ret = NBL_CHAN_RESP_OK; - - param = (struct nbl_chan_param_get_private_stat_data *)data; - recv_data = kmalloc(param->data_len, GFP_ATOMIC); - if (!recv_data) { - dev_err(dev, "Allocate memory to private_stat_data failed\n"); - return; - } - - NBL_OPS_CALL(res_ops->get_private_stat_data, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, recv_data)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_STATS, msg_id, - ret, recv_data, param->data_len); - chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); - - kfree(recv_data); + return NBL_OPS_CALL(res_ops->get_vector_napi, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index)); } -static void nbl_disp_fill_private_stat_strings(void *priv, u8 *strings) +static void nbl_disp_set_vector_info(void *priv, u8 *irq_enable_base, + u32 irq_data, u16 index, bool mask_en) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->fill_private_stat_strings, - 
NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), strings); + NBL_OPS_CALL(res_ops->set_vector_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + irq_enable_base, irq_data, index, mask_en)); } -static u16 nbl_disp_get_max_desc_num(void *priv) +static void nbl_disp_register_vsi_ring(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - u16 ret = 0; - ret = NBL_OPS_CALL(res_ops->get_max_desc_num, ()); - return ret; + NBL_OPS_CALL(res_ops->register_vsi_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_index, ring_offset, ring_num)); } -static u16 nbl_disp_get_min_desc_num(void *priv) +static void nbl_disp_get_res_pt_ops(void *priv, struct nbl_resource_pt_ops *pt_ops) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - u16 ret = 0; + struct nbl_resource_ops *res_ops; - ret = NBL_OPS_CALL(res_ops->get_min_desc_num, ()); - return ret; + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_resource_pt_ops, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), pt_ops)); } -static int nbl_disp_set_spoof_check_addr(void *priv, u16 vsi_id, u8 *mac) +static int nbl_disp_register_net(void *priv, struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; int ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_spoof_check_addr, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, mac); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, + register_param, register_result); return ret; } -static int nbl_disp_set_vf_spoof_check(void *priv, u16 vsi_id, int vf_id, u8 enable) +static int nbl_disp_alloc_txrx_queues(void *priv, u16 vsi_id, u16 queue_num) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; int ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_vf_spoof_check, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vf_id, enable); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->alloc_txrx_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, queue_num); return ret; } -static void nbl_disp_get_base_mac_addr(void *priv, u8 *mac) +static void nbl_disp_free_txrx_queues(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_base_mac_addr, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->free_txrx_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); } -static u16 nbl_disp_get_tx_desc_num(void *priv, u32 ring_index) +static int nbl_disp_register_vsi2q(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - u16 ret = 0; - ret = NBL_OPS_CALL(res_ops->get_tx_desc_num, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); - return ret; + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_vsi2q, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_index, vsi_id, + queue_offset, queue_num); } -static u16 
nbl_disp_get_rx_desc_num(void *priv, u32 ring_index) +static int nbl_disp_setup_q2vsi(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - u16 ret = 0; - ret = NBL_OPS_CALL(res_ops->get_rx_desc_num, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); - return ret; + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_q2vsi, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); } -static void nbl_disp_set_tx_desc_num(void *priv, u32 ring_index, u16 desc_num) +static void nbl_disp_remove_q2vsi(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->set_tx_desc_num, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, desc_num)); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_q2vsi, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); } -static void nbl_disp_set_rx_desc_num(void *priv, u32 ring_index, u16 desc_num) +static int nbl_disp_setup_rss(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->set_rx_desc_num, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, desc_num)); -} - -static void nbl_disp_get_queue_stats(void *priv, u8 queue_id, - struct nbl_queue_stats *queue_stats, bool is_tx) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_queue_stats, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_id, queue_stats, is_tx)); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_rss, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); } -static void nbl_disp_get_firmware_version(void *priv, char *firmware_verion, u8 max_len) +static void nbl_disp_remove_rss(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - int ret = 0; - ret = NBL_OPS_CALL(res_ops->get_firmware_version, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), firmware_verion)); - if (ret) - dev_err(dev, "get emp version failed with ret: %d\n", ret); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_rss, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); } -static int nbl_disp_get_driver_info(void *priv, struct nbl_driver_info *driver_info) +static int nbl_disp_setup_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_resource_ops *res_ops; + int ret = 0; - return NBL_OPS_CALL(res_ops->get_driver_info, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), driver_info)); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, is_tx); + return ret; } -static void nbl_disp_get_coalesce(void *priv, u16 vector_id, - struct ethtool_coalesce *ec) +static void nbl_disp_remove_all_queues(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_coalesce, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, ec)); + 
NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_all_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); } -static void nbl_disp_set_coalesce(void *priv, u16 vector_id, u16 vector_num, u16 pnum, u16 rate) +static int nbl_disp_remove_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_coalesce, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, - vector_num, pnum, rate); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, is_tx); } -static void nbl_disp_get_rxfh_indir_size(void *priv, u16 vsi_id, u32 *rxfh_indir_size) +static int nbl_disp_cfg_dsch(void *priv, u16 vsi_id, bool vld) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; + int ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_rxfh_indir_size, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, rxfh_indir_size)); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_dsch, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vld); + return ret; } -static void nbl_disp_get_rxfh_rss_key_size(void *priv, u32 *rxfh_rss_key_size) +static void nbl_disp_remove_cqs(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_rxfh_rss_key_size, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rxfh_rss_key_size)); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); } -static void nbl_disp_get_rxfh_indir(void *priv, u16 vsi_id, u32 *indir, u32 indir_size) +static u8 *nbl_disp_get_msix_irq_enable_info(void *priv, u16 global_vector_id, u32 *irq_data) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; + if (!disp_mgt) + return NULL; + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_rxfh_indir, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, indir)); + return NBL_OPS_CALL(res_ops->get_msix_irq_enable_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), global_vector_id, irq_data)); } -static void nbl_disp_get_rxfh_rss_key(void *priv, u8 *rss_key, u32 key_size) +static int nbl_disp_add_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt || !mac) + return -EINVAL; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_rxfh_rss_key, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rss_key)); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, vlan, vsi); + return ret; } -static void nbl_disp_get_rxfh_rss_alg_sel(void *priv, u8 *alg_sel, u8 eth_id) +static void nbl_disp_del_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; + if (!disp_mgt || !mac) + return; + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_rss_alg_sel, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), alg_sel, eth_id)); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, vlan, vsi); } -static void nbl_disp_get_phy_caps(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps) +static 
int nbl_disp_add_multi_rule(void *priv, u16 vsi) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; + u8 broadcast_mac[ETH_ALEN]; + int ret = 0; + memset(broadcast_mac, 0xFF, ETH_ALEN); res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_phy_caps, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, phy_caps)); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), broadcast_mac, 0, vsi); + + return ret; } -static void nbl_disp_get_phy_state(void *priv, u8 eth_id, struct nbl_phy_state *phy_state) +static void nbl_disp_del_multi_rule(void *priv, u16 vsi) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; + u8 broadcast_mac[ETH_ALEN]; + memset(broadcast_mac, 0xFF, ETH_ALEN); res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_phy_state, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, phy_state)); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), broadcast_mac, 0, vsi); } -static int nbl_disp_set_sfp_state(void *priv, u8 eth_id, u8 state) +static int nbl_disp_setup_multi_group(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - int ret = 0; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL(res_ops->set_sfp_state, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, state)); - return ret; + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_multi_group, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); } -static int nbl_disp_init_chip_module(void *priv) +static void nbl_disp_remove_multi_group(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - int ret = 0; - - if (!disp_mgt) - return -EINVAL; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL(res_ops->init_chip_module, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); - return ret; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_multi_group, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); } -static int nbl_disp_queue_init(void *priv) +static void nbl_disp_get_net_stats(void *priv, struct nbl_stats *net_stats) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; - int ret = 0; - - if (!disp_mgt) - return -EINVAL; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL(res_ops->queue_init, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); - return ret; + NBL_OPS_CALL(res_ops->get_net_stats, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), net_stats)); } -static int nbl_disp_vsi_init(void *priv) +static void nbl_disp_get_private_stat_len(void *priv, u32 *len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; - int ret = 0; - - if (!disp_mgt) - return -EINVAL; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL(res_ops->vsi_init, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); - return ret; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_private_stat_len, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), len); } -static int nbl_disp_configure_msix_map(void *priv, u16 num_net_msix, u16 num_others_msix, - bool net_msix_mask_en) +static int nbl_disp_get_pause_stats(void *priv, u32 eth_id, + struct nbl_pause_stats *pause_stats, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = 
(struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; - int ret = 0; - - if (!disp_mgt) - return -EINVAL; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_msix_map, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, num_net_msix, - num_others_msix, net_msix_mask_en); - return ret; + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_pause_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, pause_stats); } -static int nbl_disp_chan_configure_msix_map_req(void *priv, u16 num_net_msix, u16 num_others_msix, - bool net_msix_mask_en) +static int nbl_disp_chan_get_pause_stats_req(void *priv, u32 eth_id, + struct nbl_pause_stats *pause_stats, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_cfg_msix_map param = {0}; - struct nbl_chan_send_info chan_send; + struct nbl_chan_send_info chan_send = {0}; struct nbl_common_info *common; - if (!disp_mgt) - return -EINVAL; - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - param.num_net_msix = num_net_msix; - param.num_others_msix = num_others_msix; - param.msix_mask_en = net_msix_mask_en; - - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, - &param, sizeof(param), NULL, 0, 1); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PAUSE_STATS, &eth_id, + sizeof(eth_id), pause_stats, data_len, 1); return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_configure_msix_map_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_pause_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_cfg_msix_map *param; struct nbl_chan_ack_info chan_ack; + struct nbl_pause_stats pause_stats = {0}; + u32 *param = (u32 *)(data); int err = NBL_CHAN_RESP_OK; - int ret = 0; - - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - param = (struct nbl_chan_param_cfg_msix_map *)data; + int ret; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_msix_map, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, - param->num_net_msix, param->num_others_msix, param->msix_mask_en); - if (ret) + ret = NBL_OPS_CALL(res_ops->get_pause_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *param, &pause_stats)); + if (ret) { err = NBL_CHAN_RESP_ERR; - - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, msg_id, err, NULL, 0); + dev_err(dev, "disp get eth pause stats failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PAUSE_STATS, msg_id, + err, &pause_stats, sizeof(struct nbl_pause_stats)); ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); if (ret) - dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP); + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_PAUSE_STATS, src_id); } -static int nbl_disp_chan_destroy_msix_map_req(void *priv) +static void nbl_disp_get_private_stat_data(void *priv, u32 
eth_id, u64 *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_private_stat_data, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, data, data_len); +} + +static void nbl_disp_get_private_stat_data_req(void *priv, u32 eth_id, u64 *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops; - struct nbl_chan_send_info chan_send; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_private_stat_data param = {0}; struct nbl_common_info *common; - if (!disp_mgt) - return -EINVAL; - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DESTROY_MSIX_MAP, - NULL, 0, NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + param.eth_id = eth_id; + param.data_len = data_len; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_STATS, &param, + sizeof(param), data, data_len, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_destroy_msix_map_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_private_stat_data_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_cfg_msix_map *param; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_private_stat_data *param; struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; - int ret = 0; + u64 *recv_data; + int ret = NBL_CHAN_RESP_OK; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - param = (struct nbl_chan_param_cfg_msix_map *)data; + param = (struct nbl_chan_param_get_private_stat_data *)data; + recv_data = kmalloc(param->data_len, GFP_ATOMIC); + if (!recv_data) { + dev_err(dev, "failed to allocate memory for private_stat_data\n"); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_STATS, msg_id, + NBL_CHAN_RESP_ERR, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + return; + } - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_msix_map, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); - if (ret) - err = NBL_CHAN_RESP_ERR; + NBL_OPS_CALL(res_ops->get_private_stat_data, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, + recv_data, param->data_len)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_STATS, msg_id, + ret, recv_data, param->data_len); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DESTROY_MSIX_MAP, msg_id, err, NULL, 0); - ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); - if (ret) - dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_DESTROY_MSIX_MAP); + kfree(recv_data); } -static int nbl_disp_chan_enable_mailbox_irq_req(void *priv, u16 vector_id, bool enable_msix) +static int nbl_disp_get_eth_ctrl_stats(void *priv, u32 eth_id, + struct nbl_eth_ctrl_stats *eth_ctrl_stats, + u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + 
return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_eth_ctrl_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, + eth_ctrl_stats); +} + +static int nbl_disp_chan_get_eth_ctrl_stats_req(void *priv, u32 eth_id, + struct nbl_eth_ctrl_stats *eth_ctrl_stats, + u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_enable_mailbox_irq param = {0}; - struct nbl_chan_send_info chan_send; + struct nbl_chan_send_info chan_send = {0}; struct nbl_common_info *common; - if (!disp_mgt) - return -EINVAL; - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - param.vector_id = vector_id; - param.enable_msix = enable_msix; - - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, - &param, sizeof(param), NULL, 0, 1); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_CTRL_STATS, &eth_id, + sizeof(eth_id), eth_ctrl_stats, data_len, 1); return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_enable_mailbox_irq_resp(void *priv, u16 src_id, u16 msg_id, +static void nbl_disp_chan_get_eth_ctrl_stats_resp(void *priv, u16 src_id, u16 msg_id, void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_enable_mailbox_irq *param; + struct nbl_eth_ctrl_stats eth_ctrl_stats = {0}; struct nbl_chan_ack_info chan_ack; + u32 *param = (u32 *)(data); int err = NBL_CHAN_RESP_OK; - int ret = 0; - - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - param = (struct nbl_chan_param_enable_mailbox_irq *)data; + int ret; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_mailbox_irq, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, - param->vector_id, param->enable_msix); - if (ret) + ret = NBL_OPS_CALL(res_ops->get_eth_ctrl_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *param, &eth_ctrl_stats)); + if (ret) { err = NBL_CHAN_RESP_ERR; - - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, msg_id, err, NULL, 0); + dev_err(dev, "disp get eth ctrl stats failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_CTRL_STATS, msg_id, + err, &eth_ctrl_stats, sizeof(struct nbl_eth_ctrl_stats)); ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); if (ret) - dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ); + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_ETH_CTRL_STATS, src_id); } -static u16 nbl_disp_chan_get_global_vector_req(void *priv, u16 vsi_id, u16 local_vector_id) +static int nbl_disp_get_eth_mac_stats(void *priv, u32 eth_id, + struct nbl_eth_mac_stats *eth_mac_stats, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_eth_mac_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, eth_mac_stats); +} + +static int nbl_disp_chan_get_eth_mac_stats_req(void *priv, u32 eth_id, + struct nbl_eth_mac_stats *eth_mac_stats, + u32 
data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_get_global_vector param = {0}; - struct nbl_chan_param_get_global_vector result = {0}; - struct nbl_chan_send_info chan_send; + struct nbl_chan_send_info chan_send = {0}; struct nbl_common_info *common; - if (!disp_mgt) - return -EINVAL; - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - param.vsi_id = vsi_id; - param.vector_id = local_vector_id; - - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_GLOBAL_VECTOR, &param, - sizeof(param), &result, sizeof(result), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_MAC_STATS, &eth_id, + sizeof(eth_id), eth_mac_stats, data_len, 1); - return result.vector_id; + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_global_vector_resp(void *priv, u16 src_id, u16 msg_id, +static void nbl_disp_chan_get_eth_mac_stats_resp(void *priv, u16 src_id, u16 msg_id, void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_get_global_vector *param; - struct nbl_chan_param_get_global_vector result; struct nbl_chan_ack_info chan_ack; + struct nbl_eth_mac_stats eth_mac_stats = {0}; + u32 *param = (u32 *)(data); int err = NBL_CHAN_RESP_OK; - int ret = 0; + int ret; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - param = (struct nbl_chan_param_get_global_vector *)data; + ret = NBL_OPS_CALL(res_ops->get_eth_mac_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *param, &eth_mac_stats)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get eth mac stats failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_MAC_STATS, msg_id, + err, &eth_mac_stats, sizeof(struct nbl_eth_mac_stats)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_ETH_MAC_STATS, src_id); +} - result.vector_id = NBL_OPS_CALL(res_ops->get_global_vector, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->vsi_id, param->vector_id)); +static int nbl_disp_get_rmon_stats(void *priv, u32 eth_id, + struct nbl_rmon_stats *rmon_stats, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_GLOBAL_VECTOR, - msg_id, err, &result, sizeof(result)); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_rmon_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, rmon_stats); +} + +static int nbl_disp_chan_get_rmon_stats_req(void *priv, u32 eth_id, + struct nbl_rmon_stats *rmon_stats, + u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + 
NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_RMON_STATS, &eth_id, + sizeof(eth_id), rmon_stats, data_len, 1); + + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_rmon_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_chan_ack_info chan_ack; + struct nbl_rmon_stats rmon_stats = {0}; + u32 *param = (u32 *)(data); + int err = NBL_CHAN_RESP_OK; + int ret; + + ret = NBL_OPS_CALL(res_ops->get_rmon_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *param, &rmon_stats)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get rmon stats failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RMON_STATS, msg_id, + err, &rmon_stats, sizeof(struct nbl_rmon_stats)); ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); if (ret) - dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_GET_GLOBAL_VECTOR); + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_RMON_STATS, src_id); } -static int nbl_disp_destroy_msix_map(void *priv) +static void nbl_disp_fill_private_stat_strings(void *priv, u8 *strings) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; - int ret = 0; - - if (!disp_mgt) - return -EINVAL; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_msix_map, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->fill_private_stat_strings, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), strings); +} + +static u16 nbl_disp_get_max_desc_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_max_desc_num, ()); return ret; } -static int nbl_disp_enable_mailbox_irq(void *priv, u16 vector_id, bool enable_msix) +static u16 nbl_disp_get_min_desc_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_min_desc_num, ()); + return ret; +} + +static int nbl_disp_cfg_qdisc_mqprio(void *priv, struct nbl_tc_qidsc_param *param) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; int ret = 0; - if (!disp_mgt) - return -EINVAL; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_mailbox_irq, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, enable_msix); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_qdisc_mqprio, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); return ret; } -static int nbl_disp_enable_abnormal_irq(void *priv, u16 vector_id, bool enable_msix) +static int nbl_disp_set_spoof_check_addr(void *priv, u16 vsi_id, u8 *mac) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; int ret = 0; - if (!disp_mgt) - return -EINVAL; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = 
NBL_OPS_CALL(res_ops->enable_abnormal_irq, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector_id, enable_msix)); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_spoof_check_addr, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, mac); return ret; } -static int nbl_disp_enable_adminq_irq(void *priv, u16 vector_id, bool enable_msix) +static int nbl_disp_set_vf_spoof_check(void *priv, u16 vsi_id, int vf_id, u8 enable) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; int ret = 0; - if (!disp_mgt) - return -EINVAL; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL(res_ops->enable_adminq_irq, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector_id, enable_msix)); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_vf_spoof_check, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vf_id, enable); return ret; } -static u16 nbl_disp_get_global_vector(void *priv, u16 vsi_id, u16 local_vector_id) +static void nbl_disp_get_base_mac_addr(void *priv, u8 *mac) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; - u16 ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL(res_ops->get_global_vector, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_vector_id)); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_base_mac_addr, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac); +} + +static u16 nbl_disp_get_tx_desc_num(void *priv, u32 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_tx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); return ret; } -static u16 nbl_disp_get_msix_entry_id(void *priv, u16 vsi_id, u16 local_vector_id) +static u16 nbl_disp_get_rx_desc_num(void *priv, u32 ring_index) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); u16 ret = 0; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL(res_ops->get_msix_entry_id, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_vector_id)); + ret = NBL_OPS_CALL(res_ops->get_rx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); return ret; } -static void nbl_disp_dump_flow(void *priv, struct seq_file *m) +static void nbl_disp_set_tx_desc_num(void *priv, u32 ring_index, u16 desc_num) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->dump_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m); + NBL_OPS_CALL(res_ops->set_tx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, desc_num)); } -static u16 nbl_disp_get_vsi_id(void *priv, u16 func_id, u16 type) +static void nbl_disp_set_rx_desc_num(void *priv, u32 ring_index, u16 desc_num) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - if (!disp_mgt) - return -EINVAL; + NBL_OPS_CALL(res_ops->set_rx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, desc_num)); +} + +static void nbl_disp_cfg_txrx_vlan(void *priv, u16 vlan_tci, u16 vlan_proto, u8 vsi_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + 
NBL_OPS_CALL(res_ops->cfg_txrx_vlan, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vlan_tci, vlan_proto, vsi_index)); +} + +static void nbl_disp_get_rep_stats(void *priv, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats, bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->get_vsi_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - func_id, type)); + NBL_OPS_CALL(res_ops->get_rep_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rep_vsi_id, rep_stats, is_tx)); } -static void nbl_disp_get_eth_id(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id) +static u16 nbl_disp_get_rep_index(void *priv, u16 rep_vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_resource_ops *res_ops; - NBL_OPS_CALL(res_ops->get_eth_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - vsi_id, eth_mode, eth_id)); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_rep_index, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rep_vsi_id)); } -static int nbl_disp_chan_add_lag_flow_req(void *priv, u16 vsi_id) +static void nbl_disp_get_queue_stats(void *priv, u8 queue_id, + struct nbl_queue_stats *queue_stats, bool is_tx) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_send_info chan_send; + struct nbl_resource_ops *res_ops; - NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_LAG_FLOW, &vsi_id, sizeof(vsi_id), - NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_queue_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_id, queue_stats, is_tx)); } -static void nbl_disp_chan_add_lag_flow_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_get_firmware_version(void *priv, char *firmware_version, u8 max_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; int ret = 0; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lag_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data); - if (ret) - err = NBL_CHAN_RESP_ERR; - - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_LAG_FLOW, msg_id, err, NULL, 0); - ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + ret = NBL_OPS_CALL(res_ops->get_firmware_version, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), firmware_version)); if (ret) - dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_ADD_LAG_FLOW); + dev_err(dev, "get emp version failed with ret: %d\n", ret); } -static int nbl_disp_add_lag_flow(void *priv, u16 vsi_id) +static int nbl_disp_get_driver_info(void *priv, struct nbl_driver_info *driver_info) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lag_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + return NBL_OPS_CALL(res_ops->get_driver_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), driver_info)); } 
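+/* Dispatch convention used throughout this file: an op that can be served + * locally calls straight into res_ops; the matching chan_*_req variant packs + * its arguments into a channel message and sends it to the managing PF with + * NBL_CHAN_SEND(), and the chan_*_resp handler on the other side unpacks the + * payload, invokes the resource op, and returns the result via NBL_CHAN_ACK(). + */ 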
-static void nbl_disp_chan_del_lag_flow_req(void *priv, u16 vsi_id) +static void nbl_disp_get_coalesce(void *priv, u16 vector_id, + struct nbl_chan_param_get_coalesce *ec) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_send_info chan_send; + struct nbl_resource_ops *res_ops; - NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_LAG_FLOW, &vsi_id, sizeof(vsi_id), - NULL, 0, 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_coalesce, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, ec)); } -static void nbl_disp_chan_del_lag_flow_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_set_coalesce(void *priv, u16 vector_id, u16 vector_num, u16 pnum, u16 rate) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_coalesce, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, + vector_num, pnum, rate); +} + +static void nbl_disp_get_rxfh_indir_size(void *priv, u16 vsi_id, u32 *rxfh_indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_indir_size, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, rxfh_indir_size)); +} + +static void nbl_disp_get_rxfh_rss_key_size(void *priv, u32 *rxfh_rss_key_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_rss_key_size, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rxfh_rss_key_size)); +} + +static void nbl_disp_get_rxfh_indir(void *priv, u16 vsi_id, u32 *indir, u32 indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_indir, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, indir)); +} + +static int nbl_disp_set_rxfh_indir(void *priv, u16 vsi_id, const u32 *indir, u32 indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; int ret = 0; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lag_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - *(u16 *)data); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->set_rxfh_indir, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, indir, indir_size)); + return ret; +} - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_LAG_FLOW, msg_id, err, NULL, 0); +static int nbl_disp_chan_set_rxfh_indir_req(void *priv, + u16 vsi_id, const u32 *indir, u32 indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_rxfh_indir *param = NULL; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int ret = 0; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = 
NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) + return -ENOMEM; + + param->vsi_id = vsi_id; + param->indir_size = indir_size; + memcpy(param->indir, indir, indir_size * sizeof(param->indir[0])); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_SET_RXFH_INDIR, param, + sizeof(*param), NULL, 0, 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + kfree(param); + return ret; +} + +static void nbl_disp_chan_set_rxfh_indir_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_rxfh_indir *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_rxfh_indir *)data; + + err = NBL_OPS_CALL(res_ops->set_rxfh_indir, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->indir, param->indir_size)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_RXFH_INDIR, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_rxfh_rss_key(void *priv, u8 *rss_key, u32 key_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_rss_key, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rss_key)); +} + +static void nbl_disp_get_rxfh_rss_alg_sel(void *priv, u16 vsi_id, u8 *alg_sel) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rss_alg_sel, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, alg_sel)); +} + +static void nbl_disp_get_phy_caps(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_phy_caps, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, phy_caps)); +} + +static int nbl_disp_set_sfp_state(void *priv, u8 eth_id, u8 state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->set_sfp_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, state)); + return ret; +} + +static int nbl_disp_init_chip_module(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->init_chip_module, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_queue_init(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->queue_init, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_vsi_init(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct 
nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->vsi_init, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_configure_msix_map(void *priv, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, num_net_msix, + num_others_msix, net_msix_mask_en); + return ret; +} + +static int nbl_disp_chan_configure_msix_map_req(void *priv, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_msix_map param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.num_net_msix = num_net_msix; + param.num_others_msix = num_others_msix; + param.msix_mask_en = net_msix_mask_en; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_configure_msix_map_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_msix_map *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_cfg_msix_map *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, + param->num_net_msix, param->num_others_msix, param->msix_mask_en); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, msg_id, err, NULL, 0); ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); if (ret) dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_DEL_LAG_FLOW); + ret, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP); +} + +static int nbl_disp_chan_destroy_msix_map_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DESTROY_MSIX_MAP, + NULL, 0, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_destroy_msix_map_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops 
*chan_ops; + struct nbl_chan_param_cfg_msix_map *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_cfg_msix_map *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DESTROY_MSIX_MAP, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_DESTROY_MSIX_MAP); +} + +static int nbl_disp_chan_enable_mailbox_irq_req(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_enable_mailbox_irq param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vector_id = vector_id; + param.enable_msix = enable_msix; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_enable_mailbox_irq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_enable_mailbox_irq *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_enable_mailbox_irq *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_mailbox_irq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, + param->vector_id, param->enable_msix); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ); +} + +static u16 nbl_disp_chan_get_global_vector_req(void *priv, u16 vsi_id, u16 local_vector_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_global_vector param = {0}; + struct nbl_chan_param_get_global_vector result = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.vector_id = local_vector_id; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_GLOBAL_VECTOR, &param, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result.vector_id; +} + +static void nbl_disp_chan_get_global_vector_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt 
*)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_global_vector *param; + struct nbl_chan_param_get_global_vector result; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_get_global_vector *)data; + + result.vector_id = NBL_OPS_CALL(res_ops->get_global_vector, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->vector_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_GLOBAL_VECTOR, + msg_id, err, &result, sizeof(result)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_GET_GLOBAL_VECTOR); +} + +static int nbl_disp_destroy_msix_map(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0); + return ret; +} + +static int nbl_disp_enable_mailbox_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_mailbox_irq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, enable_msix); + return ret; +} + +static int nbl_disp_enable_abnormal_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->enable_abnormal_irq, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector_id, enable_msix)); + return ret; +} + +static int nbl_disp_enable_adminq_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->enable_adminq_irq, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector_id, enable_msix)); + return ret; +} + +static u16 nbl_disp_get_global_vector(void *priv, u16 vsi_id, u16 local_vector_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + u16 ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->get_global_vector, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_vector_id)); + return ret; +} + +static u16 nbl_disp_get_msix_entry_id(void *priv, u16 vsi_id, u16 local_vector_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + u16 ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->get_msix_entry_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_vector_id)); + return ret; +} + +static void nbl_disp_dump_flow(void *priv, struct seq_file *m) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct 
nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->dump_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m); +} + +static u16 nbl_disp_get_vsi_id(void *priv, u16 func_id, u16 type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_vsi_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + func_id, type)); +} + +static void nbl_disp_get_eth_id(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_eth_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, eth_mode, eth_id, logic_eth_id)); +} + +static void nbl_disp_get_rep_feature(void *priv, + struct nbl_register_net_result *register_result) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rep_feature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), register_result)); +} + +static void nbl_disp_set_eswitch_mode(void *priv, u16 eswitch_mode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->set_eswitch_mode, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eswitch_mode)); +} + +static u16 nbl_disp_get_eswitch_mode(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + u16 ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->get_eswitch_mode, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_alloc_rep_data(void *priv, int num_vfs, u16 vf_base_vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->alloc_rep_data, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), num_vfs, vf_base_vsi_id)); +} + +static void nbl_disp_free_rep_data(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->free_rep_data, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_set_rep_netdev_info(void *priv, void *rep_data) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->set_rep_netdev_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rep_data)); +} + +static void nbl_disp_unset_rep_netdev_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->unset_rep_netdev_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static struct net_device *nbl_disp_get_rep_netdev_info(void *priv, u16 rep_data_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_rep_netdev_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + 
rep_data_index)); +} + +static int nbl_disp_enable_lag_protocol(void *priv, u16 eth_id, bool lag_en) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->enable_lag_protocol, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, lag_en)); +} + +static int nbl_disp_chan_cfg_lag_hash_algorithm_req(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_hash_algorithm param = {0}; + struct nbl_chan_send_info chan_send; + + param.eth_id = eth_id; + param.lag_id = lag_id; + param.hash_type = hash_type; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_LAG_HASH_ALGORITHM, &param, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_lag_hash_algorithm_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_hash_algorithm *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_cfg_lag_hash_algorithm *)data; + + ret = NBL_OPS_CALL(res_ops->cfg_lag_hash_algorithm, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->lag_id, param->hash_type)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LAG_HASH_ALGORITHM, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_LAG_HASH_ALGORITHM); +} + +static int nbl_disp_cfg_lag_hash_algorithm(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_lag_hash_algorithm, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, lag_id, hash_type)); +} + +static int nbl_disp_chan_cfg_lag_member_fwd_req(void *priv, u16 eth_id, u16 lag_id, u8 fwd) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_member_fwd param = {0}; + struct nbl_chan_send_info chan_send; + + param.eth_id = eth_id; + param.lag_id = lag_id; + param.fwd = fwd; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_LAG_MEMBER_FWD, &param, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_lag_member_fwd_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_member_fwd *param; + struct nbl_chan_ack_info chan_ack; + 
int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_cfg_lag_member_fwd *)data; + + ret = NBL_OPS_CALL(res_ops->cfg_lag_member_fwd, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->lag_id, param->fwd)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LAG_MEMBER_FWD, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_LAG_MEMBER_FWD); +} + +static int nbl_disp_cfg_lag_member_fwd(void *priv, u16 eth_id, u16 lag_id, u8 fwd) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_lag_member_fwd, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, lag_id, fwd)); +} + +static int nbl_disp_chan_cfg_lag_member_list_req(void *priv, + struct nbl_lag_member_list_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_lag_member_list_param chan_param = {0}; + struct nbl_chan_send_info chan_send; + + memcpy(&chan_param, param, sizeof(chan_param)); + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_LAG_MEMBER_LIST, &chan_param, + sizeof(chan_param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_lag_member_list_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_lag_member_list_param *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_lag_member_list_param *)data; + + ret = NBL_OPS_CALL(res_ops->cfg_lag_member_list, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LAG_MEMBER_LIST, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_LAG_MEMBER_LIST); +} + +static int nbl_disp_cfg_lag_member_list(void *priv, struct nbl_lag_member_list_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_lag_member_list, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); +} + +static int nbl_disp_chan_cfg_lag_member_up_attr_req(void *priv, u16 eth_id, u16 lag_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_member_up_attr param = {0}; + struct nbl_chan_send_info chan_send; + + param.eth_id = eth_id; + param.lag_id = lag_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_LAG_MEMBER_UP_ATTR, &param, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_lag_member_up_attr_resp(void *priv, 
u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_member_up_attr *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_cfg_lag_member_up_attr *)data; + + ret = NBL_OPS_CALL(res_ops->cfg_lag_member_up_attr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->lag_id, + param->enable)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LAG_MEMBER_UP_ATTR, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_LAG_MEMBER_UP_ATTR); +} + +static int nbl_disp_cfg_lag_member_up_attr(void *priv, u16 eth_id, u16 lag_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_lag_member_up_attr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, lag_id, enable)); +} + +static int nbl_disp_chan_add_lag_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_LAG_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_lag_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lag_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_LAG_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_ADD_LAG_FLOW); +} + +static int nbl_disp_add_lag_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lag_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_chan_del_lag_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_LAG_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_lag_flow_resp(void *priv, u16 src_id, u16 msg_id, + 
void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lag_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + *(u16 *)data); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_LAG_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_DEL_LAG_FLOW); +} + +static void nbl_disp_del_lag_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lag_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_chan_add_lldp_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_LLDP_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_lldp_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lldp_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_LLDP_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_ADD_LLDP_FLOW); +} + +static int nbl_disp_add_lldp_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lldp_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_chan_del_lldp_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_LLDP_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_lldp_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = 
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lldp_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + *(u16 *)data); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_LLDP_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_DEL_LLDP_FLOW); +} + +static void nbl_disp_del_lldp_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lldp_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_cfg_duppkt_info(void *priv, struct nbl_lag_member_list_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_duppkt_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); +} + +static int nbl_disp_chan_cfg_duppkt_mcc_req(void *priv, struct nbl_lag_member_list_param *mem_param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_lag_member_list_param param = {0}; + struct nbl_chan_send_info chan_send; + + memcpy(&param, mem_param, sizeof(param)); + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_DUPPKT_MCC, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_duppkt_mcc_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_lag_member_list_param *param = NULL; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_lag_member_list_param *)data; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_duppkt_mcc, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_DUPPKT_MCC, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_DUPPKT_MCC); +} + +static int nbl_disp_cfg_duppkt_mcc(void *priv, struct nbl_lag_member_list_param *mem_param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_duppkt_mcc, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mem_param); +} + +static int nbl_disp_chan_cfg_bond_shaping_req(void *priv, u8 eth_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_bond_shaping param = {0}; + struct nbl_chan_send_info chan_send; + + param.eth_id = eth_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), 
NBL_CHAN_MSG_CFG_BOND_SHAPING, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_bond_shaping_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(NBL_DISP_MGT_TO_COMMON(disp_mgt)); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_bond_shaping *param = NULL; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_cfg_bond_shaping *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_bond_shaping, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->enable); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_BOND_SHAPING, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_BOND_SHAPING); +} + +static int nbl_disp_cfg_bond_shaping(void *priv, u8 eth_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_bond_shaping, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, enable); +} + +static void nbl_disp_chan_cfg_bgid_back_pressure_req(void *priv, u8 main_eth_id, u8 other_eth_id, + bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_bgid_back_pressure param = {0}; + struct nbl_chan_send_info chan_send; + + param.main_eth_id = main_eth_id; + param.other_eth_id = other_eth_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_CFG_BGID_BACK_PRESSURE, + &param, sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_bgid_back_pressure_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(NBL_DISP_MGT_TO_COMMON(disp_mgt)); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_bgid_back_pressure *param = NULL; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_cfg_bgid_back_pressure *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_bgid_back_pressure, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->main_eth_id, + param->other_eth_id, param->enable); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_BGID_BACK_PRESSURE, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_BGID_BACK_PRESSURE); +} + +static void nbl_disp_cfg_bgid_back_pressure(void *priv, u8 main_eth_id, u8 other_eth_id, + bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct 
nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_bgid_back_pressure, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), main_eth_id, other_eth_id, enable); +} + +static u32 nbl_disp_get_tx_headroom(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u32 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_tx_headroom, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static void nbl_disp_register_rdma(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_rdma, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, param); +} + +static void nbl_disp_unregister_rdma(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_rdma, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static u8 __iomem *nbl_disp_get_hw_addr(void *priv, size_t *size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u8 __iomem *addr = NULL; + + addr = NBL_OPS_CALL(res_ops->get_hw_addr, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), size)); + return addr; +} + +static u64 nbl_disp_get_real_hw_addr(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u64 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_real_hw_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + return ret; +} + +static u16 nbl_disp_get_function_id(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_function_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + return ret; +} + +static void nbl_disp_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_real_bdf, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, bus, dev, function)); +} + +static bool nbl_disp_check_fw_heartbeat(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + bool ret = false; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->check_fw_heartbeat, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static bool nbl_disp_check_fw_reset(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->check_fw_reset, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_flash_lock(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_lock, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int 
nbl_disp_flash_unlock(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_unlock, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_flash_prepare(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_prepare, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_flash_image(void *priv, u32 module, const u8 *data, size_t len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_image, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), module, data, len)); +} + +static int nbl_disp_flash_activate(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_activate, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_set_eth_loopback(void *priv, u8 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u8 eth_id = NBL_DISP_MGT_TO_COMMON(disp_mgt)->eth_id; + + return NBL_OPS_CALL(res_ops->setup_loopback, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, enable)); +} + +static int nbl_disp_chan_set_eth_loopback_req(void *priv, u8 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_eth_loopback param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_port_id = NBL_DISP_MGT_TO_COMMON(disp_mgt)->eth_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_ETH_LOOPBACK, &param, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_eth_loopback_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_eth_loopback *param; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_set_eth_loopback *)data; + ret = NBL_OPS_CALL(res_ops->setup_loopback, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_port_id, param->enable)); + if (ret) { + dev_err(dev, "setup loopback adminq failed with ret: %d\n", ret); + err = NBL_CHAN_RESP_ERR; + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_ETH_LOOPBACK, + msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_SET_ETH_LOOPBACK); +} + +static struct sk_buff *nbl_disp_clean_rx_lb_test(void *priv, u32 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops 
*res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->clean_rx_lb_test, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); +} + +static u32 nbl_disp_check_active_vf(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->check_active_vf, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0)); +} + +static u32 nbl_disp_chan_check_active_vf_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct device *dev = NBL_DISP_MGT_TO_DEV(disp_mgt); + u32 active_vf_num = 0; + int ret; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CHECK_ACTIVE_VF, NULL, 0, + &active_vf_num, sizeof(active_vf_num), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + dev_err(dev, "channel check active vf send msg failed with ret: %d\n", ret); + + return active_vf_num; +} + +static void nbl_disp_chan_check_active_vf_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u32 active_vf_num; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + active_vf_num = NBL_OPS_CALL(res_ops->check_active_vf, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CHECK_ACTIVE_VF, + msg_id, err, &active_vf_num, sizeof(active_vf_num)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CHECK_ACTIVE_VF); +} + +static u32 nbl_disp_get_adminq_tx_buf_size(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + return chan_ops->get_adminq_tx_buf_size(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt)); +} + +static int nbl_disp_adminq_emp_console_write(void *priv, char *buf, size_t count) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_EMP_CONSOLE_WRITE, + buf, count, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static bool nbl_disp_get_product_flex_cap(void *priv, enum nbl_flex_cap_type cap_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + bool has_cap = false; + + has_cap = NBL_OPS_CALL(res_ops->get_product_flex_cap, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + cap_type)); + return has_cap; +} + +static int nbl_disp_set_pmd_debug(void *priv, bool pmd_debug) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_pmd_debug, 
+ (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), pmd_debug)); +} + +static bool nbl_disp_chan_get_product_flex_cap_req(void *priv, enum nbl_flex_cap_type cap_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + bool has_cap = false; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, &cap_type, + sizeof(cap_type), &has_cap, sizeof(has_cap), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return has_cap; +} + +static void nbl_disp_chan_get_product_flex_cap_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + enum nbl_flex_cap_type *cap_type = (enum nbl_flex_cap_type *)data; + struct nbl_chan_ack_info chan_ack = {0}; + bool has_cap = false; + + has_cap = NBL_OPS_CALL(res_ops->get_product_flex_cap, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *cap_type)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, msg_id, + NBL_CHAN_RESP_OK, &has_cap, sizeof(has_cap)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static bool nbl_disp_get_product_fix_cap(void *priv, enum nbl_fix_cap_type cap_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + bool has_cap = false; + + has_cap = NBL_OPS_CALL(res_ops->get_product_fix_cap, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + cap_type)); + return has_cap; +} + +static int nbl_disp_alloc_ktls_tx_index(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int index = 0; + + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ktls_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + return index; +} + +static int nbl_disp_chan_alloc_ktls_tx_index_req(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int index = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_ALLOC_KTLS_TX_INDEX, &vsi, sizeof(u16), + &index, sizeof(index), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return index; +} + +static void nbl_disp_chan_alloc_ktls_tx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack = {0}; + int index; + u16 vsi; + + vsi = *(u16 *)data; + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ktls_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ALLOC_KTLS_TX_INDEX, msg_id, + NBL_CHAN_RESP_OK, &index, sizeof(index)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static 
void nbl_disp_free_ktls_tx_index(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ktls_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_free_ktls_tx_index_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_FREE_KTLS_TX_INDEX, &index, + sizeof(index), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_free_ktls_tx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack = {0}; + u32 index; + + index = *(u32 *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ktls_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_FREE_KTLS_TX_INDEX, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_cfg_ktls_tx_keymat(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_tx_keymat, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, mode, salt, + key, key_len); +} + +static void nbl_disp_chan_cfg_ktls_tx_keymat_req(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_keymat param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + param.index = index; + param.mode = mode; + memcpy(param.salt, salt, sizeof(param.salt)); + memcpy(param.key, key, key_len); + param.key_len = key_len; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_KTLS_TX_KEYMAT, &param, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_ktls_tx_keymat_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_keymat *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ktls_keymat *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_tx_keymat, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index, + param->mode, param->salt, param->key, param->key_len); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_KTLS_TX_KEYMAT, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), 
&chan_ack); +} + +static int nbl_disp_alloc_ktls_rx_index(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int index = 0; + + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ktls_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + return index; +} + +static int nbl_disp_chan_alloc_ktls_rx_index_req(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int index = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_ALLOC_KTLS_RX_INDEX, &vsi, sizeof(u16), + &index, sizeof(index), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return index; +} + +static void nbl_disp_chan_alloc_ktls_rx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack = {0}; + int index; + u16 vsi; + + vsi = *(u16 *)data; + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ktls_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ALLOC_KTLS_RX_INDEX, msg_id, + NBL_CHAN_RESP_OK, &index, sizeof(index)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_free_ktls_rx_index(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ktls_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_free_ktls_rx_index_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_FREE_KTLS_RX_INDEX, &index, + sizeof(index), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_free_ktls_rx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack = {0}; + u32 index; + + index = *(u32 *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ktls_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_FREE_KTLS_RX_INDEX, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_cfg_ktls_rx_keymat(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + 
NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_rx_keymat, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, mode, + salt, key, key_len); +} + +static void nbl_disp_chan_cfg_ktls_rx_keymat_req(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_keymat param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + param.mode = mode; + memcpy(param.salt, salt, sizeof(param.salt)); + memcpy(param.key, key, key_len); + param.key_len = key_len; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_KTLS_RX_KEYMAT, &param, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_ktls_rx_keymat_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_keymat *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ktls_keymat *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_rx_keymat, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index, + param->mode, param->salt, param->key, param->key_len); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_KTLS_RX_KEYMAT, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_cfg_ktls_rx_record(void *priv, u32 index, u32 tcp_sn, u64 rec_num, bool init) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_rx_record, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, tcp_sn, rec_num, init); +} + +static void nbl_disp_chan_cfg_ktls_rx_record_req(void *priv, u32 index, + u32 tcp_sn, u64 rec_num, bool init) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_record param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.init = init; + param.index = index; + param.tcp_sn = tcp_sn; + param.rec_num = rec_num; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_KTLS_RX_RECORD, &param, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_ktls_rx_record_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_record *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ktls_record *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_rx_record, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->index, param->tcp_sn, param->rec_num, param->init); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_KTLS_RX_RECORD, msg_id, + 
NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_add_ktls_rx_flow(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->add_ktls_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, data, vsi); +} + +static int nbl_disp_chan_add_ktls_rx_flow_req(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + param.vsi = vsi; + memcpy(param.data, data, sizeof(param.data)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ADD_KTLS_RX_FLOW, &param, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_ktls_rx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_flow *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_cfg_ktls_flow *)data; + ret = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->add_ktls_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index, + param->data, param->vsi); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_KTLS_RX_FLOW, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_del_ktls_rx_flow(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ktls_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_del_ktls_rx_flow_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DEL_KTLS_RX_FLOW, &param, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_ktls_rx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_flow *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ktls_flow *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ktls_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index); + NBL_CHAN_ACK(chan_ack, src_id, 
NBL_CHAN_MSG_DEL_KTLS_RX_FLOW, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_alloc_ipsec_tx_index(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int index = 0; + + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ipsec_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), cfg_info); + return index; +} + +static int nbl_disp_chan_alloc_ipsec_tx_index_req(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ipsec_index param = {0}; + struct nbl_chan_ipsec_index result = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(&param.cfg_info, cfg_info, sizeof(param.cfg_info)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ALLOC_IPSEC_TX_INDEX, &param, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result.index; +} + +static void nbl_disp_chan_alloc_ipsec_tx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ipsec_index *param; + struct nbl_chan_ipsec_index result = {0}; + struct nbl_chan_ack_info chan_ack = {0}; + + param = (struct nbl_chan_ipsec_index *)data; + result.index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ipsec_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + &param->cfg_info); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ALLOC_IPSEC_TX_INDEX, msg_id, + NBL_CHAN_RESP_OK, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_free_ipsec_tx_index(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ipsec_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_free_ipsec_tx_index_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ipsec_index param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_FREE_IPSEC_TX_INDEX, &param, + sizeof(param), NULL, 0, 0); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_free_ipsec_tx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_chan_ipsec_index *param; + + param = (struct nbl_chan_ipsec_index *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ipsec_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index); +} + +static int 
nbl_disp_alloc_ipsec_rx_index(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int index = 0; + + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ipsec_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), cfg_info); + return index; +} + +static int nbl_disp_chan_alloc_ipsec_rx_index_req(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ipsec_index param = {0}; + struct nbl_chan_ipsec_index result = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(&param.cfg_info, cfg_info, sizeof(param.cfg_info)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ALLOC_IPSEC_RX_INDEX, &param, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result.index; +} + +static void nbl_disp_chan_alloc_ipsec_rx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ipsec_index *param; + struct nbl_chan_ipsec_index result = {0}; + struct nbl_chan_ack_info chan_ack = {0}; + + param = (struct nbl_chan_ipsec_index *)data; + result.index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ipsec_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + &param->cfg_info); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ALLOC_IPSEC_RX_INDEX, msg_id, + NBL_CHAN_RESP_OK, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_free_ipsec_rx_index(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ipsec_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_free_ipsec_rx_index_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ipsec_index param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_FREE_IPSEC_RX_INDEX, &param, + sizeof(param), NULL, 0, 0); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_free_ipsec_rx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_chan_ipsec_index *param; + + param = (struct nbl_chan_ipsec_index *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ipsec_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index); +} + +static void nbl_disp_cfg_ipsec_tx_sad(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; 
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ipsec_tx_sad, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, sa_entry); +} + +static void nbl_disp_chan_cfg_ipsec_tx_sad_req(void *priv, u32 index, + struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_sad param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + memcpy(&param.sa_entry, sa_entry, sizeof(param.sa_entry)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_IPSEC_TX_SAD, &param, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_ipsec_tx_sad_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_sad *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ipsec_sad *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ipsec_tx_sad, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index, + &param->sa_entry); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_IPSEC_TX_SAD, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_cfg_ipsec_rx_sad(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ipsec_rx_sad, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, sa_entry); +} + +static void nbl_disp_chan_cfg_ipsec_rx_sad_req(void *priv, u32 index, + struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_sad param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + memcpy(&param.sa_entry, sa_entry, sizeof(param.sa_entry)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_IPSEC_RX_SAD, &param, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_ipsec_rx_sad_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_sad *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ipsec_sad *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ipsec_rx_sad, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index, + &param->sa_entry); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_IPSEC_RX_SAD, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_add_ipsec_tx_flow(void *priv, u32 
index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->add_ipsec_tx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, data, vsi); +} + +static int nbl_disp_chan_add_ipsec_tx_flow_req(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + param.vsi = vsi; + memcpy(param.data, data, sizeof(param.data)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ADD_IPSEC_TX_FLOW, &param, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_ipsec_tx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_cfg_ipsec_flow *)data; + ret = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->add_ipsec_tx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->index, param->data, param->vsi); + if (ret) + err = NBL_CHAN_RESP_ERR; + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_IPSEC_TX_FLOW, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_del_ipsec_tx_flow(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ipsec_tx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_del_ipsec_tx_flow_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DEL_IPSEC_TX_FLOW, &param, + sizeof(param), NULL, 0, 0); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_ipsec_tx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow *param; + + param = (struct nbl_chan_cfg_ipsec_flow *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ipsec_tx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index); +} + +static int nbl_disp_add_ipsec_rx_flow(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_SPIN_LOCK(disp_mgt, 
res_ops->add_ipsec_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, data, vsi); +} + +static int nbl_disp_chan_add_ipsec_rx_flow_req(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + param.vsi = vsi; + memcpy(param.data, data, sizeof(param.data)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ADD_IPSEC_RX_FLOW, &param, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_ipsec_rx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_cfg_ipsec_flow *)data; + ret = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->add_ipsec_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->index, param->data, param->vsi); + if (ret) + err = NBL_CHAN_RESP_ERR; + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_IPSEC_RX_FLOW, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_del_ipsec_rx_flow(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ipsec_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_del_ipsec_rx_flow_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DEL_IPSEC_RX_FLOW, &param, + sizeof(param), NULL, 0, 0); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_ipsec_rx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow *param; + + param = (struct nbl_chan_cfg_ipsec_flow *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ipsec_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index); +} + +static bool nbl_disp_check_ipsec_status(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->check_ipsec_status, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static u32 nbl_disp_get_dipsec_lft_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return 
NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->get_dipsec_lft_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_handle_dipsec_soft_expire(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->handle_dipsec_soft_expire, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_handle_dipsec_hard_expire(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->handle_dipsec_hard_expire, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static u32 nbl_disp_get_uipsec_lft_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->get_uipsec_lft_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_handle_uipsec_soft_expire(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->handle_uipsec_soft_expire, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_handle_uipsec_hard_expire(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->handle_uipsec_hard_expire, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static int nbl_disp_get_mbx_irq_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_mbx_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_get_adminq_irq_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_adminq_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_get_abnormal_irq_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_abnormal_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_clear_accel_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->clear_accel_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_clear_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_clear_queues(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, 
res_ops->clear_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_disable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->disable_phy_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id); +} + +static int nbl_disp_enable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_phy_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id); +} + +static void nbl_disp_init_acl(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->init_acl, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_uninit_acl(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->uninit_acl, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_set_upcall_rule(void *priv, u8 eth_id, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_upcall_rule, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, vsi_id)); +} + +static int nbl_disp_unset_upcall_rule(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->unset_upcall_rule, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id)); +} + +static void nbl_disp_set_shaping_dport_vld(void *priv, u8 eth_id, bool vld) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_shaping_dport_vld, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, vld)); +} + +static void nbl_disp_set_dport_fc_th_vld(void *priv, u8 eth_id, bool vld) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_dport_fc_th_vld, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, vld)); +} + +static u16 nbl_disp_get_vsi_global_qid(void *priv, u16 vsi_id, u16 local_qid) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_vsi_global_queue_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_qid)); +} + +static u16 +nbl_disp_chan_get_vsi_global_qid_req(void *priv, u16 vsi_id, u16 local_qid) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_vsi_qid_info param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.local_qid = local_qid; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, &param, + sizeof(param), NULL, 0, 1); + return 
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void +nbl_disp_chan_get_vsi_global_qid_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_vsi_qid_info *param; + struct nbl_chan_ack_info chan_ack; + u16 global_qid; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_vsi_qid_info *)data; + global_qid = NBL_OPS_CALL(res_ops->get_vsi_global_queue_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->local_qid)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, + msg_id, global_qid, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_get_line_rate_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_rep_line_rate_info result = {0}; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_line_rate_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data, + &result)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINE_RATE_INFO, + msg_id, 0, &result, sizeof(struct nbl_rep_line_rate_info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_register_net_rep_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_register_net_rep *param; + struct nbl_chan_ack_info chan_ack; + struct nbl_register_net_rep_result result = {0}; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_register_net_rep *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_net_rep, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->pf_id, + param->vf_id, &result); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_NET_REP, + msg_id, 0, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_unregister_net_rep_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_net_rep, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_NET_REP, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_register_eth_rep_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u8 eth_id; + + res_ops = 
NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_eth_rep, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_ETH_REP, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_get_queue_cxt_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_queue_cxt *param; + struct nbl_chan_ack_info chan_ack; + u16 cxt; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_get_queue_cxt *)data; + + cxt = NBL_OPS_CALL(res_ops->get_queue_ctx, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->local_queue)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_QUEUE_CXT, + msg_id, 0, &cxt, sizeof(cxt)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_init_vdpaq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_vdpaq_init_info *param; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_vdpaq_init_info *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_vdpaq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->pa, param->size); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_VDPAQ, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_destroy_vdpaq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_vdpaq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DESTROY_VDPAQ, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_get_upcall_port_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int ret; + u16 bdf; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + ret = NBL_OPS_CALL(res_ops->get_upcall_port, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &bdf)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_GET_UPCALL_PORT, + msg_id, ret, &bdf, sizeof(u16)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_board_info(void *priv, struct nbl_board_port_info *board_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_board_info, + 
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), board_info)); +} + +static void +nbl_disp_chan_get_board_info_req(void *priv, struct nbl_board_port_info *board_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_BOARD_INFO, NULL, + 0, board_info, sizeof(*board_info), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void +nbl_disp_chan_get_board_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_board_port_info board_info = {0}; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_board_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &board_info)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_BOARD_INFO, + msg_id, 0, &board_info, sizeof(board_info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_cfg_log_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_log *param; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_cfg_log *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_queue_log, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, + param->qps, param->vld); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LOG, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_unregister_eth_rep_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_eth_rep, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_ETH_REP, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_register_upcall_port_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int ret; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_upcall_port, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_UPCALL_PORT, + msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void 
+nbl_disp_chan_unregister_upcall_port_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_upcall_port, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_UPCALL_PORT, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_set_offload_status_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_offload_status, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); +} + +static int nbl_disp_check_offload_status(void *priv, bool *is_down) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->check_offload_status, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), is_down)); +} + +static int nbl_disp_get_port_attributes(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_port_attributes, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + if (ret) + dev_err(dev, "get port attributes failed with ret: %d\n", ret); + + return ret; +} + +static int nbl_disp_update_ring_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->update_ring_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_update_rdma_cap(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->update_rdma_cap, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static u16 nbl_disp_get_rdma_cap_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_rdma_cap_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_update_rdma_mem_type(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->update_rdma_mem_type, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_set_ring_num(void *priv, struct nbl_fw_cmd_net_ring_num_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_ring_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); +} + +static int nbl_disp_enable_port(void *priv, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + 
struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->enable_port, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable)); + if (ret) + dev_err(dev, "enable port failed with ret: %d\n", ret); + + return ret; +} + +static void nbl_disp_init_port(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->init_port, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_chan_recv_port_notify_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->recv_port_notify, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data)); +} + +static int nbl_disp_get_fec_stats(void *priv, u8 eth_id, + struct nbl_fec_stats *fec_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_fec_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, fec_stats)); + return ret; +} + +static int nbl_disp_chan_get_fec_stats_req(void *priv, u8 eth_id, + struct nbl_fec_stats *fec_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_FEC_STATS, &eth_id, sizeof(eth_id), + fec_stats, sizeof(*fec_stats), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_fec_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_chan_ack_info chan_ack; + struct nbl_fec_stats info = {0}; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_fec_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u8 *)data, &info)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get eth fec stats failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FEC_STATS, msg_id, err, + &info, sizeof(info)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_FEC_STATS, src_id); +} + +static int nbl_disp_get_port_state(void *priv, u8 eth_id, + struct nbl_port_state *port_state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_port_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, port_state)); + return ret; +} + +static int nbl_disp_chan_get_port_state_req(void *priv, u8 eth_id, + struct nbl_port_state *port_state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct 
nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_PORT_STATE, &eth_id, sizeof(eth_id), + port_state, sizeof(*port_state), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_port_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + struct nbl_port_state info = {0}; + int ret = 0; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + ret = NBL_OPS_CALL(res_ops->get_port_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &info)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PORT_STATE, msg_id, err, + &info, sizeof(info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_set_port_advertising(void *priv, + struct nbl_port_advertising *port_advertising) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->set_port_advertising, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), port_advertising)); + return ret; +} + +static int nbl_disp_chan_set_port_advertising_req(void *priv, + struct nbl_port_advertising *port_advertising) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_PORT_ADVERTISING, + port_advertising, sizeof(*port_advertising), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_port_advertising_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_port_advertising *param; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_port_advertising *)data; + + ret = res_ops->set_port_advertising(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_PORT_ADVERTISING, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_module_info(void *priv, u8 eth_id, struct ethtool_modinfo *info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->get_module_info(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, info); +} + +static int nbl_disp_chan_get_module_info_req(void *priv, u8 eth_id, struct ethtool_modinfo *info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info 
chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_MODULE_INFO, &eth_id, + sizeof(eth_id), info, sizeof(*info), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_module_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + struct ethtool_modinfo info = {0}; + int ret = 0; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_module_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &info); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_INFO, msg_id, err, + &info, sizeof(info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_module_eeprom(void *priv, u8 eth_id, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->get_module_eeprom(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, eeprom, data); +} + +static int nbl_disp_chan_get_module_eeprom_req(void *priv, u8 eth_id, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_module_eeprom param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + memcpy(&param.eeprom, eeprom, sizeof(struct ethtool_eeprom)); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_MODULE_EEPROM, &param, + sizeof(param), data, eeprom->len, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_module_eeprom_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_module_eeprom *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 eth_id; + struct ethtool_eeprom *eeprom; + u8 *recv_data; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_get_module_eeprom *)data; + eth_id = param->eth_id; + eeprom = &param->eeprom; + recv_data = kmalloc(eeprom->len, GFP_ATOMIC); + if (!recv_data) { + dev_err(dev, "Allocate memory to store module eeprom failed\n"); + return; + } + + ret = res_ops->get_module_eeprom(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, eeprom, recv_data); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "Get module eeprom failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_EEPROM, msg_id, err, + recv_data, eeprom->len); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), 
&chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_MODULE_EEPROM, src_id); + kfree(recv_data); +} + +static int nbl_disp_get_link_state(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + /* If res_ops->get_link_state() is not implemented, default the eth link to up */ + if (res_ops->get_link_state) + ret = res_ops->get_link_state(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, eth_link_info); + else + eth_link_info->link_status = 1; + + return ret; +} + +static int nbl_disp_chan_get_link_state_req(void *priv, u8 eth_id, + struct nbl_eth_link_info *eth_link_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_LINK_STATE, &eth_id, + sizeof(eth_id), eth_link_info, sizeof(*eth_link_info), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_link_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 eth_id; + struct nbl_eth_link_info eth_link_info = {0}; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + ret = res_ops->get_link_state(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, &eth_link_info); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_STATE, msg_id, err, + &eth_link_info, sizeof(eth_link_info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_link_down_count(void *priv, u8 eth_id, u64 *link_down_count) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_link_down_count, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, link_down_count)); +} + +static int nbl_disp_chan_get_link_down_count_req(void *priv, u8 eth_id, u64 *link_down_count) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_LINK_DOWN_COUNT, &eth_id, + sizeof(eth_id), link_down_count, sizeof(*link_down_count), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_link_down_count_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 eth_id; + u64 link_down_count = 0; + int ret = 0; + + res_ops = 
NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + ret = res_ops->get_link_down_count(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, &link_down_count); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_DOWN_COUNT, msg_id, err, + &link_down_count, sizeof(link_down_count)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_link_status_opcode(void *priv, u8 eth_id, u32 *link_status_opcode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_link_status_opcode, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, link_status_opcode)); +} + +static int nbl_disp_chan_get_link_status_opcode_req(void *priv, u8 eth_id, u32 *link_status_opcode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_LINK_STATUS_OPCODE, &eth_id, + sizeof(eth_id), link_status_opcode, sizeof(*link_status_opcode), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_link_status_opcode_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 eth_id; + u32 link_status_opcode = 0; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + ret = res_ops->get_link_status_opcode(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, &link_status_opcode); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_STATUS_OPCODE, msg_id, err, + &link_status_opcode, sizeof(link_status_opcode)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_reg_dump(void *priv, u32 *data, u32 len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_reg_dump, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data, len)); +} + +static void nbl_disp_chan_get_reg_dump_req(void *priv, u32 *data, u32 len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + u32 *result = NULL; + + result = kmalloc(len, GFP_KERNEL); + if (!result) + return; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REG_DUMP, &len, sizeof(len), + result, len, 1); + if (!chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) + memcpy(data, result, len); + + kfree(result); +} + +static int nbl_disp_set_wol(void *priv, u8 eth_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return 
NBL_OPS_CALL(res_ops->set_wol, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, enable)); +} + +static int nbl_disp_chan_set_wol_req(void *priv, u8 eth_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_set_wol param = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_WOL, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_wol_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_wol *param; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_set_wol *)data; + ret = res_ops->set_wol(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->enable); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_WOL, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_reg_dump_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u32 *result = NULL; + u32 len = 0; + + len = *(u32 *)data; + result = kmalloc(len, GFP_KERNEL); + if (!result) + return; + + NBL_OPS_CALL(res_ops->get_reg_dump, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), result, len)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REG_DUMP, msg_id, err, result, len); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + kfree(result); +} + +static int nbl_disp_get_reg_dump_len(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_reg_dump_len, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_chan_get_reg_dump_len_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + int result = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REG_DUMP_LEN, NULL, 0, + &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result; +} + +static void nbl_disp_chan_get_reg_dump_len_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + 
int err = NBL_CHAN_RESP_OK; + int result = 0; + + result = NBL_OPS_CALL(res_ops->get_reg_dump_len, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REG_DUMP_LEN, msg_id, err, + &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_init_offload_fwd_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + vsi_id = *(u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_offload_fwd, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_OFLD, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_init_cmdq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_cmdq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data, src_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_CMDQ, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_destroy_cmdq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_cmdq, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DESTROY_CMDQ, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_reset_cmdq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->reset_cmdq, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESET_CMDQ, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_offload_flow_rule_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->offload_flow_rule, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_OFFLOAD_FLOW_RULE, + msg_id, 0, NULL, 0); + 
chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_get_flow_acl_switch_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + u8 acl_enable = false; + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_flow_acl_switch, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &acl_enable); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ACL_SWITCH, + msg_id, 0, &acl_enable, sizeof(u8)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_init_rep_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_rep_cfg_info *param; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_rep_cfg_info *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_rep, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->inner_type, param->outer_type, param->rep_type); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_REP, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_init_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_FLOW, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_deinit_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->deinit_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEINIT_FLOW, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_configure_rdma_msix_off(void *priv, u16 vector) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->configure_rdma_msix_off(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector); +} + +static int nbl_disp_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_eth_mac_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, eth_id)); +} + +static int nbl_disp_chan_set_eth_mac_addr_req(void *priv, u8 
*mac, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_eth_mac_addr param; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(param.mac, mac, sizeof(param.mac)); + param.eth_id = eth_id; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_eth_mac_addr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_eth_mac_addr *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_eth_mac_addr *)data; + + ret = NBL_OPS_CALL(res_ops->set_eth_mac_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->mac, param->eth_id)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_SET_ETH_MAC_ADDR); +} + +static u32 nbl_disp_get_chip_temperature(void *priv, enum nbl_hwmon_type type, u32 senser_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_chip_temperature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), type, senser_id)); +} + +static u32 nbl_disp_chan_get_chip_temperature_req(void *priv, + enum nbl_hwmon_type type, u32 senser_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_hwmon param = {0}; + struct nbl_common_info *common; + u32 chip_temperature = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.senser_id = senser_id; + param.type = type; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, &param, sizeof(param), + &chip_temperature, sizeof(chip_temperature), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return chip_temperature; +} + +static void nbl_disp_chan_get_chip_temperature_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_get_hwmon *param = (struct nbl_chan_param_get_hwmon *)data; + int ret = NBL_CHAN_RESP_OK; + u32 chip_temperature = 0; + + chip_temperature = NBL_OPS_CALL(res_ops->get_chip_temperature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->type, param->senser_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, msg_id, + ret, 
&chip_temperature, sizeof(chip_temperature)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_module_temperature(void *priv, u8 eth_id, + enum nbl_hwmon_type type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_module_temperature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, type)); +} + +static int nbl_disp_chan_get_module_temperature_req(void *priv, u8 eth_id, + enum nbl_hwmon_type type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + int module_temp = 0; + struct nbl_chan_param_get_hwmon param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.senser_id = eth_id; + param.type = type; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, + &param, sizeof(param), &module_temp, sizeof(module_temp), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return module_temp; +} + +static void nbl_disp_chan_get_module_temperature_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + int module_temp; + struct nbl_chan_param_get_hwmon *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_get_hwmon *)data; + module_temp = NBL_OPS_CALL(res_ops->get_module_temperature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->senser_id, param->type)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, msg_id, + ret, &module_temp, sizeof(module_temp)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnormal_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->process_abnormal_event(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), abnormal_info); +} + +static int nbl_disp_chan_switchdev_init_cmdq_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int ret_status = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SWITCHDEV_INIT_CMDQ, + NULL, 0, &ret_status, sizeof(ret_status), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + return ret_status; +} + +static void nbl_disp_chan_switchdev_init_cmdq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + int ret_status = 0; + + ret_status = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->switchdev_init_cmdq, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + 
NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SWITCHDEV_INIT_CMDQ, msg_id, + ret, &ret_status, sizeof(ret_status)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_switchdev_init_cmdq(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->switchdev_init_cmdq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static int nbl_disp_chan_switchdev_deinit_cmdq_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int ret_status = 0; + u8 tc_inst_id; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + tc_inst_id = common->tc_inst_id; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SWITCHDEV_DEINIT_CMDQ, + &tc_inst_id, sizeof(tc_inst_id), &ret_status, sizeof(ret_status), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (!ret_status) + common->tc_inst_id = NBL_TC_FLOW_INST_COUNT; + return 0; +} + +static void nbl_disp_chan_switchdev_deinit_cmdq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + int ret_status = 0; + u8 tc_inst_id; + + tc_inst_id = *(u8 *)data; + ret_status = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->switchdev_deinit_cmdq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), tc_inst_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SWITCHDEV_DEINIT_CMDQ, msg_id, + ret, &ret_status, sizeof(ret_status)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_switchdev_deinit_cmdq(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->switchdev_deinit_cmdq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + common->tc_inst_id); +} + +static int nbl_disp_add_tc_flow(void *priv, struct nbl_tc_flow_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_tc_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + return ret; +} + +static int nbl_disp_del_tc_flow(void *priv, struct nbl_tc_flow_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!param) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_tc_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + return ret; +} + +static bool nbl_disp_tc_tun_encap_lookup(void *priv, + struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param) +{ + bool ret = 0; + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + if (!rule_act || !param) + return false; + + ret = 
NBL_OPS_CALL_LOCK(disp_mgt, res_ops->tc_tun_encap_lookup, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + rule_act, param); + return ret; +} + +static int nbl_disp_tc_tun_encap_del(void *priv, struct nbl_encap_key *key) +{ + int ret = 0; + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + if (!key) + return -EINVAL; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->tc_tun_encap_del, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), key); + return ret; +} + +static int nbl_disp_tc_tun_encap_add(void *priv, struct nbl_rule_action *action) +{ + int ret = 0; + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + if (!action) + return -EINVAL; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->tc_tun_encap_add, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), action); + return ret; +} + +static int nbl_disp_flow_index_lookup(void *priv, struct nbl_flow_index_key key) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flow_index_lookup, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), key); + return ret; +} + +static int nbl_disp_query_tc_stats(void *priv, struct nbl_stats_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!param) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->query_tc_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); + return ret; +} + +static int nbl_disp_set_tc_flow_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_tc_flow_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static int nbl_disp_chan_set_tc_flow_info_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int ret_status = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_TC_FLOW_INFO, + NULL, 0, &ret_status, sizeof(ret_status), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + return ret_status; +} + +static void nbl_disp_chan_set_tc_flow_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + int ret_status = 0; + + ret_status = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_tc_flow_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_TC_FLOW_INFO, msg_id, + ret, &ret_status, sizeof(ret_status)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_unset_tc_flow_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return 
NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unset_tc_flow_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static int nbl_disp_chan_unset_tc_flow_info_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int ret_status = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_UNSET_TC_FLOW_INFO, + NULL, 0, &ret_status, sizeof(ret_status), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + return 0; +} + +static void nbl_disp_chan_unset_tc_flow_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + int ret_status = 0; + + ret_status = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unset_tc_flow_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNSET_TC_FLOW_INFO, msg_id, + ret, &ret_status, sizeof(ret_status)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_tc_flow_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_tc_flow_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_adapt_desc_gother(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->adapt_desc_gother, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_set_desc_high_throughput(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_desc_high_throughput, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_flr_clear_rdma(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_rdma, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_net(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_accel(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->flr_clear_accel, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_queues(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} 
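+ +/* Like nbl_disp_flr_clear_accel above, the accel flow cleanup below goes + * through the NBL_OPS_CALL_SPIN_LOCK variant, while the remaining FLR + * cleanup helpers serialize on the regular NBL_OPS_CALL_LOCK dispatch lock. + */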
+ +static void nbl_disp_flr_clear_accel_flow(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->flr_clear_accel_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_flows(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_flows, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_interrupt(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_interrupt, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static u16 nbl_disp_covert_vfid_to_vsi_id(void *priv, u16 vfid) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->covert_vfid_to_vsi_id, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vfid); +} + +static void nbl_disp_unmask_all_interrupts(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unmask_all_interrupts, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static u32 nbl_disp_get_perf_dump_length(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_perf_dump_length, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static u32 nbl_disp_get_perf_dump_data(void *priv, u8 *buffer, u32 size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_perf_dump_data, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), buffer, size); +} + +static void nbl_disp_keep_alive_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_KEEP_ALIVE, + NULL, 0, NULL, 0, 1); + + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_keep_alive_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_KEEP_ALIVE, msg_id, + 0, NULL, 0); + + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_rep_queue_info_req(void *priv, u16 *queue_num, u16 *queue_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_queue_info result 
= {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REP_QUEUE_INFO, + NULL, 0, &result, sizeof(result), 1); + + if (!chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) { + *queue_num = result.queue_num; + *queue_size = result.queue_size; + } +} + +static void nbl_disp_chan_get_rep_queue_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_get_queue_info result = {0}; + int ret = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL(res_ops->get_rep_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result.queue_num, &result.queue_size)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REP_QUEUE_INFO, msg_id, + ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_rep_queue_info(void *priv, u16 *queue_num, u16 *queue_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_rep_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_num, queue_size)); +} + +static void nbl_disp_chan_get_user_queue_info_req(void *priv, u16 *queue_num, u16 *queue_size, + u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_queue_info result = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_USER_QUEUE_INFO, + &vsi_id, sizeof(vsi_id), &result, sizeof(result), 1); + + if (!chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) { + *queue_num = result.queue_num; + *queue_size = result.queue_size; + } +} + +static void nbl_disp_chan_get_user_queue_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_get_queue_info result = {0}; + int ret = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL(res_ops->get_user_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result.queue_num, + &result.queue_size, *(u16 *)data)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_USER_QUEUE_INFO, msg_id, + ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_user_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_num, queue_size, vsi_id)); +} + +static int nbl_disp_ctrl_port_led(void *priv, u8 eth_id, + enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops 
= NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->ctrl_port_led, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, led_ctrl, led_reg)); +} + +static int nbl_disp_chan_ctrl_port_led_req(void *priv, u8 eth_id, + enum nbl_led_reg_ctrl led_ctrl, + u32 *led_reg) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_ctrl_port_led param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.led_status = led_ctrl; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CTRL_PORT_LED, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_ctrl_port_led_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_ctrl_port_led *param = NULL; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_ctrl_port_led *)data; + ret = NBL_OPS_CALL(res_ops->ctrl_port_led, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->led_status, NULL)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CTRL_PORT_LED, msg_id, + ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_passthrough_fw_cmd(void *priv, struct nbl_passthrough_fw_cmd_param *param, + struct nbl_passthrough_fw_cmd_param *result) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->passthrough_fw_cmd, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, result)); +} + +static int nbl_disp_nway_reset(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->nway_reset, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id)); +} + +static int nbl_disp_chan_nway_reset_req(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_NWAY_RESET, + &eth_id, sizeof(eth_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_nway_reset_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u8 *eth_id; + int ret = NBL_CHAN_RESP_OK; + + eth_id = (u8 *)data; + ret = NBL_OPS_CALL(res_ops->nway_reset, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *eth_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_NWAY_RESET, msg_id, + ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 
nbl_disp_get_vf_base_vsi_id(void *priv, u16 func_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_vf_base_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id)); +} + +static u16 nbl_disp_chan_get_vf_base_vsi_id_req(void *priv, u16 func_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + u16 vf_base_vsi_id = 0; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, + NULL, 0, &vf_base_vsi_id, sizeof(vf_base_vsi_id), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return vf_base_vsi_id; +} + +static void nbl_disp_chan_get_vf_base_vsi_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vf_base_vsi_id; + + vf_base_vsi_id = NBL_OPS_CALL(res_ops->get_vf_base_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, msg_id, + ret, &vf_base_vsi_id, sizeof(vf_base_vsi_id)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_get_intr_suppress_level(void *priv, u64 pkt_rates, u16 last_level) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_intr_suppress_level, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), pkt_rates, last_level)); +} + +static void nbl_disp_set_intr_suppress_level(void *priv, u16 vector_id, u16 vector_num, u16 level) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_intr_suppress_level, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), common->mgt_pf, + vector_id, vector_num, level); +} + +static void nbl_disp_chan_set_intr_suppress_level_req(void *priv, u16 vector_id, + u16 vector_num, u16 level) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_intr_suppress_level param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.local_vector_id = vector_id; + param.vector_num = vector_num; + param.level = level; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_INTL_SUPPRESS_LEVEL, + &param, sizeof(param), NULL, 0, 0); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_intr_suppress_level_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_intr_suppress_level 
*param; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_intr_suppress_level *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_intr_suppress_level, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->local_vector_id, + param->vector_num, param->level); +} + +static u32 nbl_disp_get_p4_version(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_p4_version, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_get_p4_info(void *priv, char *verify_code) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_p4_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), verify_code)); +} + +static int nbl_disp_load_p4(void *priv, struct nbl_load_p4_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->load_p4, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); +} + +static int nbl_disp_load_p4_default(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->load_p4_default, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_chan_get_p4_used_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + int p4_type = 0; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_P4_USED, + NULL, 0, &p4_type, sizeof(p4_type), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return p4_type; +} + +static void nbl_disp_chan_get_p4_used_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + int p4_type; + + p4_type = NBL_OPS_CALL(res_ops->get_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_P4_USED, msg_id, + ret, &p4_type, sizeof(p4_type)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_p4_used(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_set_p4_used(void *priv, int p4_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), p4_type)); +} + +static int nbl_disp_chan_cfg_eth_bond_info_req(void *priv, struct nbl_lag_member_list_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops 
*chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_CFG_ETH_BOND_INFO, + param, sizeof(*param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_eth_bond_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + ret = NBL_OPS_CALL(res_ops->cfg_eth_bond_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + (struct nbl_lag_member_list_param *)data)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_ETH_BOND_INFO, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_cfg_eth_bond_info(void *priv, struct nbl_lag_member_list_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_eth_bond_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param)); +} + +static int nbl_disp_chan_add_nd_upcall_flow(void *priv, u16 vsi_id, bool for_pmd) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_nd_upcall_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, for_pmd); +} + +static int nbl_disp_chan_add_nd_upcall_flow_req(void *priv, u16 vsi_id, bool for_pmd) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = { 0 }; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_nd_upcall param = { 0 }; + + param.vsi_id = vsi_id; + param.for_pmd = for_pmd; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_ADD_ND_UPCALL_FLOW, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_nd_upcall_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_nd_upcall *param = + (struct nbl_chan_param_nd_upcall *)data; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_nd_upcall_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->for_pmd); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp set nd dup rule failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_ND_UPCALL_FLOW, + msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_ADD_ND_UPCALL_FLOW, src_id); 
+} + +static void nbl_disp_chan_del_nd_upcall_flow(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_nd_upcall_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_chan_del_nd_upcall_flow_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_DEL_ND_UPCALL_FLOW, + NULL, 0, NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_nd_upcall_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_nd_upcall_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_ND_UPCALL_FLOW, + msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_DEL_ND_UPCALL_FLOW, src_id); +} + +static int nbl_disp_chan_get_board_id_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + int result = -1; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_BOARD_ID, + NULL, 0, &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result; +} + +static void nbl_disp_chan_get_board_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK, result = -1; + + result = NBL_OPS_CALL(res_ops->get_board_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_BOARD_ID, + msg_id, ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_board_id(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_board_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_chan_register_rdma_bond_req(void *priv, + struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + 
struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_REGISTER_RDMA_BOND, + list_param, sizeof(*list_param), register_param, sizeof(*register_param), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_register_rdma_bond_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_lag_member_list_param *list_param = NULL; + struct nbl_rdma_register_param register_param = {0}; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + list_param = (struct nbl_lag_member_list_param *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_rdma_bond, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + list_param, &register_param); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_RDMA_BOND, + msg_id, ret, &register_param, sizeof(register_param)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_register_rdma_bond(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_rdma_bond, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), list_param, register_param); +} + +static void nbl_disp_chan_unregister_rdma_bond_req(void *priv, u16 lag_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_UNREGISTER_RDMA_BOND, &lag_id, sizeof(lag_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_unregister_rdma_bond_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_rdma_bond, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_RDMA_BOND, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_unregister_rdma_bond(void *priv, u16 lag_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_rdma_bond, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), lag_id); +} + +static dma_addr_t nbl_disp_restore_abnormal_ring(void *priv, int ring_index, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + 
return NBL_OPS_CALL(res_ops->restore_abnormal_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, type)); +} + +static int nbl_disp_restart_abnormal_ring(void *priv, int ring_index, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->restart_abnormal_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, type)); +} + +static int nbl_disp_chan_restore_hw_queue_req(void *priv, u16 vsi_id, u16 local_queue_id, + dma_addr_t dma, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_restore_hw_queue param = {0}; + struct nbl_chan_send_info chan_send = {0}; + + param.vsi_id = vsi_id; + param.local_queue_id = local_queue_id; + param.dma = dma; + param.type = type; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_RESTORE_HW_QUEUE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_restore_hw_queue_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_restore_hw_queue *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_restore_hw_queue *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->restore_hw_queue, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->local_queue_id, param->dma, param->type); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESTORE_HW_QUEUE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_restore_hw_queue(void *priv, u16 vsi_id, u16 local_queue_id, + dma_addr_t dma, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->restore_hw_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, local_queue_id, dma, type); +} + +static int +nbl_disp_chan_stop_abnormal_hw_queue_req(void *priv, u16 vsi_id, u16 local_queue_id, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_stop_abnormal_hw_queue param = {0}; + struct nbl_chan_send_info chan_send = {0}; + + param.vsi_id = vsi_id; + param.local_queue_id = local_queue_id; + param.type = type; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_STOP_ABNORMAL_HW_QUEUE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void +nbl_disp_chan_stop_abnormal_hw_queue_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct 
nbl_chan_param_stop_abnormal_hw_queue *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_stop_abnormal_hw_queue *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->stop_abnormal_hw_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->local_queue_id, + param->type); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_STOP_ABNORMAL_HW_QUEUE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_stop_abnormal_hw_queue(void *priv, u16 vsi_id, u16 local_queue_id, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->stop_abnormal_hw_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, local_queue_id, type); +} + +static int nbl_disp_stop_abnormal_sw_queue(void *priv, u16 local_queue_id, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->stop_abnormal_sw_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + local_queue_id, type); } -static void nbl_disp_del_lag_flow(void *priv, u16 vsi_id) +static u16 nbl_disp_get_local_queue_id(void *priv, u16 vsi_id, u16 global_queue_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lag_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + return NBL_OPS_CALL(res_ops->get_local_queue_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, global_queue_id)); } -static int nbl_disp_chan_add_lldp_flow_req(void *priv, u16 vsi_id) +static int nbl_disp_chan_get_eth_bond_info_req(void *priv, struct nbl_bond_param *param) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; - NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_LLDP_FLOW, &vsi_id, sizeof(vsi_id), - NULL, 0, 1); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_ETH_BOND_INFO, NULL, 0, param, sizeof(*param), 1); return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_add_lldp_flow_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_eth_bond_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_bond_param result; struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; - int ret = 0; + int ret = NBL_CHAN_RESP_OK; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lldp_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data); - if (ret) - err = NBL_CHAN_RESP_ERR; + memset(&result, 0, sizeof(result)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_LLDP_FLOW, msg_id, err, NULL, 0); - ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); - if (ret) - dev_err(dev, "channel 
send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_ADD_LLDP_FLOW); + NBL_OPS_CALL(res_ops->get_eth_bond_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_BOND_INFO, + msg_id, ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_add_lldp_flow(void *priv, u16 vsi_id) +static int nbl_disp_get_eth_bond_info(void *priv, struct nbl_bond_param *param) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lldp_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + return NBL_OPS_CALL(res_ops->get_eth_bond_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); } -static void nbl_disp_chan_del_lldp_flow_req(void *priv, u16 vsi_id) +static void nbl_disp_cfg_eth_bond_event(void *priv, bool enable) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_send_info chan_send; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_LLDP_FLOW, &vsi_id, sizeof(vsi_id), - NULL, 0, 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_OPS_CALL(res_ops->cfg_eth_bond_event, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable)); } -static void nbl_disp_chan_del_lldp_flow_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static int nbl_disp_set_bridge_mode(void *priv, u16 bmode) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; - int ret = 0; - - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lldp_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - *(u16 *)data); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_LLDP_FLOW, msg_id, err, NULL, 0); - ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); - if (ret) - dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_DEL_LLDP_FLOW); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_bridge_mode, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + NBL_COMMON_TO_MGT_PF(common), bmode); } -static void nbl_disp_del_lldp_flow(void *priv, u16 vsi_id) +static int nbl_disp_chan_set_bridge_mode_req(void *priv, u16 bmode) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lldp_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_BRIDGE_MODE, &bmode, sizeof(bmode), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static u32 nbl_disp_get_tx_headroom(void *priv) +static void nbl_disp_chan_set_bridge_mode_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct 
nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - u32 ret = 0; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 *bmode; - ret = NBL_OPS_CALL(res_ops->get_tx_headroom, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); - return ret; + bmode = (u16 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_bridge_mode, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, *bmode); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_BRIDGE_MODE, + msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static u8 __iomem *nbl_disp_get_hw_addr(void *priv, size_t *size) +static u16 nbl_disp_get_vf_function_id(void *priv, u16 vsi_id, int vf_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - u8 __iomem *addr = NULL; - addr = NBL_OPS_CALL(res_ops->get_hw_addr, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), size)); - return addr; + return NBL_OPS_CALL(res_ops->get_vf_function_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vf_id)); } -static u64 nbl_disp_get_real_hw_addr(void *priv, u16 vsi_id) +static u16 nbl_disp_chan_get_vf_function_id_req(void *priv, u16 vsi_id, int vf_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - u64 ret = 0; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_vf_func_id param; + struct nbl_common_info *common; + u16 func_id = 0; - ret = NBL_OPS_CALL(res_ops->get_real_hw_addr, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); - return ret; + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.vsi_id = vsi_id; + param.vf_id = vf_id; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_VF_FUNCTION_ID, &param, + sizeof(param), &func_id, sizeof(func_id), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return func_id; } -static u16 nbl_disp_get_function_id(void *priv, u16 vsi_id) +static void nbl_disp_chan_get_vf_function_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - u16 ret = 0; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_vf_func_id *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 func_id; - ret = NBL_OPS_CALL(res_ops->get_function_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); - return ret; + param = (struct nbl_chan_param_get_vf_func_id *)data; + func_id = NBL_OPS_CALL(res_ops->get_vf_function_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->vf_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_FUNCTION_ID, msg_id, + ret, &func_id, sizeof(func_id)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +static u16 nbl_disp_get_vf_vsi_id(void *priv, u16 vsi_id, int vf_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_real_bdf, - 
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, bus, dev, function)); + return NBL_OPS_CALL(res_ops->get_vf_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vf_id)); } -static bool nbl_disp_check_fw_heartbeat(void *priv) +static u16 nbl_disp_chan_get_vf_vsi_id_req(void *priv, u16 vsi_id, int vf_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - int ret = false; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_vf_vsi_id param; + struct nbl_common_info *common; + u16 vf_vsi = 0; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL(res_ops->check_fw_heartbeat, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); - return ret; -} + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.vsi_id = vsi_id; + param.vf_id = vf_id; -static bool nbl_disp_check_fw_reset(void *priv) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_VF_VSI_ID, &param, + sizeof(param), &vf_vsi, sizeof(vf_vsi), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->check_fw_reset, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return vf_vsi; } -static int nbl_disp_flash_lock(void *priv) +static void nbl_disp_chan_get_vf_vsi_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_vf_vsi_id *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vsi_id; - return NBL_OPS_CALL(res_ops->flash_lock, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + param = (struct nbl_chan_param_get_vf_vsi_id *)data; + vsi_id = NBL_OPS_CALL(res_ops->get_vf_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->vf_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_VSI_ID, msg_id, + ret, &vsi_id, sizeof(vsi_id)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_flash_unlock(void *priv) +static void nbl_disp_register_func_mac(void *priv, u8 *mac, u16 func_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->flash_unlock, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_OPS_CALL(res_ops->register_func_mac, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, func_id)); } -static int nbl_disp_flash_prepare(void *priv) +static bool nbl_disp_check_vf_is_active(void *priv, u16 func_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_resource_ops *res_ops; + int ret = false; - return NBL_OPS_CALL(res_ops->flash_prepare, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->check_vf_is_active, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id)); + return ret; } -static int nbl_disp_flash_image(void *priv, u32 module, const u8 *data, size_t len) +static bool nbl_disp_chan_check_vf_is_active_req(void *priv, u16 func_id) { struct nbl_dispatch_mgt *disp_mgt = (struct 
nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + bool is_active = false; - return NBL_OPS_CALL(res_ops->flash_image, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), module, data, len)); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_CHECK_VF_IS_ACTIVE, &func_id, sizeof(func_id), + &is_active, sizeof(is_active), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return is_active; } -static int nbl_disp_flash_activate(void *priv) +static void nbl_disp_chan_check_vf_is_active_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_chan_ack_info chan_ack; + u16 func_id; + bool is_active; + int err = NBL_CHAN_RESP_OK; + int ret = 0; - return NBL_OPS_CALL(res_ops->flash_activate, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + func_id = *(u16 *)data; + + is_active = NBL_OPS_CALL(res_ops->check_vf_is_active, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_CHECK_VF_IS_ACTIVE, msg_id, + err, &is_active, sizeof(is_active)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_CHECK_VF_IS_ACTIVE); } -static int nbl_disp_set_eth_loopback(void *priv, u8 enable) +static int nbl_disp_check_vf_is_vdpa(void *priv, u16 func_id, u8 *is_vdpa) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - u8 eth_id = NBL_DISP_MGT_TO_COMMON(disp_mgt)->eth_id; + struct nbl_resource_ops *res_ops; + int ret = false; - return NBL_OPS_CALL(res_ops->setup_loopback, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, enable)); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->check_vf_is_vdpa, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, is_vdpa)); + return ret; } -static int nbl_disp_chan_set_eth_loopback_req(void *priv, u8 enable) +static int nbl_disp_chan_check_vf_is_vdpa_req(void *priv, u16 func_id, u8 *is_vdpa) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_set_eth_loopback param = {0}; - struct nbl_chan_send_info chan_send; - struct nbl_common_info *common; - - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - - param.eth_port_id = NBL_DISP_MGT_TO_COMMON(disp_mgt)->eth_id; - param.enable = enable; + struct nbl_chan_send_info chan_send = {0}; - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_ETH_LOOPBACK, &param, - sizeof(param), NULL, 0, 1); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_CHECK_VF_IS_VDPA, &func_id, sizeof(func_id), + is_vdpa, sizeof(*is_vdpa), 1); return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_set_eth_loopback_resp(void *priv, u16 src_id, u16 msg_id, +static void nbl_disp_chan_check_vf_is_vdpa_resp(void *priv, u16 
src_id, u16 msg_id, void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; struct nbl_chan_ack_info chan_ack; - struct nbl_chan_param_set_eth_loopback *param; + u16 func_id; int err = NBL_CHAN_RESP_OK; + u8 is_vdpa = 0; int ret = 0; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - - param = (struct nbl_chan_param_set_eth_loopback *)data; - ret = NBL_OPS_CALL(res_ops->setup_loopback, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_port_id, param->enable)); - if (ret) - dev_err(dev, "setup loopback adminq failed with ret: %d\n", ret); + func_id = *(u16 *)data; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_ETH_LOOPBACK, - msg_id, err, NULL, 0); + err = NBL_OPS_CALL(res_ops->check_vf_is_vdpa, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, &is_vdpa)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_CHECK_VF_IS_VDPA, msg_id, + err, &is_vdpa, sizeof(is_vdpa)); ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); if (ret) dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_SET_ETH_LOOPBACK); + ret, NBL_CHAN_CHECK_VF_IS_VDPA); +} + +static int nbl_disp_get_vdpa_vf_stats(void *priv, u16 func_id, struct nbl_vf_stats *vf_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = false; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->get_vdpa_vf_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, vf_stats)); + return ret; +} + +static int nbl_disp_chan_get_vdpa_vf_stats_req(void *priv, u16 func_id, + struct nbl_vf_stats *vf_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_VDPA_VF_STATS, + &func_id, sizeof(func_id), vf_stats, sizeof(*vf_stats), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static struct sk_buff *nbl_disp_clean_rx_lb_test(void *priv, u32 ring_index) +static void nbl_disp_chan_get_vdpa_vf_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u16 func_id; + struct nbl_vf_stats vf_stats = {0}; + int err = NBL_CHAN_RESP_OK; - return NBL_OPS_CALL(res_ops->clean_rx_lb_test, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + func_id = *(u16 *)data; + + err = NBL_OPS_CALL(res_ops->get_vdpa_vf_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, &vf_stats)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VDPA_VF_STATS, msg_id, + err, &vf_stats, sizeof(vf_stats)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static u32 nbl_disp_check_active_vf(void *priv) +static int nbl_disp_get_uvn_pkt_drop_stats(void *priv, u16 vsi_id, + u16 num_queues, u32 
*uvn_stat_pkt_drop) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; - return NBL_OPS_CALL(res_ops->check_active_vf, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0)); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_uvn_pkt_drop_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, num_queues, uvn_stat_pkt_drop); + return ret; } -static u32 nbl_disp_chan_check_active_vf_req(void *priv) +static int nbl_disp_chan_get_uvn_pkt_drop_stats_req(void *priv, u16 vsi_id, u16 num_queues, + u32 *uvn_stat_pkt_drop) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct device *dev = NBL_DISP_MGT_TO_DEV(disp_mgt); - u32 active_vf_num = 0; - int ret; - struct nbl_chan_send_info chan_send; - struct nbl_common_info *common; - - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_uvn_pkt_drop_stats param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CHECK_ACTIVE_VF, NULL, 0, - &active_vf_num, sizeof(active_vf_num), 1); - ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - if (ret) - dev_err(dev, "channel check active vf send msg failed with ret: %d\n", ret); + param.vsi_id = vsi_id; + param.num_queues = num_queues; - return active_vf_num; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_GET_UVN_PKT_DROP_STATS, + &param, sizeof(param), + uvn_stat_pkt_drop, num_queues * sizeof(*uvn_stat_pkt_drop), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_check_active_vf_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_uvn_pkt_drop_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_uvn_pkt_drop_stats *param = NULL; struct nbl_chan_ack_info chan_ack; - u32 active_vf_num; + u32 *uvn_stat_pkt_drop = NULL; int err = NBL_CHAN_RESP_OK; - int ret = 0; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_get_uvn_pkt_drop_stats *)data; + uvn_stat_pkt_drop = kcalloc(param->num_queues, sizeof(*uvn_stat_pkt_drop), GFP_KERNEL); + if (!uvn_stat_pkt_drop) { + err = -ENOMEM; + goto send_ack; + } - active_vf_num = NBL_OPS_CALL(res_ops->check_active_vf, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id)); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_uvn_pkt_drop_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->num_queues, uvn_stat_pkt_drop); +send_ack: + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_GET_UVN_PKT_DROP_STATS, msg_id, + err, uvn_stat_pkt_drop, uvn_stat_pkt_drop ? + param->num_queues * sizeof(*uvn_stat_pkt_drop) : 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CHECK_ACTIVE_VF, - msg_id, err, &active_vf_num, sizeof(active_vf_num)); - ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); - if (ret) 
- dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_SET_ETH_LOOPBACK); + kfree(uvn_stat_pkt_drop); } -static u32 nbl_disp_get_adminq_tx_buf_size(void *priv) +static int nbl_disp_get_ustore_pkt_drop_stats(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; - return chan_ops->get_adminq_tx_buf_size(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt)); + ret = NBL_OPS_CALL(res_ops->get_ustore_pkt_drop_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; } -static bool nbl_disp_get_product_flex_cap(void *priv, enum nbl_flex_cap_type cap_type) +static int nbl_disp_chan_get_ustore_pkt_drop_stats_req(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - bool has_cap = false; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - has_cap = NBL_OPS_CALL(res_ops->get_product_flex_cap, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - cap_type)); - return has_cap; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_GET_USTORE_PKT_DROP_STATS, + NULL, 0, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static bool nbl_disp_chan_get_product_flex_cap_req(void *priv, enum nbl_flex_cap_type cap_type) +static void nbl_disp_chan_get_ustore_pkt_drop_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_send_info chan_send = {0}; - struct nbl_common_info *common; - bool has_cap = false; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + err = NBL_OPS_CALL(res_ops->get_ustore_pkt_drop_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_GET_USTORE_PKT_DROP_STATS, msg_id, + err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, &cap_type, - sizeof(cap_type), &has_cap, sizeof(has_cap), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +static int nbl_disp_get_ustore_total_pkt_drop_stats(void *priv, u8 eth_id, + struct nbl_ustore_stats *ustore_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; - return has_cap; + ret = NBL_OPS_CALL(res_ops->get_ustore_total_pkt_drop_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, ustore_stats)); + + return ret; } -static void nbl_disp_chan_get_product_flex_cap_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static int nbl_disp_chan_get_ustore_total_pkt_drop_stats_req(void *priv, u8 eth_id, + struct nbl_ustore_stats *ustore_stats) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - enum 
nbl_flex_cap_type *cap_type = (enum nbl_flex_cap_type *)data; - struct nbl_chan_ack_info chan_ack = {0}; - bool has_cap = false; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - has_cap = NBL_OPS_CALL(res_ops->get_product_flex_cap, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *cap_type)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, msg_id, - NBL_CHAN_RESP_OK, &has_cap, sizeof(has_cap)); - chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_GET_USTORE_TOTAL_PKT_DROP_STATS, + &eth_id, sizeof(eth_id), ustore_stats, sizeof(*ustore_stats), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static bool nbl_disp_get_product_fix_cap(void *priv, enum nbl_fix_cap_type cap_type) +static void nbl_disp_chan_get_ustore_total_pkt_drop_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - bool has_cap = false; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u8 eth_id; + struct nbl_ustore_stats ustore_stats = {0}; + int err = NBL_CHAN_RESP_OK; - has_cap = NBL_OPS_CALL(res_ops->get_product_fix_cap, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - cap_type)); - return has_cap; + eth_id = *(u8 *)data; + + err = NBL_OPS_CALL(res_ops->get_ustore_total_pkt_drop_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &ustore_stats)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_GET_USTORE_TOTAL_PKT_DROP_STATS, msg_id, + err, &ustore_stats, sizeof(ustore_stats)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_mbx_irq_num(void *priv) +static void nbl_disp_chan_register_func_mac_req(void *priv, u8 *mac, u16 func_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_register_func_mac param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - return NBL_OPS_CALL(res_ops->get_mbx_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + param.func_id = func_id; + ether_addr_copy(param.mac, mac); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REGISTER_FUNC_MAC, &param, sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static int nbl_disp_get_adminq_irq_num(void *priv) +static void nbl_disp_chan_register_func_mac_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_func_mac *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - return NBL_OPS_CALL(res_ops->get_adminq_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + param = (struct nbl_chan_param_register_func_mac *)data; + NBL_OPS_CALL(res_ops->register_func_mac, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->mac, param->func_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_MAC, msg_id, ret, NULL, 0); + 
chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_abnormal_irq_num(void *priv) +static int nbl_disp_register_func_trust(void *priv, u16 func_id, + bool trusted, bool *should_notify) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->get_abnormal_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return NBL_OPS_CALL(res_ops->register_func_trust, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, + trusted, should_notify)); } -static void nbl_disp_clear_flow(void *priv, u16 vsi_id) +static int nbl_disp_chan_register_func_trust_req(void *priv, u16 func_id, + bool trusted, bool *should_notify) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_register_trust param; + bool result; + int ret; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + param.func_id = func_id; + param.trusted = trusted; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REGISTER_FUNC_TRUST, &param, sizeof(param), + &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (!ret) + *should_notify = result; + + return ret; } -static void nbl_disp_clear_queues(void *priv, u16 vsi_id) +static void nbl_disp_chan_register_func_trust_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_trust *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + bool notify = false; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_queues, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + param = (struct nbl_chan_param_register_trust *)data; + ret = NBL_OPS_CALL(res_ops->register_func_trust, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, + param->trusted, &notify)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_TRUST, + msg_id, ret, &notify, sizeof(notify)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static u16 nbl_disp_get_vsi_global_qid(void *priv, u16 vsi_id, u16 local_qid) +static int nbl_disp_register_func_vlan(void *priv, u16 func_id, u16 vlan_tci, + u16 vlan_proto, bool *should_notify) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->get_vsi_global_queue_id, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_qid)); + return NBL_OPS_CALL(res_ops->register_func_vlan, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, vlan_tci, + vlan_proto, should_notify)); } -static u16 -nbl_disp_chan_get_vsi_global_qid_req(void *priv, u16 vsi_id, u16 local_qid) +static int nbl_disp_chan_register_func_vlan_req(void *priv, u16 func_id, u16 vlan_tci, + u16 vlan_proto, bool *should_notify) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_vsi_qid_info param = {0}; - 
struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_register_vlan param; + bool result; + int ret; - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param.func_id = func_id; + param.vlan_tci = vlan_tci; + param.vlan_proto = vlan_proto; - param.vsi_id = vsi_id; - param.local_qid = local_qid; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REGISTER_FUNC_VLAN, &param, sizeof(param), + &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (!ret) + *should_notify = result; - NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, - &param, sizeof(param), NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + return ret; } -static void -nbl_disp_chan_get_vsi_global_qid_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_register_func_vlan_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_vsi_qid_info *param; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_vlan *param; struct nbl_chan_ack_info chan_ack; - u16 global_qid; + int ret = NBL_CHAN_RESP_OK; + bool notify = false; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_register_vlan *)data; + ret = NBL_OPS_CALL(res_ops->register_func_vlan, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, + param->vlan_tci, param->vlan_proto, &notify)); - param = (struct nbl_chan_vsi_qid_info *)data; - global_qid = NBL_OPS_CALL(res_ops->get_vsi_global_queue_id, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->vsi_id, param->local_qid)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, - msg_id, global_qid, NULL, 0); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_VLAN, + msg_id, ret, &notify, sizeof(notify)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void -nbl_disp_chan_get_board_info_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static int nbl_disp_register_func_rate(void *priv, u16 func_id, int rate) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_ack_info chan_ack; - struct nbl_board_port_info board_info = {0}; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_board_info, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &board_info)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_BOARD_INFO, - msg_id, 0, &board_info, sizeof(board_info)); - chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + return NBL_OPS_CALL(res_ops->register_func_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, rate)); } -static int nbl_disp_get_port_attributes(void *priv) +static int nbl_disp_chan_register_func_rate_req(void *priv, u16 func_id, int tx_rate) { struct nbl_dispatch_mgt
*disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - int ret = 0; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_tx_rate param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - ret = NBL_OPS_CALL(res_ops->get_port_attributes, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); - if (ret) - dev_err(dev, "get port attributes failed with ret: %d\n", ret); + param.func_id = func_id; + param.tx_rate = tx_rate; - return ret; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REGISTER_FUNC_RATE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static int nbl_disp_update_ring_num(void *priv) +static void nbl_disp_chan_register_func_rate_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_tx_rate *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - return NBL_OPS_CALL(res_ops->update_ring_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + param = (struct nbl_chan_param_set_tx_rate *)data; + ret = NBL_OPS_CALL(res_ops->register_func_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, param->tx_rate)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_RATE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_set_ring_num(void *priv, struct nbl_fw_cmd_ring_num_param *param) +static int nbl_disp_set_tx_rate(void *priv, u16 func_id, int tx_rate, int burst) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->set_ring_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); + return NBL_OPS_CALL(res_ops->set_tx_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, tx_rate, burst)); } -static int nbl_disp_enable_port(void *priv, bool enable) +static int nbl_disp_chan_set_tx_rate_req(void *priv, u16 func_id, int tx_rate, int burst) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - int ret = 0; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_txrx_rate param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - ret = NBL_OPS_CALL(res_ops->enable_port, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable)); - if (ret) - dev_err(dev, "enable port failed with ret: %d\n", ret); + param.func_id = func_id; + param.txrx_rate = tx_rate; + param.burst = burst; - return ret; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_TX_RATE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_recv_port_notify_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_set_tx_rate_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32
data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_txrx_rate *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - res_ops->recv_port_notify(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data); + param = (struct nbl_chan_param_set_txrx_rate *)data; + ret = NBL_OPS_CALL(res_ops->set_tx_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, + param->txrx_rate, param->burst)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_TX_RATE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_port_state(void *priv, u8 eth_id, - struct nbl_port_state *port_state) +static int nbl_disp_set_rx_rate(void *priv, u16 func_id, int rx_rate, int burst) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - int ret = 0; - ret = NBL_OPS_CALL(res_ops->get_port_state, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, port_state)); - return ret; + return NBL_OPS_CALL(res_ops->set_rx_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, rx_rate, burst)); } -static int nbl_disp_chan_get_port_state_req(void *priv, u8 eth_id, - struct nbl_port_state *port_state) +static int nbl_disp_chan_set_rx_rate_req(void *priv, u16 func_id, int rx_rate, int burst) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_common_info *common; + struct nbl_chan_param_set_txrx_rate param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.func_id = func_id; + param.txrx_rate = rx_rate; + param.burst = burst; - NBL_CHAN_SEND(chan_send, common->mgt_pf, - NBL_CHAN_MSG_GET_PORT_STATE, &eth_id, sizeof(eth_id), - port_state, sizeof(*port_state), 1); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_RX_RATE, &param, sizeof(param), NULL, 0, 1); return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_port_state_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_set_rx_rate_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_txrx_rate *param; struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; - struct nbl_port_state info = {0}; - int ret = 0; - u8 eth_id; - - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + int ret = NBL_CHAN_RESP_OK; - eth_id = *(u8 *)data; - ret = NBL_OPS_CALL(res_ops->get_port_state, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &info)); - if (ret) - err = NBL_CHAN_RESP_ERR; + param = (struct nbl_chan_param_set_txrx_rate *)data; + ret = NBL_OPS_CALL(res_ops->set_rx_rate, +
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, + param->txrx_rate, param->burst)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PORT_STATE, msg_id, err, - &info, sizeof(info)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_RX_RATE, msg_id, ret, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_set_port_advertising(void *priv, - struct nbl_port_advertising *port_advertising) +static int nbl_disp_register_func_link_forced(void *priv, u16 func_id, u8 link_forced, + bool *should_notify) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - int ret = 0; - ret = NBL_OPS_CALL(res_ops->set_port_advertising, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), port_advertising)); - return ret; + return NBL_OPS_CALL(res_ops->register_func_link_forced, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, + link_forced, should_notify)); } -static int nbl_disp_chan_set_port_advertising_req(void *priv, - struct nbl_port_advertising *port_advertising) +static int nbl_disp_chan_register_func_link_forced_req(void *priv, u16 func_id, u8 link_forced, + bool *should_notify) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_common_info *common; + struct nbl_chan_param_register_func_link_forced param; + struct nbl_chan_param_register_func_link_forced result; + int ret = 0; - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.func_id = func_id; + param.link_forced = link_forced; - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_PORT_ADVERTISING, - port_advertising, sizeof(*port_advertising), - NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, &param, sizeof(param), + &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + return ret; + + *should_notify = result.should_notify; + return 0; } -static void nbl_disp_chan_set_port_advertising_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_register_func_link_forced_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; - struct nbl_port_advertising *param; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_func_link_forced *param; + struct nbl_chan_param_register_func_link_forced result = {0}; struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; - int ret = 0; - - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - - param = (struct nbl_port_advertising *)data; + int ret = NBL_CHAN_RESP_OK; - ret = res_ops->set_port_advertising(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); - if (ret) - err = NBL_CHAN_RESP_ERR; + param = (struct nbl_chan_param_register_func_link_forced *)data; + ret = NBL_OPS_CALL(res_ops->register_func_link_forced, +
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->func_id, param->link_forced, &result.should_notify)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_PORT_ADVERTISING, msg_id, err, NULL, 0); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, + msg_id, ret, &result, sizeof(result)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_module_info(void *priv, u8 eth_id, struct ethtool_modinfo *info) +static int nbl_disp_get_link_forced(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return res_ops->get_module_info(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, info); + return NBL_OPS_CALL(res_ops->get_link_forced, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); } -static int nbl_disp_chan_get_module_info_req(void *priv, u8 eth_id, struct ethtool_modinfo *info) +static int nbl_disp_chan_get_link_forced_req(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_common_info *common; + int link_forced = 0; - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_LINK_FORCED, &vsi_id, sizeof(vsi_id), + &link_forced, sizeof(link_forced), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - NBL_CHAN_SEND(chan_send, common->mgt_pf, - NBL_CHAN_MSG_GET_MODULE_INFO, &eth_id, - sizeof(eth_id), info, sizeof(*info), 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + return link_forced; } -static void nbl_disp_chan_get_module_info_resp(void *priv, u16 src_id, u16 msg_id, +static void nbl_disp_chan_get_link_forced_resp(void *priv, u16 src_id, u16 msg_id, void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; - struct ethtool_modinfo info; int ret = 0; - u8 eth_id; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - - eth_id = *(u8 *)data; - - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_module_info, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &info); - if (ret) - err = NBL_CHAN_RESP_ERR; + ret = NBL_OPS_CALL(res_ops->get_link_forced, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_INFO, msg_id, err, - &info, sizeof(info)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_FORCED, + msg_id, NBL_CHAN_RESP_OK, &ret, sizeof(ret)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_module_eeprom(void *priv, u8 eth_id, - struct ethtool_eeprom *eeprom, u8 *data) +static void nbl_disp_get_driver_version(void *priv, char *ver, int len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return
res_ops->get_module_eeprom(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, eeprom, data); + NBL_OPS_CALL(res_ops->get_driver_version, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ver, len)); } -static int nbl_disp_chan_get_module_eeprom_req(void *priv, u8 eth_id, - struct ethtool_eeprom *eeprom, u8 *data) +static void nbl_disp_setup_rdma_id(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_get_module_eeprom param = {0}; - struct nbl_chan_send_info chan_send = {0}; - struct nbl_common_info *common; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + NBL_OPS_CALL(res_ops->setup_rdma_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} - param.eth_id = eth_id; - memcpy(&param.eeprom, eeprom, sizeof(struct ethtool_eeprom)); +static void nbl_disp_remove_rdma_id(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_MODULE_EEPROM, &param, - sizeof(param), data, eeprom->len, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_OPS_CALL(res_ops->remove_rdma_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); } -static void nbl_disp_chan_get_module_eeprom_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static int nbl_disp_get_max_mtu(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_get_module_eeprom *param; - struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; - u8 eth_id; - struct ethtool_eeprom *eeprom; - u8 *recv_data; int ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - - param = (struct nbl_chan_param_get_module_eeprom *)data; - eth_id = param->eth_id; - eeprom = &param->eeprom; - recv_data = kmalloc(eeprom->len, GFP_ATOMIC); - if (!recv_data) { - dev_err(dev, "Allocate memory to store module eeprom failed\n"); - return; - } - - ret = res_ops->get_module_eeprom(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - eth_id, eeprom, recv_data); - if (ret) { - err = NBL_CHAN_RESP_ERR; - dev_err(dev, "Get module eeprom failed with ret: %d\n", ret); - } - - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_EEPROM, msg_id, err, - recv_data, eeprom->len); - ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); - if (ret) - dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id: %d\n", - ret, NBL_CHAN_MSG_GET_MODULE_EEPROM, src_id); - kfree(recv_data); + ret = NBL_OPS_CALL(res_ops->get_max_mtu, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; } -static int nbl_disp_get_link_state(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info) +static int nbl_disp_set_mtu(void *priv, u16 vsi_id, u16 mtu) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; int ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - - /* if donot have res_ops->get_link_state(), default eth is up */ - if (res_ops->get_link_state) - ret = res_ops->get_link_state(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - eth_id, eth_link_info); - else - eth_link_info->link_status = 1; - + ret = NBL_OPS_CALL(res_ops->set_mtu,
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, mtu)); return ret; } -static int nbl_disp_chan_get_link_state_req(void *priv, u8 eth_id, - struct nbl_eth_link_info *eth_link_info) +static int nbl_disp_chan_set_mtu_req(void *priv, u16 vsi_id, u16 mtu) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_send_info chan_send; - struct nbl_common_info *common; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_mtu param = {0}; - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.mtu = mtu; + param.vsi_id = vsi_id; - NBL_CHAN_SEND(chan_send, common->mgt_pf, - NBL_CHAN_MSG_GET_LINK_STATE, &eth_id, - sizeof(eth_id), eth_link_info, sizeof(*eth_link_info), 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_MTU_SET, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), + &chan_send); } -static void nbl_disp_chan_get_link_state_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_set_mtu_resp(void *priv, + u16 src_id, u16 msg_id, void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_mtu *param = NULL; int err = NBL_CHAN_RESP_OK; - u8 eth_id; - struct nbl_eth_link_info eth_link_info = {0}; - int ret = 0; - - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - eth_id = *(u8 *)data; - ret = res_ops->get_link_state(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - eth_id, &eth_link_info); - if (ret) - err = NBL_CHAN_RESP_ERR; + param = (struct nbl_chan_param_set_mtu *)data; + err = NBL_OPS_CALL(res_ops->set_mtu, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->mtu)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_STATE, msg_id, err, - &eth_link_info, sizeof(eth_link_info)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_MTU_SET, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_get_reg_dump(void *priv, u32 *data, u32 len) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - - NBL_OPS_CALL(res_ops->get_reg_dump, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data, len)); -} - -static void nbl_disp_chan_get_reg_dump_req(void *priv, u32 *data, u32 len) +static int nbl_disp_chan_get_fd_flow_req(void *priv, u16 vsi_id, u32 location, + enum nbl_chan_fdir_rule_type rule_type, + struct nbl_chan_param_fdir_replace *cmd) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_send_info chan_send; - struct nbl_common_info *common; - u32 *result = NULL; - - result = kmalloc(len, GFP_KERNEL); - if (!result) - return; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct
nbl_chan_param_get_fd_flow param = {0}; + int ret = 0; - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.vsi_id = vsi_id; + param.location = location; + param.rule_type = rule_type; - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REG_DUMP, &len, sizeof(len), - result, len, 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_FD_FLOW, &param, + sizeof(param), cmd, NBL_CHAN_FDIR_FLOW_RULE_SIZE, 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + return ret; - memcpy(data, result, len); - kfree(result); + return 0; } -static void nbl_disp_chan_get_reg_dump_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_fd_flow *param = NULL; + struct nbl_chan_param_fdir_replace *result; struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; - u32 *result = NULL; - u32 len = 0; - - len = *(u32 *)data; - result = kmalloc(len, GFP_KERNEL); - if (!result) - return; - - NBL_OPS_CALL(res_ops->get_reg_dump, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), result, len)); + int ret = 0; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REG_DUMP, msg_id, err, result, len); + result = kzalloc(NBL_CHAN_FDIR_FLOW_RULE_SIZE, GFP_KERNEL); + if (!result) { + ret = -ENOMEM; + goto send_ack; + } + param = (struct nbl_chan_param_get_fd_flow *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->location, + param->rule_type, result); +send_ack: + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW, msg_id, + ret, result, sizeof(*result) + result->tlv_length); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); kfree(result); } -static int nbl_disp_get_reg_dump_len(void *priv) +static int nbl_disp_get_fd_flow(void *priv, u16 vsi_id, u32 location, + enum nbl_chan_fdir_rule_type rule_type, + struct nbl_chan_param_fdir_replace *cmd) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->get_reg_dump_len, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, location, + rule_type, cmd); } -static int nbl_disp_chan_get_reg_dump_len_req(void *priv) +static int nbl_disp_chan_get_fd_flow_cnt_req(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_send_info chan_send; - struct nbl_common_info *common; - int result = 0; - - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_fdir_flowcnt param; + int result = 0, ret = 0; - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REG_DUMP_LEN, NULL, 0, - &result, sizeof(result), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + param.rule_type = rule_type; +
param.vsi = vsi_id; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_FD_FLOW_CNT, &param, + sizeof(param), &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + return ret; return result; } -static void nbl_disp_chan_get_reg_dump_len_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_fd_flow_cnt_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; - int result = 0; - - result = NBL_OPS_CALL(res_ops->get_reg_dump_len, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + struct nbl_chan_param_fdir_flowcnt *param; + int result = 0, err = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_fdir_flowcnt *)data; + result = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_cnt, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->rule_type, param->vsi); + if (result < 0) { + err = result; + result = 0; + } - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REG_DUMP_LEN, msg_id, err, - &result, sizeof(result)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_CNT, msg_id, + err, &result, sizeof(result)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) +static int nbl_disp_get_fd_flow_cnt(void *priv, enum nbl_chan_fdir_rule_type rule_type, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->set_eth_mac_addr, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, eth_id)); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_cnt, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, vsi_id); } -static int nbl_disp_chan_set_eth_mac_addr_req(void *priv, u8 *mac, u8 eth_id) +static int nbl_disp_chan_get_fd_flow_all_req(void *priv, + struct nbl_chan_param_get_fd_flow_all *param, + u32 *rule_locs) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_set_eth_mac_addr param; - struct nbl_chan_send_info chan_send; - struct nbl_common_info *common; - - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - - memcpy(param.mac, mac, sizeof(param.mac)); - param.eth_id = eth_id; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_result_get_fd_flow_all *result = NULL; + int ret = 0; - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, - &param, sizeof(param), NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + result = (struct nbl_chan_result_get_fd_flow_all *)rule_locs; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_FD_FLOW_ALL, param, + sizeof(*param), result, sizeof(*result), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + goto send_fail; +send_fail: + return ret; } -static void nbl_disp_chan_set_eth_mac_addr_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void
nbl_disp_chan_get_fd_flow_all_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_set_eth_mac_addr *param; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_fd_flow_all *param = NULL; + struct nbl_chan_result_get_fd_flow_all *result = NULL; struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; int ret = 0; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - param = (struct nbl_chan_param_set_eth_mac_addr *)data; + result = kzalloc(sizeof(*result), GFP_KERNEL); + if (!result) { + ret = -ENOMEM; + goto send_ack; + } - ret = NBL_OPS_CALL(res_ops->set_eth_mac_addr, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->mac, param->eth_id)); - if (ret) - err = NBL_CHAN_RESP_ERR; + param = (struct nbl_chan_param_get_fd_flow_all *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_all, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, result->rule_locs); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, msg_id, err, NULL, 0); - ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); - if (ret) - dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_SET_ETH_MAC_ADDR); +send_ack: + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_ALL, msg_id, + ret, result, sizeof(*result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + + kfree(result); } -static u32 nbl_disp_get_chip_temperature(void *priv) +static int nbl_disp_get_fd_flow_all(void *priv, struct nbl_chan_param_get_fd_flow_all *param, + u32 *rule_locs) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return res_ops->get_chip_temperature(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_all, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, rule_locs); } -static u32 nbl_disp_chan_get_chip_temperature_req(void *priv) +static int nbl_disp_chan_get_fd_flow_max_req(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_common_info *common; - u32 chip_tempetature = 0; - - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + int ret = 0, result = 0; - NBL_CHAN_SEND(chan_send, common->mgt_pf, - NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, NULL, 0, - &chip_tempetature, sizeof(chip_tempetature), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_FD_FLOW_MAX, NULL, 0, &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + return ret; - return chip_tempetature; + return result; } -static void nbl_disp_chan_get_chip_temperature_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_fd_flow_max_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct 
nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_ack_info chan_ack; - int ret = NBL_CHAN_RESP_OK; - u32 chip_tempetature = 0; + int result = 0, err = NBL_CHAN_RESP_OK; - chip_tempetature = NBL_OPS_CALL(res_ops->get_chip_temperature, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, msg_id, - ret, &chip_tempetature, sizeof(chip_tempetature)); + result = NBL_OPS_CALL(res_ops->get_fd_flow_max, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + if (result < 0) { + err = result; + result = 0; + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_MAX, msg_id, + err, &result, sizeof(result)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static u32 nbl_disp_get_chip_temperature_max(void *priv) +static int nbl_disp_get_fd_flow_max(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return res_ops->get_chip_temperature_max(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + return NBL_OPS_CALL(res_ops->get_fd_flow_max, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_chan_replace_fd_flow_req(void *priv, struct nbl_chan_param_fdir_replace *info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REPLACE_FD_FLOW, info, + sizeof(struct nbl_chan_param_fdir_replace) + info->tlv_length, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static u32 nbl_disp_get_chip_temperature_crit(void *priv) +static void nbl_disp_chan_replace_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_fdir_replace *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + param = (struct nbl_chan_param_fdir_replace *)data; - return res_ops->get_chip_temperature_crit(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->replace_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REPLACE_FD_FLOW, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_module_temperature(void *priv, u8 eth_id, - enum nbl_module_temp_type type) +static int nbl_disp_replace_fd_flow(void *priv, struct nbl_chan_param_fdir_replace *info) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->get_module_temperature, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, type)); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->replace_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), info); } -static int nbl_disp_chan_get_module_temperature_req(void *priv, u8 eth_id, - enum nbl_module_temp_type type) +static int nbl_disp_chan_remove_fd_flow_req(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 
loc, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - int module_temp; - struct nbl_chan_param_get_module_tempetature param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_fdir_del param = {0}; struct nbl_chan_send_info chan_send = {0}; - struct nbl_common_info *common; - - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - param.eth_id = eth_id; - param.type = type; - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, - &param, sizeof(param), &module_temp, sizeof(module_temp), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + param.rule_type = rule_type; + param.location = loc; + param.vsi = vsi_id; - return module_temp; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REMOVE_FD_FLOW, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_module_temperature_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_remove_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - int module_temp; - struct nbl_chan_param_get_module_tempetature *param; + struct nbl_chan_param_fdir_del *param = NULL; struct nbl_chan_ack_info chan_ack; - int ret = NBL_CHAN_RESP_OK; + int ret = 0; - param = (struct nbl_chan_param_get_module_tempetature *)data; - module_temp = NBL_OPS_CALL(res_ops->get_module_temperature, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->eth_id, param->type)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, msg_id, - ret, &module_temp, sizeof(module_temp)); + param = (struct nbl_chan_param_fdir_del *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->rule_type, + param->location, param->vsi); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_FD_FLOW, msg_id, ret, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnomal_info) +static int nbl_disp_remove_fd_flow(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 loc, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return res_ops->process_abnormal_event(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), abnomal_info); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, loc, vsi_id); } -static void nbl_disp_adapt_desc_gother(void *priv) +static int nbl_disp_chan_config_fd_flow_state_req(void *priv, + enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id, u16 state) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_config_fd_flow_state param = {0}; + struct nbl_chan_send_info chan_send = {0}; - NBL_OPS_CALL(res_ops->adapt_desc_gother,
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + param.rule_type = rule_type; + param.vsi_id = vsi_id; + param.state = state; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CFG_FD_FLOW_STATE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_flr_clear_net(void *priv, u16 vf_id) +static void nbl_disp_chan_config_fd_flow_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_config_fd_flow_state *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = 0; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_net, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); + param = (struct nbl_chan_param_config_fd_flow_state *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->config_fd_flow_state, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->rule_type, + param->vsi_id, param->state); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_FD_FLOW_STATE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_flr_clear_queues(void *priv, u16 vf_id) +static int nbl_disp_config_fd_flow_state(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id, u16 state) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_queues, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->config_fd_flow_state, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, vsi_id, state); } -static void nbl_disp_flr_clear_flows(void *priv, u16 vf_id) +static void nbl_disp_cfg_fd_update_event(void *priv, bool enable) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_flows, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_fd_update_event, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable); } -static void nbl_disp_flr_clear_interrupt(void *priv, u16 vf_id) +static void nbl_disp_cfg_mirror_outputport_event(void *priv, bool enable) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_interrupt, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_mirror_outputport_event, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable); } -static void nbl_disp_unmask_all_interrupts(void *priv) +static void nbl_disp_dump_fd_flow(void *priv, struct seq_file *m) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unmask_all_interrupts, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->dump_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m); } -static void nbl_disp_keep_alive_req(void *priv) +static void nbl_disp_chan_get_xdp_queue_info_req(void *priv, u16 *queue_num, u16 *queue_size, + u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct
nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_queue_info result = {0}; struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_KEEP_ALIVE, - NULL, 0, NULL, 0, 1); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, + &vsi_id, sizeof(vsi_id), &result, sizeof(result), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (!chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) { + *queue_num = result.queue_num; + *queue_size = result.queue_size; + } } -static void nbl_disp_chan_keep_alive_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_xdp_queue_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_get_queue_info result = {0}; + int ret = NBL_CHAN_RESP_OK; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_KEEP_ALIVE, msg_id, - 0, NULL, 0); - + NBL_OPS_CALL(res_ops->get_xdp_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result.queue_num, + &result.queue_size, *(u16 *)data)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, msg_id, + ret, &result, sizeof(result)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_chan_get_user_queue_info_req(void *priv, u16 *queue_num, u16 *queue_size, - u16 vsi_id) +static void nbl_disp_get_xdp_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_get_queue_info result = {0}; - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_USER_QUEUE_INFO, - &vsi_id, sizeof(vsi_id), &result, sizeof(result), 1); + NBL_OPS_CALL(res_ops->get_xdp_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_num, queue_size, vsi_id)); +} - if (!chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) { - *queue_num = result.queue_num; - *queue_size = result.queue_size; - } +static void nbl_disp_set_hw_status(void *priv, enum nbl_hw_status hw_status) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_hw_status, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), hw_status); +} + +static void nbl_disp_get_active_func_bitmaps(void *priv, unsigned long *bitmap, int max_func) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_active_func_bitmaps, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), bitmap, max_func); +} + +static int nbl_disp_set_tc_wgt(void *priv, u16 vsi_id, u8 *weight, u8 num_tc) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops 
= NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_tc_wgt, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, weight, num_tc); } -static void nbl_disp_chan_get_user_queue_info_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static int nbl_disp_chan_set_tc_wgt_req(void *priv, u16 vsi_id, u8 *weight, u8 num_tc) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_ack_info chan_ack; - struct nbl_chan_param_get_queue_info result = {0}; - int ret = NBL_CHAN_RESP_OK; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_tc_wgt param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_OPS_CALL(res_ops->get_user_queue_info, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result.queue_num, - &result.queue_size, *(u16 *)data)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_USER_QUEUE_INFO, msg_id, - ret, &result, sizeof(result)); - chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + param.vsi_id = vsi_id; + param.num_tc = num_tc; + memcpy(param.weight, weight, num_tc); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_TC_WGT, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +static void nbl_disp_chan_set_tc_wgt_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_tc_wgt *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - NBL_OPS_CALL(res_ops->get_user_queue_info, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_num, queue_size, vsi_id)); + param = (struct nbl_chan_param_set_tc_wgt *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_tc_wgt, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->weight, param->num_tc); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_TC_WGT, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_ctrl_port_led(void *priv, u8 eth_id, - enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg) +static int nbl_disp_configure_rdma_bw(void *priv, u8 eth_id, int rdma_bw) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->ctrl_port_led, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, led_ctrl, led_reg)); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_rdma_bw, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, rdma_bw); } -static int nbl_disp_chan_ctrl_port_led_req(void *priv, u8 eth_id, - enum nbl_led_reg_ctrl led_ctrl, - u32 *led_reg) +static int nbl_disp_chan_configure_rdma_bw_req(void *priv, u8 eth_id, int rdma_bw) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_ctrl_port_led param = {0}; + struct nbl_chan_param_configure_rdma_bw param; struct nbl_common_info *common =
NBL_DISP_MGT_TO_COMMON(disp_mgt); param.eth_id = eth_id; - param.led_status = led_ctrl; - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CTRL_PORT_LED, - &param, sizeof(param), NULL, 0, 1); + param.rdma_bw = rdma_bw; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CONFIGURE_RDMA_BW, &param, sizeof(param), NULL, 0, 1); return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_ctrl_port_led_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_configure_rdma_bw_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_configure_rdma_bw *param; struct nbl_chan_ack_info chan_ack; - struct nbl_chan_param_ctrl_port_led *param = {0}; int ret = NBL_CHAN_RESP_OK; - param = (struct nbl_chan_param_ctrl_port_led *)data; - ret = NBL_OPS_CALL(res_ops->ctrl_port_led, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->eth_id, param->led_status, NULL)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CTRL_PORT_LED, msg_id, - ret, NULL, 0); + param = (struct nbl_chan_param_configure_rdma_bw *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_rdma_bw, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->rdma_bw); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_RDMA_BW, msg_id, ret, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_passthrough_fw_cmd(void *priv, struct nbl_passthrough_fw_cmd_param *param, - struct nbl_passthrough_fw_cmd_param *result) +static int nbl_disp_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret; - return NBL_OPS_CALL(res_ops->passthrough_fw_cmd, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, result)); -} + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_qos, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, pfc, + trust, dscp2prio_map); + if (ret) + return ret; -static int nbl_disp_nway_reset(void *priv, u8 eth_id) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_eth_pfc, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, pfc); - return NBL_OPS_CALL(res_ops->nway_reset, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id)); + return ret; } -static int nbl_disp_chan_nway_reset_req(void *priv, u8 eth_id) +static int nbl_disp_chan_configure_qos_req(void *priv, u8 eth_id, u8 *pfc, + u8 trust, u8 *dscp2prio_map) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_configure_qos param; struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_NWAY_RESET, - &eth_id, sizeof(eth_id), NULL, 0, 1); + param.eth_id = eth_id; + memcpy(param.pfc, pfc, NBL_MAX_PFC_PRIORITIES); + memcpy(param.dscp2prio_map, dscp2prio_map, NBL_DSCP_MAX); + param.trust = trust; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CONFIGURE_QOS, &param,
sizeof(param), NULL, 0, 1); return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_nway_reset_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_configure_qos_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_configure_qos *param; struct nbl_chan_ack_info chan_ack; - u8 *eth_id; int ret = NBL_CHAN_RESP_OK; - eth_id = (u8 *)data; - ret = NBL_OPS_CALL(res_ops->nway_reset, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *eth_id)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_NWAY_RESET, msg_id, - ret, NULL, 0); + param = (struct nbl_chan_param_configure_qos *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_qos, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->pfc, param->trust, param->dscp2prio_map); + if (ret) + goto send_ack; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_eth_pfc, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->pfc); + +send_ack: + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_QOS, msg_id, ret, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static u16 nbl_disp_get_vf_base_vsi_id(void *priv, u16 func_id) +static int nbl_disp_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret; - return NBL_OPS_CALL(res_ops->get_vf_base_vsi_id, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id)); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, prio, xoff, xon); + + return ret; } -static u16 nbl_disp_chan_get_vf_base_vsi_id_req(void *priv, u16 func_id) +static int nbl_disp_chan_set_pfc_buffer_size_req(void *priv, u8 eth_id, u8 prio, int xoff, int xon) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_pfc_buffer_size param; struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - u16 vf_base_vsi_id = 0; - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, - NULL, 0, &vf_base_vsi_id, sizeof(vf_base_vsi_id), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + param.eth_id = eth_id; + param.prio = prio; + param.xoff = xoff; + param.xon = xon; - return vf_base_vsi_id; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_vf_base_vsi_id_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_set_pfc_buffer_size_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_pfc_buffer_size *param; struct nbl_chan_ack_info chan_ack; int ret = NBL_CHAN_RESP_OK; - u16
vf_base_vsi_id; - vf_base_vsi_id = NBL_OPS_CALL(res_ops->get_vf_base_vsi_id, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, msg_id, - ret, &vf_base_vsi_id, sizeof(vf_base_vsi_id)); + param = (struct nbl_chan_param_set_pfc_buffer_size *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->prio, param->xoff, param->xon); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, msg_id, ret, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static u16 nbl_disp_get_intr_suppress_level(void *priv, u64 pkt_rates, u16 last_level) +static int nbl_disp_get_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->get_intr_suppress_level, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), pkt_rates, last_level)); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, prio, xoff, xon); + + return ret; } -static void nbl_disp_set_intr_suppress_level(void *priv, u16 vector_id, u16 vector_num, u16 level) +static int +nbl_disp_chan_get_pfc_buffer_size_req(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_pfc_buffer_size param = {0}; + struct nbl_chan_param_get_pfc_buffer_size_resp resp; struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + int ret; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_intr_suppress_level, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), common->mgt_pf, - vector_id, vector_num, level); + param.eth_id = eth_id; + param.prio = prio; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, ¶m, sizeof(param), + &resp, sizeof(resp), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + *xoff = resp.xoff; + *xon = resp.xon; + + return ret; } -static void nbl_disp_chan_set_intr_suppress_level_req(void *priv, u16 vector_id, - u16 vector_num, u16 level) +static void nbl_disp_chan_get_pfc_buffer_size_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_set_intr_suppress_level param = {0}; - struct nbl_chan_send_info chan_send = {0}; - struct nbl_common_info *common; - - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - - param.local_vector_id = vector_id; - param.vector_num = vector_num; - param.level = level; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_pfc_buffer_size *param; + struct nbl_chan_param_get_pfc_buffer_size_resp resp; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_INTL_SUPPRESS_LEVEL, - ¶m, sizeof(param), NULL, 0, 
0); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + param = (struct nbl_chan_param_get_pfc_buffer_size *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->prio, &resp.xoff, &resp.xon); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, msg_id, ret, + &resp, sizeof(resp)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_chan_set_intr_suppress_level_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static int nbl_disp_set_rate_limit(void *priv, enum nbl_traffic_type type, u32 rate) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops; - struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_set_intr_suppress_level *param; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret; - res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - param = (struct nbl_chan_param_set_intr_suppress_level *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_rate_limit, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, type, rate); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_intr_suppress_level, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->local_vector_id, - param->vector_num, param->level); + return ret; } -static int nbl_disp_get_p4_info(void *priv, char *verify_code) +static int +nbl_disp_chan_set_rate_limit_req(void *priv, enum nbl_traffic_type type, u32 rate) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_rate_limit param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - return NBL_OPS_CALL(res_ops->get_p4_info, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), verify_code)); + param.type = type; + param.rate = rate; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_RATE_LIMIT, ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static int nbl_disp_load_p4(void *priv, struct nbl_load_p4_param *param) +static void nbl_disp_chan_set_rate_limit_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_rate_limit *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - return NBL_OPS_CALL(res_ops->load_p4, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); + param = (struct nbl_chan_param_set_rate_limit *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_rate_limit, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + src_id, param->type, param->rate); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_RATE_LIMIT, msg_id, ret, + NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_load_p4_default(void *priv) +static void nbl_disp_register_dev_name(void *priv, u16 vsi_id, char *name) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return 
NBL_OPS_CALL(res_ops->load_p4_default, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_dev_name, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, name); } -static int nbl_disp_chan_get_p4_used_req(void *priv) +static void +nbl_disp_chan_register_dev_name_req(void *priv, u16 vsi_id, char *name) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - int p4_type; + struct nbl_chan_param_pf_name param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_P4_USED, - NULL, 0, &p4_type, sizeof(p4_type), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + param.vsi_id = vsi_id; + strscpy(param.dev_name, name, IFNAMSIZ); - return p4_type; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REGISTER_PF_NAME, ¶m, sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_p4_used_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_register_dev_name_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_pf_name *param; struct nbl_chan_ack_info chan_ack; int ret = NBL_CHAN_RESP_OK; - int p4_type; - p4_type = NBL_OPS_CALL(res_ops->get_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_P4_USED, msg_id, - ret, &p4_type, sizeof(p4_type)); + param = (struct nbl_chan_param_pf_name *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_dev_name, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->dev_name); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_PF_NAME, msg_id, ret, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_p4_used(void *priv) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - - return NBL_OPS_CALL(res_ops->get_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); -} - -static int nbl_disp_set_p4_used(void *priv, int p4_type) +static void nbl_disp_get_dev_name(void *priv, u16 vsi_id, char *name) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->set_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), p4_type)); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_dev_name, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, name); } -static int nbl_disp_chan_get_board_id_req(void *priv) +static void +nbl_disp_chan_get_dev_name_req(void *priv, u16 vsi_id, char *name) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - int result = -1; + struct nbl_chan_param_pf_name param = {0}; + struct nbl_chan_param_pf_name resp = {0}; + struct nbl_common_info *common = 
NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_BOARD_ID, - NULL, 0, &result, sizeof(result), 1); + param.vsi_id = vsi_id; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_PF_NAME, ¶m, sizeof(param), &resp, sizeof(resp), 1); chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - return result; + strscpy(name, resp.dev_name, IFNAMSIZ); } -static void nbl_disp_chan_get_board_id_resp(void *priv, u16 src_id, u16 msg_id, +static void nbl_disp_chan_get_dev_name_resp(void *priv, u16 src_id, u16 msg_id, void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_pf_name *param; + struct nbl_chan_param_pf_name resp = {0}; struct nbl_chan_ack_info chan_ack; - int ret = NBL_CHAN_RESP_OK, result = -1; - - result = NBL_OPS_CALL(res_ops->get_board_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + int ret = NBL_CHAN_RESP_OK; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_BOARD_ID, - msg_id, ret, &result, sizeof(result)); + param = (struct nbl_chan_param_pf_name *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_dev_name, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, resp.dev_name); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PF_NAME, msg_id, ret, &resp, sizeof(resp)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_board_id(void *priv) +static int nbl_disp_get_mirror_table_id(void *priv, u16 vsi_id, int dir, + bool mirror_en, u8 *mt_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->get_board_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_mirror_table_id, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, dir, mirror_en, mt_id); } -static dma_addr_t nbl_disp_restore_abnormal_ring(void *priv, int ring_index, int type) +static int nbl_disp_chan_get_mirror_table_id_req(void *priv, u16 vsi_id, int dir, + bool mirror_en, u8 *mt_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_mirror_table_id param = {0}; + struct nbl_chan_param_get_mirror_table_id resp = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + int ret; + + param.vsi_id = vsi_id; + param.dir = dir; + param.mirror_en = mirror_en; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_MIRROR_TABLE_ID, ¶m, sizeof(param), + &resp, sizeof(resp), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + *mt_id = resp.mt_id; + + return ret; +} + +static void nbl_disp_chan_get_mirror_table_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_mirror_table_id *param; + struct nbl_chan_param_get_mirror_table_id resp = {0}; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - return 
NBL_OPS_CALL(res_ops->restore_abnormal_ring, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, type)); + param = (struct nbl_chan_param_get_mirror_table_id *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_mirror_table_id, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->dir, param->mirror_en, &resp.mt_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MIRROR_TABLE_ID, msg_id, ret, + &resp, sizeof(resp)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_restart_abnormal_ring(void *priv, int ring_index, int type) +static int nbl_disp_configure_mirror(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->restart_abnormal_ring, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, type)); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_mirror, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, + mirror_en, dir, mt_id); } -static int nbl_disp_chan_restore_hw_queue_req(void *priv, u16 vsi_id, u16 local_queue_id, - dma_addr_t dma, int type) +static int nbl_disp_chan_configure_mirror_req(void *priv, u16 func_id, bool mirror_en, + int dir, u8 mt_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - struct nbl_chan_param_restore_hw_queue param = {0}; struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_mirror param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + int ret; - param.vsi_id = vsi_id; - param.local_queue_id = local_queue_id; - param.dma = dma; - param.type = type; + param.mirror_en = mirror_en; + param.dir = dir; + param.mt_id = mt_id; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_RESTORE_HW_QUEUE, ¶m, sizeof(param), NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_MSG_CONFIGURE_MIRROR, ¶m, sizeof(param), + NULL, 0, 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return ret; } -static void nbl_disp_chan_restore_hw_queue_resp(void *priv, u16 src_id, u16 msg_id, +static void nbl_disp_chan_configure_mirror_resp(void *priv, u16 src_id, u16 msg_id, void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_restore_hw_queue *param = NULL; + struct nbl_chan_param_mirror *param; struct nbl_chan_ack_info chan_ack; int ret = NBL_CHAN_RESP_OK; - param = (struct nbl_chan_param_restore_hw_queue *)data; - - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->restore_hw_queue, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->vsi_id, param->local_queue_id, param->dma, param->type); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESTORE_HW_QUEUE, msg_id, ret, NULL, 0); + param = (struct nbl_chan_param_mirror *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_mirror, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + src_id, param->mirror_en, param->dir, param->mt_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_MIRROR, msg_id, ret, + NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_restore_hw_queue(void *priv, 
u16 vsi_id, u16 local_queue_id, - dma_addr_t dma, int type) +static int nbl_disp_configure_mirror_table(void *priv, bool mirror_en, + u16 func_id, u8 mt_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->restore_hw_queue, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - vsi_id, local_queue_id, dma, type); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_mirror_table, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mirror_en, + func_id, mt_id); } -static u16 nbl_disp_get_local_queue_id(void *priv, u16 vsi_id, u16 global_queue_id) +static int nbl_disp_chan_configure_mirror_table_req(void *priv, bool mirror_en, + u16 func_id, u8 mt_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_mirror_table param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + int ret; + + param.mirror_en = mirror_en; + param.func_id = func_id; + param.mt_id = mt_id; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CONFIGURE_MIRROR_TABLE, ¶m, sizeof(param), + NULL, 0, 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return ret; +} + +static void nbl_disp_chan_configure_mirror_table_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_mirror_table *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - return NBL_OPS_CALL(res_ops->get_local_queue_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - vsi_id, global_queue_id)); + param = (struct nbl_chan_param_mirror_table *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_mirror_table, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->mirror_en, param->func_id, param->mt_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_MIRROR_TABLE, msg_id, ret, + NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_set_bridge_mode(void *priv, u16 bmode) +static int nbl_disp_clear_mirror_cfg(void *priv, u16 func_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_bridge_mode, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - NBL_COMMON_TO_MGT_PF(common), bmode); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_mirror_cfg, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id); } -static int nbl_disp_chan_set_bridge_mode_req(void *priv, u16 bmode) +static int nbl_disp_chan_clear_mirror_cfg_req(void *priv, u16 func_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + int ret; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_SET_BRIDGE_MODE, &bmode, sizeof(bmode), NULL, 0, 1); - return 
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_MSG_CLEAR_MIRROR_CFG, NULL, 0, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return ret; } -static void nbl_disp_chan_set_bridge_mode_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_clear_mirror_cfg_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_ack_info chan_ack; int ret = NBL_CHAN_RESP_OK; - u16 *bmode; - bmode = (u16 *)data; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_bridge_mode, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, *bmode); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_BRIDGE_MODE, - msg_id, ret, NULL, 0); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_mirror_cfg, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CLEAR_MIRROR_CFG, msg_id, ret, + NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static u16 nbl_disp_get_vf_function_id(void *priv, u16 vsi_id, int vf_id) +static int nbl_disp_check_flow_table_spec(void *priv, u16 vlan_list_cnt, + u16 unicast_mac_cnt, u16 multi_mac_cnt) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->get_vf_function_id, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vf_id)); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->check_flow_table_spec, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vlan_list_cnt, + unicast_mac_cnt, multi_mac_cnt); } -static u16 nbl_disp_chan_get_vf_function_id_req(void *priv, u16 vsi_id, int vf_id) +static int +nbl_disp_chan_check_flow_table_spec_req(void *priv, u16 vlan_list_cnt, + u16 unicast_mac_cnt, u16 multi_mac_cnt) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_get_vf_func_id param; - struct nbl_common_info *common; - u16 func_id = 0; + struct nbl_chan_param_check_flow_spec param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - param.vsi_id = vsi_id; - param.vf_id = vf_id; + param.vlan_list_cnt = vlan_list_cnt; + param.unicast_mac_cnt = unicast_mac_cnt; + param.multi_mac_cnt = multi_mac_cnt; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_GET_VF_FUNCTION_ID, ¶m, - sizeof(param), &func_id, sizeof(func_id), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_MSG_CHECK_FLOWTABLE_SPEC, ¶m, + sizeof(param), NULL, 0, 1); - return func_id; + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_vf_function_id_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void +nbl_disp_chan_check_flow_table_spec_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_get_vf_func_id *param; + struct nbl_resource_ops 
*res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_chan_ack_info chan_ack; - int ret = NBL_CHAN_RESP_OK; - u16 func_id; + struct nbl_chan_param_check_flow_spec *param = {0}; + int ret; - param = (struct nbl_chan_param_get_vf_func_id *)data; - func_id = NBL_OPS_CALL(res_ops->get_vf_function_id, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->vf_id)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_FUNCTION_ID, msg_id, - ret, &func_id, sizeof(func_id)); + param = (struct nbl_chan_param_check_flow_spec *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->check_flow_table_spec, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vlan_list_cnt, + param->unicast_mac_cnt, param->multi_mac_cnt); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CHECK_FLOWTABLE_SPEC, + msg_id, ret, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } +static u32 nbl_disp_get_dvn_desc_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_dvn_desc_req, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_set_dvn_desc_req(void *priv, u32 desc_req) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_dvn_desc_req, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), desc_req)); +} + /* NBL_DISP_SET_OPS(disp_op_name, res_func, ctrl_lvl, msg_type, msg_req, msg_resp) * ctrl_lvl is to define when this disp_op should go directly to res_op, not sending a channel msg. * @@ -4687,6 +10501,15 @@ do { \ NBL_DISP_SET_OPS(dump_ring_stats, nbl_disp_dump_ring_stats, \ NBL_DISP_CTRL_LVL_NET, -1, \ NULL, NULL); \ + NBL_DISP_SET_OPS(set_rings_xdp_prog, nbl_disp_set_rings_xdp_prog, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(register_xdp_rxq, nbl_disp_register_xdp_rxq, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(unregister_xdp_rxq, nbl_disp_unregister_xdp_rxq, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ NBL_DISP_SET_OPS(get_vector_napi, nbl_disp_get_vector_napi, \ NBL_DISP_CTRL_LVL_NET, -1, \ NULL, NULL); \ @@ -4733,6 +10556,9 @@ do { \ NBL_DISP_SET_OPS(setup_queue, nbl_disp_setup_queue, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_QUEUE, \ nbl_disp_chan_setup_queue_req, nbl_disp_chan_setup_queue_resp); \ + NBL_DISP_SET_OPS(remove_queue, nbl_disp_remove_queue, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_QUEUE, \ + nbl_disp_chan_remove_queue_req, nbl_disp_chan_remove_queue_resp); \ NBL_DISP_SET_OPS(remove_all_queues, nbl_disp_remove_all_queues, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_ALL_QUEUES, \ nbl_disp_chan_remove_all_queues_req, \ @@ -4746,9 +10572,10 @@ do { \ NBL_DISP_SET_OPS(remove_cqs, nbl_disp_remove_cqs, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_CQS, \ nbl_disp_chan_remove_cqs_req, nbl_disp_chan_remove_cqs_resp); \ - NBL_DISP_SET_OPS(enable_msix_irq, nbl_disp_enable_msix_irq, \ - NBL_DISP_CTRL_LVL_NET, -1, \ - NULL, NULL); \ + NBL_DISP_SET_OPS(cfg_qdisc_mqprio, nbl_disp_cfg_qdisc_mqprio, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_QDISC_MQPRIO, \ + nbl_disp_chan_cfg_qdisc_mqprio_req, \ + nbl_disp_chan_cfg_qdisc_mqprio_resp); \ NBL_DISP_SET_OPS(get_msix_irq_enable_info, nbl_disp_get_msix_irq_enable_info, \ NBL_DISP_CTRL_LVL_NET, -1, \ NULL, NULL); \ @@ -4764,6 +10591,9 @@ do { \ NBL_DISP_SET_OPS(del_multi_rule, nbl_disp_del_multi_rule, \ NBL_DISP_CTRL_LVL_MGT, 
			 NBL_CHAN_MSG_DEL_MULTI_RULE, \
 			 nbl_disp_chan_del_multi_rule_req, nbl_disp_chan_del_multi_rule_resp); \
+	NBL_DISP_SET_OPS(cfg_multi_mcast, nbl_disp_cfg_multi_mcast, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_MULTI_MCAST_RULE, \
+			 nbl_disp_chan_cfg_multi_mcast_req, nbl_disp_chan_cfg_multi_mcast_resp); \
 	NBL_DISP_SET_OPS(setup_multi_group, nbl_disp_setup_multi_group, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_MULTI_GROUP, \
 			 nbl_disp_chan_setup_multi_group_req, \
@@ -4781,6 +10611,25 @@ do { \
 	NBL_DISP_SET_OPS(get_eth_id, nbl_disp_get_eth_id, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_ID, \
 			 nbl_disp_chan_get_eth_id_req, nbl_disp_chan_get_eth_id_resp); \
+	NBL_DISP_SET_OPS(enable_lag_protocol, nbl_disp_enable_lag_protocol, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(cfg_lag_hash_algorithm, nbl_disp_cfg_lag_hash_algorithm, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_LAG_HASH_ALGORITHM, \
+			 nbl_disp_chan_cfg_lag_hash_algorithm_req, \
+			 nbl_disp_chan_cfg_lag_hash_algorithm_resp); \
+	NBL_DISP_SET_OPS(cfg_lag_member_fwd, nbl_disp_cfg_lag_member_fwd, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_LAG_MEMBER_FWD, \
+			 nbl_disp_chan_cfg_lag_member_fwd_req, \
+			 nbl_disp_chan_cfg_lag_member_fwd_resp); \
+	NBL_DISP_SET_OPS(cfg_lag_member_list, nbl_disp_cfg_lag_member_list, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_LAG_MEMBER_LIST, \
+			 nbl_disp_chan_cfg_lag_member_list_req, \
+			 nbl_disp_chan_cfg_lag_member_list_resp); \
+	NBL_DISP_SET_OPS(cfg_lag_member_up_attr, nbl_disp_cfg_lag_member_up_attr, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_LAG_MEMBER_UP_ATTR, \
+			 nbl_disp_chan_cfg_lag_member_up_attr_req, \
+			 nbl_disp_chan_cfg_lag_member_up_attr_resp); \
 	NBL_DISP_SET_OPS(add_lldp_flow, nbl_disp_add_lldp_flow, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_LLDP_FLOW, \
 			 nbl_disp_chan_add_lldp_flow_req, nbl_disp_chan_add_lldp_flow_resp); \
@@ -4793,6 +10642,19 @@ do { \
 	NBL_DISP_SET_OPS(del_lag_flow, nbl_disp_del_lag_flow, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_LAG_FLOW, \
 			 nbl_disp_chan_del_lag_flow_req, nbl_disp_chan_del_lag_flow_resp); \
+	NBL_DISP_SET_OPS(cfg_duppkt_info, nbl_disp_cfg_duppkt_info, \
+			 NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(cfg_duppkt_mcc, nbl_disp_cfg_duppkt_mcc, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_DUPPKT_MCC, \
+			 nbl_disp_chan_cfg_duppkt_mcc_req, nbl_disp_chan_cfg_duppkt_mcc_resp); \
+	NBL_DISP_SET_OPS(cfg_bond_shaping, nbl_disp_cfg_bond_shaping, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_BOND_SHAPING, \
+			 nbl_disp_chan_cfg_bond_shaping_req, \
+			 nbl_disp_chan_cfg_bond_shaping_resp); \
+	NBL_DISP_SET_OPS(cfg_bgid_back_pressure, nbl_disp_cfg_bgid_back_pressure, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_BGID_BACK_PRESSURE, \
+			 nbl_disp_chan_cfg_bgid_back_pressure_req, \
+			 nbl_disp_chan_cfg_bgid_back_pressure_resp); \
 	NBL_DISP_SET_OPS(set_promisc_mode, nbl_disp_set_promisc_mode, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PROSISC_MODE, \
 			 nbl_disp_chan_set_promisc_mode_req, \
@@ -4809,9 +10671,45 @@ do { \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_BASE_MAC_ADDR, \
 			 nbl_disp_chan_get_base_mac_addr_req, \
 			 nbl_disp_chan_get_base_mac_addr_resp); \
+	NBL_DISP_SET_OPS(get_eth_mac_stats, nbl_disp_get_eth_mac_stats, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_MAC_STATS, \
+			 nbl_disp_chan_get_eth_mac_stats_req, \
+			 nbl_disp_chan_get_eth_mac_stats_resp); \
+	NBL_DISP_SET_OPS(get_rmon_stats, nbl_disp_get_rmon_stats, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RMON_STATS, \
+			 nbl_disp_chan_get_rmon_stats_req, \
+			 nbl_disp_chan_get_rmon_stats_resp); \
 	NBL_DISP_SET_OPS(get_tx_headroom, nbl_disp_get_tx_headroom, \
 			 NBL_DISP_CTRL_LVL_NET, -1, \
 			 NULL, NULL); \
+	NBL_DISP_SET_OPS(get_rep_feature, nbl_disp_get_rep_feature, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(set_eswitch_mode, nbl_disp_set_eswitch_mode, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(get_eswitch_mode, nbl_disp_get_eswitch_mode, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(alloc_rep_data, nbl_disp_alloc_rep_data, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(free_rep_data, nbl_disp_free_rep_data, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(set_rep_netdev_info, nbl_disp_set_rep_netdev_info, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(unset_rep_netdev_info, nbl_disp_unset_rep_netdev_info, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(get_rep_netdev_info, nbl_disp_get_rep_netdev_info, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(get_rep_stats, nbl_disp_get_rep_stats, \
+			 NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(get_rep_index, nbl_disp_get_rep_index, \
+			 NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \
 	NBL_DISP_SET_OPS(get_firmware_version, nbl_disp_get_firmware_version, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FIRMWARE_VERSION, \
 			 nbl_disp_chan_get_firmware_version_req, \
@@ -4832,8 +10730,20 @@ do { \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_STATS, \
 			 nbl_disp_get_private_stat_data_req, \
 			 nbl_disp_chan_get_private_stat_data_resp); \
+	NBL_DISP_SET_OPS(get_eth_ctrl_stats, nbl_disp_get_eth_ctrl_stats, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_CTRL_STATS, \
+			 nbl_disp_chan_get_eth_ctrl_stats_req, \
+			 nbl_disp_chan_get_eth_ctrl_stats_resp); \
+	NBL_DISP_SET_OPS(get_pause_stats, nbl_disp_get_pause_stats, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PAUSE_STATS, \
+			 nbl_disp_chan_get_pause_stats_req, \
+			 nbl_disp_chan_get_pause_stats_resp); \
 	NBL_DISP_SET_OPS(fill_private_stat_strings, nbl_disp_fill_private_stat_strings, \
 			 NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(get_eth_abnormal_stats, nbl_disp_get_eth_abnormal_stats, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_ABNORMAL_STATS, \
+			 nbl_disp_chan_get_eth_abnormal_stats_req, \
+			 nbl_disp_chan_get_eth_abnormal_stats_resp); \
 	NBL_DISP_SET_OPS(get_max_desc_num, nbl_disp_get_max_desc_num, \
 			 NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \
 	NBL_DISP_SET_OPS(get_min_desc_num, nbl_disp_get_min_desc_num, \
@@ -4876,6 +10786,9 @@ do { \
 	NBL_DISP_SET_OPS(get_rxfh_indir, nbl_disp_get_rxfh_indir, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_INDIR, \
 			 nbl_disp_chan_get_rxfh_indir_req, nbl_disp_chan_get_rxfh_indir_resp); \
+	NBL_DISP_SET_OPS(set_rxfh_indir, nbl_disp_set_rxfh_indir, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_RXFH_INDIR, \
+			 nbl_disp_chan_set_rxfh_indir_req, nbl_disp_chan_set_rxfh_indir_resp); \
 	NBL_DISP_SET_OPS(get_rxfh_rss_key, nbl_disp_get_rxfh_rss_key, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_RSS_KEY, \
 			 nbl_disp_chan_get_rxfh_rss_key_req, \
@@ -4884,6 +10797,30 @@ do { \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, \
 			 nbl_disp_chan_get_rxfh_rss_alg_sel_req, \
 			 nbl_disp_chan_get_rxfh_rss_alg_sel_resp); \
+	NBL_DISP_SET_OPS(set_rxfh_rss_alg_sel, nbl_disp_set_rxfh_rss_alg_sel, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_RXFH_RSS_ALG_SEL, \
+			 nbl_disp_chan_set_rxfh_rss_alg_sel_req, \
+			 nbl_disp_chan_set_rxfh_rss_alg_sel_resp); \
+	NBL_DISP_SET_OPS(cfg_txrx_vlan, nbl_disp_cfg_txrx_vlan, \
+			 NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(setup_rdma_id, nbl_disp_setup_rdma_id, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(remove_rdma_id, nbl_disp_remove_rdma_id, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(register_rdma, nbl_disp_register_rdma, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_RDMA, \
+			 nbl_disp_chan_register_rdma_req, nbl_disp_chan_register_rdma_resp); \
+	NBL_DISP_SET_OPS(unregister_rdma, nbl_disp_unregister_rdma, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNREGISTER_RDMA, \
+			 nbl_disp_chan_unregister_rdma_req, nbl_disp_chan_unregister_rdma_resp); \
+	NBL_DISP_SET_OPS(register_rdma_bond, nbl_disp_register_rdma_bond, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_RDMA_BOND, \
+			 nbl_disp_chan_register_rdma_bond_req, \
+			 nbl_disp_chan_register_rdma_bond_resp); \
+	NBL_DISP_SET_OPS(unregister_rdma_bond, nbl_disp_unregister_rdma_bond, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNREGISTER_RDMA_BOND, \
+			 nbl_disp_chan_unregister_rdma_bond_req, \
+			 nbl_disp_chan_unregister_rdma_bond_resp); \
 	NBL_DISP_SET_OPS(get_hw_addr, nbl_disp_get_hw_addr, \
 			 NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
 	NBL_DISP_SET_OPS(get_real_hw_addr, nbl_disp_get_real_hw_addr, \
@@ -4914,10 +10851,6 @@ do { \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PHY_CAPS, \
 			 nbl_disp_chan_get_phy_caps_req, \
 			 nbl_disp_chan_get_phy_caps_resp); \
-	NBL_DISP_SET_OPS(get_phy_state, nbl_disp_get_phy_state, \
-			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PHY_STATE, \
-			 nbl_disp_chan_get_phy_state_req, \
-			 nbl_disp_chan_get_phy_state_resp); \
 	NBL_DISP_SET_OPS(set_sfp_state, nbl_disp_set_sfp_state, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_SFP_STATE, \
 			 nbl_disp_chan_set_sfp_state_req, \
@@ -4930,12 +10863,111 @@ do { \
 			 nbl_disp_chan_check_active_vf_resp); \
 	NBL_DISP_SET_OPS(get_adminq_tx_buf_size, nbl_disp_get_adminq_tx_buf_size, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(emp_console_write, nbl_disp_adminq_emp_console_write, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
 	NBL_DISP_SET_OPS(get_product_flex_cap, nbl_disp_get_product_flex_cap, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, \
 			 nbl_disp_chan_get_product_flex_cap_req, \
 			 nbl_disp_chan_get_product_flex_cap_resp); \
 	NBL_DISP_SET_OPS(get_product_fix_cap, nbl_disp_get_product_fix_cap, \
 			 NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(alloc_ktls_tx_index, nbl_disp_alloc_ktls_tx_index, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ALLOC_KTLS_TX_INDEX, \
+			 nbl_disp_chan_alloc_ktls_tx_index_req, \
+			 nbl_disp_chan_alloc_ktls_tx_index_resp); \
+	NBL_DISP_SET_OPS(free_ktls_tx_index, nbl_disp_free_ktls_tx_index, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_FREE_KTLS_TX_INDEX, \
+			 nbl_disp_chan_free_ktls_tx_index_req, \
+			 nbl_disp_chan_free_ktls_tx_index_resp); \
+	NBL_DISP_SET_OPS(cfg_ktls_tx_keymat, nbl_disp_cfg_ktls_tx_keymat, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_KTLS_TX_KEYMAT, \
+			 nbl_disp_chan_cfg_ktls_tx_keymat_req, \
+			 nbl_disp_chan_cfg_ktls_tx_keymat_resp); \
+	NBL_DISP_SET_OPS(alloc_ktls_rx_index, nbl_disp_alloc_ktls_rx_index, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ALLOC_KTLS_RX_INDEX, \
+			 nbl_disp_chan_alloc_ktls_rx_index_req, \
+			 nbl_disp_chan_alloc_ktls_rx_index_resp); \
+	NBL_DISP_SET_OPS(free_ktls_rx_index, nbl_disp_free_ktls_rx_index, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_FREE_KTLS_RX_INDEX, \
+			 nbl_disp_chan_free_ktls_rx_index_req, \
+			 nbl_disp_chan_free_ktls_rx_index_resp); \
+	NBL_DISP_SET_OPS(cfg_ktls_rx_keymat, nbl_disp_cfg_ktls_rx_keymat, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_KTLS_RX_KEYMAT, \
+			 nbl_disp_chan_cfg_ktls_rx_keymat_req, \
+			 nbl_disp_chan_cfg_ktls_rx_keymat_resp); \
+	NBL_DISP_SET_OPS(cfg_ktls_rx_record, nbl_disp_cfg_ktls_rx_record, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_KTLS_RX_RECORD, \
+			 nbl_disp_chan_cfg_ktls_rx_record_req, \
+			 nbl_disp_chan_cfg_ktls_rx_record_resp); \
+	NBL_DISP_SET_OPS(add_ktls_rx_flow, nbl_disp_add_ktls_rx_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_KTLS_RX_FLOW, \
+			 nbl_disp_chan_add_ktls_rx_flow_req, \
+			 nbl_disp_chan_add_ktls_rx_flow_resp); \
+	NBL_DISP_SET_OPS(del_ktls_rx_flow, nbl_disp_del_ktls_rx_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_KTLS_RX_FLOW, \
+			 nbl_disp_chan_del_ktls_rx_flow_req, \
+			 nbl_disp_chan_del_ktls_rx_flow_resp); \
+	NBL_DISP_SET_OPS(alloc_ipsec_tx_index, nbl_disp_alloc_ipsec_tx_index, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ALLOC_IPSEC_TX_INDEX, \
+			 nbl_disp_chan_alloc_ipsec_tx_index_req, \
+			 nbl_disp_chan_alloc_ipsec_tx_index_resp); \
+	NBL_DISP_SET_OPS(free_ipsec_tx_index, nbl_disp_free_ipsec_tx_index, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_FREE_IPSEC_TX_INDEX, \
+			 nbl_disp_chan_free_ipsec_tx_index_req, \
+			 nbl_disp_chan_free_ipsec_tx_index_resp); \
+	NBL_DISP_SET_OPS(alloc_ipsec_rx_index, nbl_disp_alloc_ipsec_rx_index, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ALLOC_IPSEC_RX_INDEX, \
+			 nbl_disp_chan_alloc_ipsec_rx_index_req, \
+			 nbl_disp_chan_alloc_ipsec_rx_index_resp); \
+	NBL_DISP_SET_OPS(free_ipsec_rx_index, nbl_disp_free_ipsec_rx_index, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_FREE_IPSEC_RX_INDEX, \
+			 nbl_disp_chan_free_ipsec_rx_index_req, \
+			 nbl_disp_chan_free_ipsec_rx_index_resp); \
+	NBL_DISP_SET_OPS(cfg_ipsec_tx_sad, nbl_disp_cfg_ipsec_tx_sad, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_IPSEC_TX_SAD, \
+			 nbl_disp_chan_cfg_ipsec_tx_sad_req, \
+			 nbl_disp_chan_cfg_ipsec_tx_sad_resp); \
+	NBL_DISP_SET_OPS(cfg_ipsec_rx_sad, nbl_disp_cfg_ipsec_rx_sad, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_IPSEC_RX_SAD, \
+			 nbl_disp_chan_cfg_ipsec_rx_sad_req, \
+			 nbl_disp_chan_cfg_ipsec_rx_sad_resp); \
+	NBL_DISP_SET_OPS(add_ipsec_tx_flow, nbl_disp_add_ipsec_tx_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_IPSEC_TX_FLOW, \
+			 nbl_disp_chan_add_ipsec_tx_flow_req, \
+			 nbl_disp_chan_add_ipsec_tx_flow_resp); \
+	NBL_DISP_SET_OPS(del_ipsec_tx_flow, nbl_disp_del_ipsec_tx_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_IPSEC_TX_FLOW, \
+			 nbl_disp_chan_del_ipsec_tx_flow_req, \
+			 nbl_disp_chan_del_ipsec_tx_flow_resp); \
+	NBL_DISP_SET_OPS(add_ipsec_rx_flow, nbl_disp_add_ipsec_rx_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_IPSEC_RX_FLOW, \
+			 nbl_disp_chan_add_ipsec_rx_flow_req, \
+			 nbl_disp_chan_add_ipsec_rx_flow_resp); \
+	NBL_DISP_SET_OPS(del_ipsec_rx_flow, nbl_disp_del_ipsec_rx_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_IPSEC_RX_FLOW, \
+			 nbl_disp_chan_del_ipsec_rx_flow_req, \
+			 nbl_disp_chan_del_ipsec_rx_flow_resp); \
+	NBL_DISP_SET_OPS(check_ipsec_status, nbl_disp_check_ipsec_status, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(get_dipsec_lft_info, nbl_disp_get_dipsec_lft_info, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(handle_dipsec_soft_expire, nbl_disp_handle_dipsec_soft_expire, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(handle_dipsec_hard_expire, nbl_disp_handle_dipsec_hard_expire, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(get_uipsec_lft_info, nbl_disp_get_uipsec_lft_info, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(handle_uipsec_soft_expire, nbl_disp_handle_uipsec_soft_expire, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(handle_uipsec_hard_expire, nbl_disp_handle_uipsec_hard_expire, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
 	NBL_DISP_SET_OPS(get_mbx_irq_num, nbl_disp_get_mbx_irq_num, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MBX_IRQ_NUM, \
 			 nbl_disp_chan_get_mbx_irq_num_req, \
@@ -4944,12 +10976,50 @@ do { \
 			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
 	NBL_DISP_SET_OPS(get_abnormal_irq_num, nbl_disp_get_abnormal_irq_num, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(clear_accel_flow, nbl_disp_clear_accel_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CLEAR_ACCEL_FLOW, \
+			 nbl_disp_chan_clear_accel_flow_req, \
+			 nbl_disp_chan_clear_accel_flow_resp); \
 	NBL_DISP_SET_OPS(clear_flow, nbl_disp_clear_flow, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CLEAR_FLOW, \
 			 nbl_disp_chan_clear_flow_req, nbl_disp_chan_clear_flow_resp); \
 	NBL_DISP_SET_OPS(clear_queues, nbl_disp_clear_queues, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CLEAR_QUEUE, \
 			 nbl_disp_chan_clear_queues_req, nbl_disp_chan_clear_queues_resp); \
+	NBL_DISP_SET_OPS(disable_phy_flow, nbl_disp_disable_phy_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DISABLE_PHY_FLOW, \
+			 nbl_disp_chan_disable_phy_flow_req, \
+			 nbl_disp_chan_disable_phy_flow_resp); \
+	NBL_DISP_SET_OPS(enable_phy_flow, nbl_disp_enable_phy_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ENABLE_PHY_FLOW, \
+			 nbl_disp_chan_enable_phy_flow_req, \
+			 nbl_disp_chan_enable_phy_flow_resp); \
+	NBL_DISP_SET_OPS(init_acl, nbl_disp_init_acl, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_INIT_ACL, \
+			 nbl_disp_chan_init_acl_req, \
+			 nbl_disp_chan_init_acl_resp); \
+	NBL_DISP_SET_OPS(uninit_acl, nbl_disp_uninit_acl, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNINIT_ACL, \
+			 nbl_disp_chan_uninit_acl_req, \
+			 nbl_disp_chan_uninit_acl_resp); \
+	NBL_DISP_SET_OPS(set_upcall_rule, nbl_disp_set_upcall_rule, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_UPCALL_RULE, \
+			 nbl_disp_chan_set_upcall_rule_req, \
+			 nbl_disp_chan_set_upcall_rule_resp); \
+	NBL_DISP_SET_OPS(unset_upcall_rule, nbl_disp_unset_upcall_rule, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNSET_UPCALL_RULE, \
+			 nbl_disp_chan_unset_upcall_rule_req, \
+			 nbl_disp_chan_unset_upcall_rule_resp); \
+	NBL_DISP_SET_OPS(set_shaping_dport_vld, nbl_disp_set_shaping_dport_vld, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_SHAPING_DPORT_VLD, \
+			 nbl_disp_chan_set_shaping_dport_vld_req, \
+			 nbl_disp_chan_set_shaping_dport_vld_resp); \
+	NBL_DISP_SET_OPS(set_dport_fc_th_vld, nbl_disp_set_dport_fc_th_vld, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_DPORT_FC_TH_VLD, \
+			 nbl_disp_chan_set_dport_fc_th_vld_req, \
+			 nbl_disp_chan_set_dport_fc_th_vld_resp); \
+	NBL_DISP_SET_OPS(check_offload_status, nbl_disp_check_offload_status, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
 	NBL_DISP_SET_OPS(get_reg_dump, nbl_disp_get_reg_dump, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_REG_DUMP, \
 			 nbl_disp_chan_get_reg_dump_req, \
@@ -4969,6 +11039,8 @@ do { \
 			 nbl_disp_chan_get_p4_used_req, nbl_disp_chan_get_p4_used_resp); \
 	NBL_DISP_SET_OPS(set_p4_used, nbl_disp_set_p4_used, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(get_p4_version, nbl_disp_get_p4_version, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
 	NBL_DISP_SET_OPS(get_board_id, nbl_disp_get_board_id, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_BOARD_ID, \
 			 nbl_disp_chan_get_board_id_req, nbl_disp_chan_get_board_id_resp); \
@@ -4980,23 +11052,62 @@ do { \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_RESTORE_HW_QUEUE, \
 			 nbl_disp_chan_restore_hw_queue_req, \
 			 nbl_disp_chan_restore_hw_queue_resp); \
+	NBL_DISP_SET_OPS(stop_abnormal_hw_queue, nbl_disp_stop_abnormal_hw_queue, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_STOP_ABNORMAL_HW_QUEUE, \
+			 nbl_disp_chan_stop_abnormal_hw_queue_req, \
+			 nbl_disp_chan_stop_abnormal_hw_queue_resp); \
+	NBL_DISP_SET_OPS(stop_abnormal_sw_queue, nbl_disp_stop_abnormal_sw_queue, \
+			 NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \
 	NBL_DISP_SET_OPS(get_local_queue_id, nbl_disp_get_local_queue_id, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_REGISTER_NET_REP, NULL, \
+			 nbl_disp_chan_register_net_rep_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_UNREGISTER_NET_REP, NULL, \
+			 nbl_disp_chan_unregister_net_rep_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_REGISTER_ETH_REP, NULL, \
+			 nbl_disp_chan_register_eth_rep_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_UNREGISTER_ETH_REP, NULL, \
+			 nbl_disp_chan_unregister_eth_rep_resp); \
 	NBL_DISP_SET_OPS(get_vsi_global_queue_id, nbl_disp_get_vsi_global_qid, \
 			 NBL_DISP_CTRL_LVL_MGT, \
 			 NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, \
 			 nbl_disp_chan_get_vsi_global_qid_req, \
 			 nbl_disp_chan_get_vsi_global_qid_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_GET_LINE_RATE_INFO, \
+			 NULL, nbl_disp_chan_get_line_rate_info_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_REGISTER_UPCALL_PORT, NULL, \
+			 nbl_disp_chan_register_upcall_port_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_UNREGISTER_UPCALL_PORT, NULL, \
+			 nbl_disp_chan_unregister_upcall_port_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_SET_OFFLOAD_STATUS, NULL, \
+			 nbl_disp_chan_set_offload_status_resp); \
 	NBL_DISP_SET_OPS(get_port_attributes, nbl_disp_get_port_attributes, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, \
 			 NULL, NULL); \
 	NBL_DISP_SET_OPS(update_ring_num, nbl_disp_update_ring_num, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(update_rdma_cap, nbl_disp_update_rdma_cap, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(get_rdma_cap_num, nbl_disp_get_rdma_cap_num, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(update_rdma_mem_type, nbl_disp_update_rdma_mem_type, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
 	NBL_DISP_SET_OPS(set_ring_num, nbl_disp_set_ring_num, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
 	NBL_DISP_SET_OPS(enable_port, nbl_disp_enable_port, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, \
 			 NULL, NULL); \
+	NBL_DISP_SET_OPS(init_port, nbl_disp_init_port, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
 	NBL_DISP_SET_OPS(dummy_func, NULL, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADMINQ_PORT_NOTIFY, \
 			 NULL, \
@@ -5005,6 +11116,10 @@ do { \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PORT_STATE, \
 			 nbl_disp_chan_get_port_state_req, \
 			 nbl_disp_chan_get_port_state_resp); \
+	NBL_DISP_SET_OPS(get_fec_stats, nbl_disp_get_fec_stats, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FEC_STATS, \
+			 nbl_disp_chan_get_fec_stats_req, \
+			 nbl_disp_chan_get_fec_stats_resp); \
 	NBL_DISP_SET_OPS(set_port_advertising, nbl_disp_set_port_advertising, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PORT_ADVERTISING, \
 			 nbl_disp_chan_set_port_advertising_req, \
@@ -5021,6 +11136,64 @@ do { \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_LINK_STATE, \
 			 nbl_disp_chan_get_link_state_req, \
 			 nbl_disp_chan_get_link_state_resp); \
+	NBL_DISP_SET_OPS(get_link_down_count, nbl_disp_get_link_down_count, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_LINK_DOWN_COUNT, \
+			 nbl_disp_chan_get_link_down_count_req, \
+			 nbl_disp_chan_get_link_down_count_resp); \
+	NBL_DISP_SET_OPS(get_link_status_opcode, nbl_disp_get_link_status_opcode, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_LINK_STATUS_OPCODE, \
+			 nbl_disp_chan_get_link_status_opcode_req, \
+			 nbl_disp_chan_get_link_status_opcode_resp); \
+	NBL_DISP_SET_OPS(set_wol, nbl_disp_set_wol, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_WOL, \
+			 nbl_disp_chan_set_wol_req, nbl_disp_chan_set_wol_resp); \
+	NBL_DISP_SET_OPS(cfg_eth_bond_event, nbl_disp_cfg_eth_bond_event, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_INIT_OFLD, NULL, \
+			 nbl_disp_chan_init_offload_fwd_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_INIT_CMDQ, NULL, \
+			 nbl_disp_chan_init_cmdq_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_DESTROY_CMDQ, NULL, \
+			 nbl_disp_chan_destroy_cmdq_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_RESET_CMDQ, NULL, \
+			 nbl_disp_chan_reset_cmdq_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_OFFLOAD_FLOW_RULE, NULL, \
+			 nbl_disp_chan_offload_flow_rule_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_GET_ACL_SWITCH, NULL, \
+			 nbl_disp_chan_get_flow_acl_switch_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_INIT_REP, NULL, \
+			 nbl_disp_chan_init_rep_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_INIT_FLOW, NULL, \
+			 nbl_disp_chan_init_flow_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_DEINIT_FLOW, NULL, \
+			 nbl_disp_chan_deinit_flow_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_GET_QUEUE_CXT, NULL, \
+			 nbl_disp_chan_get_queue_cxt_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_CFG_LOG, NULL, \
+			 nbl_disp_chan_cfg_log_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_INIT_VDPAQ, NULL, \
+			 nbl_disp_chan_init_vdpaq_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_DESTROY_VDPAQ, NULL, \
+			 nbl_disp_chan_destroy_vdpaq_resp); \
+	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_GET_UPCALL_PORT, NULL, \
+			 nbl_disp_chan_get_upcall_port_resp); \
+	NBL_DISP_SET_OPS(configure_rdma_msix_off, nbl_disp_configure_rdma_msix_off, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
 	NBL_DISP_SET_OPS(set_eth_mac_addr, nbl_disp_set_eth_mac_addr, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, \
 			 nbl_disp_chan_set_eth_mac_addr_req, \
@@ -5029,31 +11202,82 @@ do { \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, \
 			 nbl_disp_chan_get_chip_temperature_req, \
 			 nbl_disp_chan_get_chip_temperature_resp); \
-	NBL_DISP_SET_OPS(get_chip_temperature_max, nbl_disp_get_chip_temperature_max, \
-			 NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
-	NBL_DISP_SET_OPS(get_chip_temperature_crit, nbl_disp_get_chip_temperature_crit, \
-			 NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
 	NBL_DISP_SET_OPS(get_module_temperature, nbl_disp_get_module_temperature, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, \
 			 nbl_disp_chan_get_module_temperature_req, \
 			 nbl_disp_chan_get_module_temperature_resp); \
 	NBL_DISP_SET_OPS(process_abnormal_event, nbl_disp_process_abnormal_event, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(switchdev_init_cmdq, nbl_disp_switchdev_init_cmdq, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SWITCHDEV_INIT_CMDQ, \
+			 nbl_disp_chan_switchdev_init_cmdq_req, \
+			 nbl_disp_chan_switchdev_init_cmdq_resp); \
+	NBL_DISP_SET_OPS(switchdev_deinit_cmdq, nbl_disp_switchdev_deinit_cmdq, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SWITCHDEV_DEINIT_CMDQ, \
+			 nbl_disp_chan_switchdev_deinit_cmdq_req, \
+			 nbl_disp_chan_switchdev_deinit_cmdq_resp); \
+	NBL_DISP_SET_OPS(add_tc_flow, nbl_disp_add_tc_flow, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(del_tc_flow, nbl_disp_del_tc_flow, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(tc_tun_encap_lookup, nbl_disp_tc_tun_encap_lookup, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(tc_tun_encap_del, nbl_disp_tc_tun_encap_del, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(tc_tun_encap_add, nbl_disp_tc_tun_encap_add, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(flow_index_lookup, nbl_disp_flow_index_lookup, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(set_tc_flow_info, nbl_disp_set_tc_flow_info, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_TC_FLOW_INFO, \
+			 nbl_disp_chan_set_tc_flow_info_req, \
+			 nbl_disp_chan_set_tc_flow_info_resp); \
+	NBL_DISP_SET_OPS(unset_tc_flow_info, nbl_disp_unset_tc_flow_info, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNSET_TC_FLOW_INFO, \
+			 nbl_disp_chan_unset_tc_flow_info_req, \
+			 nbl_disp_chan_unset_tc_flow_info_resp); \
+	NBL_DISP_SET_OPS(get_tc_flow_info, nbl_disp_get_tc_flow_info, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(query_tc_stats, nbl_disp_query_tc_stats, \
+			 NBL_DISP_CTRL_LVL_NET, -1, \
+			 NULL, NULL); \
 	NBL_DISP_SET_OPS(adapt_desc_gother, nbl_disp_adapt_desc_gother, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, \
 			 NULL, NULL); \
+	NBL_DISP_SET_OPS(set_desc_high_throughput, nbl_disp_set_desc_high_throughput, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
 	NBL_DISP_SET_OPS(flr_clear_net, nbl_disp_flr_clear_net, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, \
 			 NULL, NULL); \
+	NBL_DISP_SET_OPS(flr_clear_accel, nbl_disp_flr_clear_accel, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
 	NBL_DISP_SET_OPS(flr_clear_queues, nbl_disp_flr_clear_queues, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, \
 			 NULL, NULL); \
+	NBL_DISP_SET_OPS(flr_clear_accel_flow, nbl_disp_flr_clear_accel_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
 	NBL_DISP_SET_OPS(flr_clear_flows, nbl_disp_flr_clear_flows, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, \
 			 NULL, NULL); \
 	NBL_DISP_SET_OPS(flr_clear_interrupt, nbl_disp_flr_clear_interrupt, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, \
 			 NULL, NULL); \
+	NBL_DISP_SET_OPS(flr_clear_rdma, nbl_disp_flr_clear_rdma, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(covert_vfid_to_vsi_id, nbl_disp_covert_vfid_to_vsi_id, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
 	NBL_DISP_SET_OPS(unmask_all_interrupts, nbl_disp_unmask_all_interrupts, \
 			 NBL_DISP_CTRL_LVL_MGT, -1, \
 			 NULL, NULL); \
@@ -5067,17 +11291,39 @@ do { \
 	NBL_DISP_SET_OPS(nway_reset, nbl_disp_nway_reset, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_NWAY_RESET, \
 			 nbl_disp_chan_nway_reset_req, nbl_disp_chan_nway_reset_resp); \
+	NBL_DISP_SET_OPS(get_rep_queue_info, nbl_disp_get_rep_queue_info, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_REP_QUEUE_INFO, \
+			 nbl_disp_chan_get_rep_queue_info_req, \
+			 nbl_disp_chan_get_rep_queue_info_resp); \
 	NBL_DISP_SET_OPS(get_user_queue_info, nbl_disp_get_user_queue_info, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_USER_QUEUE_INFO, \
 			 nbl_disp_chan_get_user_queue_info_req, \
 			 nbl_disp_chan_get_user_queue_info_resp); \
-	NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \
-			 NBL_CHAN_MSG_GET_BOARD_INFO, NULL, \
+	NBL_DISP_SET_OPS(get_board_info, nbl_disp_get_board_info, NBL_DISP_CTRL_LVL_MGT, \
			 NBL_CHAN_MSG_GET_BOARD_INFO, nbl_disp_chan_get_board_info_req, \
 			 nbl_disp_chan_get_board_info_resp); \
 	NBL_DISP_SET_OPS(get_vf_base_vsi_id, nbl_disp_get_vf_base_vsi_id, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, \
 			 nbl_disp_chan_get_vf_base_vsi_id_req, \
 			 nbl_disp_chan_get_vf_base_vsi_id_resp); \
+	NBL_DISP_SET_OPS(cfg_eth_bond_info, nbl_disp_cfg_eth_bond_info, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_ETH_BOND_INFO, \
+			 nbl_disp_chan_cfg_eth_bond_info_req, \
+			 nbl_disp_chan_cfg_eth_bond_info_resp); \
+	NBL_DISP_SET_OPS(get_eth_bond_info, nbl_disp_get_eth_bond_info, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_BOND_INFO, \
+			 nbl_disp_chan_get_eth_bond_info_req, \
+			 nbl_disp_chan_get_eth_bond_info_resp); \
+	NBL_DISP_SET_OPS(add_nd_upcall_flow, nbl_disp_chan_add_nd_upcall_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_ADD_ND_UPCALL_FLOW, \
+			 nbl_disp_chan_add_nd_upcall_flow_req, \
+			 nbl_disp_chan_add_nd_upcall_flow_resp); \
+	NBL_DISP_SET_OPS(del_nd_upcall_flow, nbl_disp_chan_del_nd_upcall_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_MSG_DEL_ND_UPCALL_FLOW, \
+			 nbl_disp_chan_del_nd_upcall_flow_req, \
+			 nbl_disp_chan_del_nd_upcall_flow_resp); \
 	NBL_DISP_SET_OPS(set_bridge_mode, nbl_disp_set_bridge_mode, \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_BRIDGE_MODE, \
 			 nbl_disp_chan_set_bridge_mode_req, \
@@ -5086,6 +11332,178 @@ do { \
 			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VF_FUNCTION_ID, \
 			 nbl_disp_chan_get_vf_function_id_req, \
 			 nbl_disp_chan_get_vf_function_id_resp); \
+	NBL_DISP_SET_OPS(get_vf_vsi_id, nbl_disp_get_vf_vsi_id, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VF_VSI_ID, \
+			 nbl_disp_chan_get_vf_vsi_id_req, \
+			 nbl_disp_chan_get_vf_vsi_id_resp); \
+	NBL_DISP_SET_OPS(check_vf_is_active, nbl_disp_check_vf_is_active, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_CHECK_VF_IS_ACTIVE, \
+			 nbl_disp_chan_check_vf_is_active_req, \
+			 nbl_disp_chan_check_vf_is_active_resp); \
+	NBL_DISP_SET_OPS(check_vf_is_vdpa, nbl_disp_check_vf_is_vdpa, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_CHECK_VF_IS_VDPA, \
+			 nbl_disp_chan_check_vf_is_vdpa_req, \
+			 nbl_disp_chan_check_vf_is_vdpa_resp); \
+	NBL_DISP_SET_OPS(get_vdpa_vf_stats, nbl_disp_get_vdpa_vf_stats, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VDPA_VF_STATS, \
+			 nbl_disp_chan_get_vdpa_vf_stats_req, \
+			 nbl_disp_chan_get_vdpa_vf_stats_resp); \
+	NBL_DISP_SET_OPS(get_uvn_pkt_drop_stats, nbl_disp_get_uvn_pkt_drop_stats, \
+			 NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_GET_UVN_PKT_DROP_STATS, \
+			 nbl_disp_chan_get_uvn_pkt_drop_stats_req, \
+			 nbl_disp_chan_get_uvn_pkt_drop_stats_resp); \
+	NBL_DISP_SET_OPS(get_ustore_pkt_drop_stats, nbl_disp_get_ustore_pkt_drop_stats, \
+			 NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_GET_USTORE_PKT_DROP_STATS, \
+			 nbl_disp_chan_get_ustore_pkt_drop_stats_req, \
+			 nbl_disp_chan_get_ustore_pkt_drop_stats_resp); \
+	NBL_DISP_SET_OPS(get_ustore_total_pkt_drop_stats, \
+			 nbl_disp_get_ustore_total_pkt_drop_stats, \
+			 NBL_DISP_CTRL_LVL_MGT, \
+			 NBL_CHAN_GET_USTORE_TOTAL_PKT_DROP_STATS, \
+			 nbl_disp_chan_get_ustore_total_pkt_drop_stats_req, \
+			 nbl_disp_chan_get_ustore_total_pkt_drop_stats_resp); \
+	NBL_DISP_SET_OPS(set_pmd_debug, nbl_disp_set_pmd_debug, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PMD_DEBUG, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(register_func_mac, nbl_disp_register_func_mac, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_MAC, \
+			 nbl_disp_chan_register_func_mac_req, \
+			 nbl_disp_chan_register_func_mac_resp); \
+	NBL_DISP_SET_OPS(set_tx_rate, nbl_disp_set_tx_rate, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_TX_RATE, \
+			 nbl_disp_chan_set_tx_rate_req, nbl_disp_chan_set_tx_rate_resp); \
+	NBL_DISP_SET_OPS(set_rx_rate, nbl_disp_set_rx_rate, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_RX_RATE, \
+			 nbl_disp_chan_set_rx_rate_req, nbl_disp_chan_set_rx_rate_resp); \
+	NBL_DISP_SET_OPS(register_func_link_forced, nbl_disp_register_func_link_forced, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, \
+			 nbl_disp_chan_register_func_link_forced_req, \
+			 nbl_disp_chan_register_func_link_forced_resp); \
+	NBL_DISP_SET_OPS(get_link_forced, nbl_disp_get_link_forced, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_LINK_FORCED, \
+			 nbl_disp_chan_get_link_forced_req, nbl_disp_chan_get_link_forced_resp); \
+	NBL_DISP_SET_OPS(get_driver_version, nbl_disp_get_driver_version, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(register_func_trust, nbl_disp_register_func_trust, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_TRUST, \
+			 nbl_disp_chan_register_func_trust_req, \
+			 nbl_disp_chan_register_func_trust_resp); \
+	NBL_DISP_SET_OPS(register_func_vlan, nbl_disp_register_func_vlan, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_VLAN, \
+			 nbl_disp_chan_register_func_vlan_req, \
+			 nbl_disp_chan_register_func_vlan_resp); \
+	NBL_DISP_SET_OPS(register_func_rate, nbl_disp_register_func_rate, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_RATE, \
+			 nbl_disp_chan_register_func_rate_req, \
+			 nbl_disp_chan_register_func_rate_resp); \
+	NBL_DISP_SET_OPS(set_mtu, nbl_disp_set_mtu, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_MTU_SET, \
+			 nbl_disp_chan_set_mtu_req, \
+			 nbl_disp_chan_set_mtu_resp); \
+	NBL_DISP_SET_OPS(get_max_mtu, nbl_disp_get_max_mtu, \
+			 NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(get_fd_flow, nbl_disp_get_fd_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FD_FLOW, \
+			 nbl_disp_chan_get_fd_flow_req, nbl_disp_chan_get_fd_flow_resp); \
+	NBL_DISP_SET_OPS(get_fd_flow_cnt, nbl_disp_get_fd_flow_cnt, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FD_FLOW_CNT, \
+			 nbl_disp_chan_get_fd_flow_cnt_req, nbl_disp_chan_get_fd_flow_cnt_resp); \
+	NBL_DISP_SET_OPS(get_fd_flow_all, nbl_disp_get_fd_flow_all, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FD_FLOW_ALL, \
+			 nbl_disp_chan_get_fd_flow_all_req, nbl_disp_chan_get_fd_flow_all_resp); \
+	NBL_DISP_SET_OPS(get_fd_flow_max, nbl_disp_get_fd_flow_max, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FD_FLOW_MAX, \
+			 nbl_disp_chan_get_fd_flow_max_req, nbl_disp_chan_get_fd_flow_max_resp); \
+	NBL_DISP_SET_OPS(replace_fd_flow, nbl_disp_replace_fd_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REPLACE_FD_FLOW, \
+			 nbl_disp_chan_replace_fd_flow_req, nbl_disp_chan_replace_fd_flow_resp); \
+	NBL_DISP_SET_OPS(remove_fd_flow, nbl_disp_remove_fd_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_FD_FLOW, \
+			 nbl_disp_chan_remove_fd_flow_req, nbl_disp_chan_remove_fd_flow_resp); \
+	NBL_DISP_SET_OPS(config_fd_flow_state, nbl_disp_config_fd_flow_state, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_FD_FLOW_STATE, \
+			 nbl_disp_chan_config_fd_flow_state_req, \
+			 nbl_disp_chan_config_fd_flow_state_resp); \
+	NBL_DISP_SET_OPS(cfg_fd_update_event, nbl_disp_cfg_fd_update_event, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(dump_fd_flow, nbl_disp_dump_fd_flow, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(get_xdp_queue_info, nbl_disp_get_xdp_queue_info, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, \
+			 nbl_disp_chan_get_xdp_queue_info_req, \
+			 nbl_disp_chan_get_xdp_queue_info_resp); \
+	NBL_DISP_SET_OPS(set_hw_status, nbl_disp_set_hw_status, \
+			 NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(get_active_func_bitmaps, nbl_disp_get_active_func_bitmaps, \
+			 NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(configure_rdma_bw, nbl_disp_configure_rdma_bw, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_RDMA_BW, \
+			 nbl_disp_chan_configure_rdma_bw_req, \
+			 nbl_disp_chan_configure_rdma_bw_resp); \
+	NBL_DISP_SET_OPS(configure_qos, nbl_disp_configure_qos, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_QOS, \
+			 nbl_disp_chan_configure_qos_req, \
+			 nbl_disp_chan_configure_qos_resp); \
+	NBL_DISP_SET_OPS(set_tc_wgt, nbl_disp_set_tc_wgt, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_TC_WGT, \
+			 nbl_disp_chan_set_tc_wgt_req, \
+			 nbl_disp_chan_set_tc_wgt_resp); \
+	NBL_DISP_SET_OPS(get_pfc_buffer_size, nbl_disp_get_pfc_buffer_size, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, \
+			 nbl_disp_chan_get_pfc_buffer_size_req, \
+			 nbl_disp_chan_get_pfc_buffer_size_resp); \
+	NBL_DISP_SET_OPS(set_pfc_buffer_size, nbl_disp_set_pfc_buffer_size, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, \
+			 nbl_disp_chan_set_pfc_buffer_size_req, \
+			 nbl_disp_chan_set_pfc_buffer_size_resp); \
+	NBL_DISP_SET_OPS(set_rate_limit, nbl_disp_set_rate_limit, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_RATE_LIMIT, \
+			 nbl_disp_chan_set_rate_limit_req, \
+			 nbl_disp_chan_set_rate_limit_resp); \
+	NBL_DISP_SET_OPS(get_perf_dump_length, nbl_disp_get_perf_dump_length, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(get_perf_dump_data, nbl_disp_get_perf_dump_data, \
+			 NBL_DISP_CTRL_LVL_MGT, -1, \
+			 NULL, NULL); \
+	NBL_DISP_SET_OPS(register_dev_name, nbl_disp_register_dev_name, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_PF_NAME, \
+			 nbl_disp_chan_register_dev_name_req, \
+			 nbl_disp_chan_register_dev_name_resp); \
+	NBL_DISP_SET_OPS(get_dev_name, nbl_disp_get_dev_name, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PF_NAME, \
+			 nbl_disp_chan_get_dev_name_req, \
+			 nbl_disp_chan_get_dev_name_resp); \
+	NBL_DISP_SET_OPS(get_mirror_table_id, nbl_disp_get_mirror_table_id, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MIRROR_TABLE_ID, \
+			 nbl_disp_chan_get_mirror_table_id_req, \
+			 nbl_disp_chan_get_mirror_table_id_resp); \
+	NBL_DISP_SET_OPS(configure_mirror, nbl_disp_configure_mirror, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_MIRROR, \
+			 nbl_disp_chan_configure_mirror_req, \
+			 nbl_disp_chan_configure_mirror_resp); \
+	NBL_DISP_SET_OPS(configure_mirror_table, nbl_disp_configure_mirror_table, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_MIRROR_TABLE, \
+			 nbl_disp_chan_configure_mirror_table_req, \
+			 nbl_disp_chan_configure_mirror_table_resp); \
+	NBL_DISP_SET_OPS(clear_mirror_cfg, nbl_disp_clear_mirror_cfg, \
+			 NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CLEAR_MIRROR_CFG, \
+			 nbl_disp_chan_clear_mirror_cfg_req, \
+			 nbl_disp_chan_clear_mirror_cfg_resp); \
+
NBL_DISP_SET_OPS(cfg_mirror_outputport_event, nbl_disp_cfg_mirror_outputport_event, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(check_flow_table_spec, nbl_disp_check_flow_table_spec, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CHECK_FLOWTABLE_SPEC, \ + nbl_disp_chan_check_flow_table_spec_req, \ + nbl_disp_chan_check_flow_table_spec_resp); \ + NBL_DISP_SET_OPS(get_dvn_desc_req, nbl_disp_get_dvn_desc_req, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(set_dvn_desc_req, nbl_disp_set_dvn_desc_req, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ } while (0) /* Structure starts here, adding an op should not modify anything below */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c index ca7716045ae69fb068310fa37a323db0edbe2080..4d521adca925170719083ce31f00a7f7eaeca05f 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c @@ -6,6 +6,37 @@ #include "nbl_ethtool.h" +#define DIAG_BLK_SZ(data_size) (sizeof(struct nbl_diag_blk) + (data_size)) +#define DIAG_GET_NEXT_BLK(dump_hdr) \ + ({ typeof(dump_hdr) _dump_hdr = (dump_hdr); \ + (struct nbl_diag_blk *)(_dump_hdr->dump + _dump_hdr->total_length); }) + +#define NBL_DIAG_DUMP_VERSION 1 +#define NBL_DIAG_FLAG_PERFORMANCE BIT(0) + +#define NBL_DRV_VER_SZ 64 +#define NBL_DEV_NAME_SZ 64 + +enum nbl_diag_type { + NBL_DIAG_DRV_VERSION = 0, + NBL_DIAG_DEVICE_NAME, + NBL_DIAG_PERFORMANCE, +}; + +struct nbl_diag_blk { + u32 type; + u32 length; + char data[]; +} __packed; + +struct nbl_diag_dump { + u32 version; + u32 flag; + u32 num_blocks; + u32 total_length; + char dump[]; +} __packed; + enum NBL_STATS_TYPE { NBL_NETDEV_STATS, NBL_ETH_STATS, @@ -38,6 +69,8 @@ enum nbl_ethtool_test_id { NBL_ETH_TEST_MAX }; +#define NBL_LEONIS_LANE_NUM (4) + #define NBL_TEST_LEN (sizeof(nbl_gstrings_test) / ETH_GSTRING_LEN) #define NBL_NETDEV_STAT(_name, stat_m, stat_n) { \ @@ -74,7 +107,6 @@ static const struct nbl_ethtool_stats nbl_gstrings_stats[] = { NBL_NETDEV_STAT("tx_errors", tx_errors, tx_errors), NBL_NETDEV_STAT("rx_dropped", rx_dropped, rx_dropped), NBL_NETDEV_STAT("tx_dropped", tx_dropped, tx_dropped), - NBL_NETDEV_STAT("eth_multicast", multicast, multicast), NBL_NETDEV_STAT("collisions", collisions, collisions), NBL_NETDEV_STAT("rx_over_errors", rx_over_errors, rx_over_errors), NBL_NETDEV_STAT("rx_crc_errors", rx_crc_errors, rx_crc_errors), @@ -104,8 +136,15 @@ static const struct nbl_ethtool_stats nbl_gstrings_stats[] = { NBL_STAT("rx_cache_busy", rx_cache_busy, rx_cache_busy), NBL_STAT("rx_cache_waive", rx_cache_waive, rx_cache_waive), - NBL_PRIV_STAT("total_dvn_pkt_drop_cnt", total_dvn_pkt_drop_cnt, total_dvn_pkt_drop_cnt), - NBL_PRIV_STAT("total_uvn_stat_pkt_drop", total_uvn_stat_pkt_drop, total_uvn_stat_pkt_drop), + NBL_STAT("xdp_tx_packets", xdp_tx_packets, xdp_tx_packets), + NBL_STAT("xdp_redirect_packets", xdp_redirect_packets, xdp_redirect_packets), + NBL_STAT("xdp_drop_packets", xdp_drop_packets, xdp_drop_packets), + NBL_STAT("xdp_oversize_packets", xdp_oversize_packets, xdp_oversize_packets), + NBL_STAT("tls_encrypted_packets", tls_encrypted_packets, tls_encrypted_packets), + NBL_STAT("tls_encrypted_bytes", tls_encrypted_bytes, tls_encrypted_bytes), + NBL_STAT("tls_ooo_packets", tls_ooo_packets, tls_ooo_packets), + NBL_STAT("tls_decrypted_packets", tls_decrypted_packets, tls_decrypted_packets), + NBL_STAT("tls_resync_req_num", 
tls_resync_req_num, tls_resync_req_num), }; #define NBL_GLOBAL_STATS_LEN ARRAY_SIZE(nbl_gstrings_stats) @@ -120,7 +159,8 @@ struct nbl_priv_flags_info { static const struct nbl_priv_flags_info nbl_gstrings_priv_flags[NBL_ADAPTER_FLAGS_MAX] = { {1, 0, NBL_P4_CAP, "P4-default"}, {0, 1, 0, "link-down-on-close"}, - {0, 0, 0, "mini-driver"}, + {1, 1, NBL_ETH_SUPPORT_NRZ_RS_FEC_544, "nrz-rs-fec-544"}, + {1, 1, NBL_HIGH_THROUGHPUT_CAP, "high-throughput"}, }; #define NBL_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(nbl_gstrings_priv_flags) @@ -130,11 +170,13 @@ static void nbl_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *d struct nbl_adapter *adapter; struct nbl_service_mgt *serv_mgt; struct nbl_dispatch_ops *disp_ops; + struct nbl_netdev_priv *priv; struct nbl_driver_info driver_info; char firmware_version[ETHTOOL_FWVERS_LEN] = {' '}; memset(&driver_info, 0, sizeof(driver_info)); + priv = netdev_priv(netdev); adapter = NBL_NETDEV_TO_ADAPTER(netdev); serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); @@ -145,8 +187,12 @@ static void nbl_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *d else strscpy(drvinfo->version, NBL_DRIVER_VERSION, sizeof(drvinfo->version)); strscpy(drvinfo->fw_version, firmware_version, sizeof(drvinfo->fw_version)); - strscpy(drvinfo->driver, NBL_DRIVER_NAME, sizeof(drvinfo->driver)); - strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); + if (!priv->rep) { + strscpy(drvinfo->driver, NBL_DRIVER_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); + } else { + strscpy(drvinfo->driver, NBL_REP_DRIVER_NAME, sizeof(drvinfo->driver)); + } drvinfo->regdump_len = 0; } @@ -158,29 +204,52 @@ static void nbl_stats_fill_strings(struct net_device *netdev, u8 *data) struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - struct nbl_serv_ring_vsi_info *vsi_info; + struct nbl_serv_ring_vsi_info *vsi_info, *xdp_vsi_info; u8 *p = (char *)data; unsigned int i; + u32 xdp_ring_num = 0; vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + xdp_vsi_info = &ring_mgt->vsi_info[NBL_VSI_XDP]; - for (i = 0; i < NBL_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, nbl_gstrings_stats[i].stat_string); + for (i = 0; i < NBL_GLOBAL_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", nbl_gstrings_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } for (i = 0; i < vsi_info->active_ring_num; i++) { - ethtool_sprintf(&p, "tx_queue_%u_packets", i); - ethtool_sprintf(&p, "tx_queue_%u_bytes", i); - ethtool_sprintf(&p, "tx_queue_%u_descs", i); - ethtool_sprintf(&p, "tx_queue_%u_dvn_pkt_drop_cnt", i); - ethtool_sprintf(&p, "tx_queue_%u_tx_timeout_cnt", i); + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_descs", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_tx_timeout_cnt", i); + p += ETH_GSTRING_LEN; } for (i = 0; i < vsi_info->active_ring_num; i++) { - ethtool_sprintf(&p, "rx_queue_%u_packets", i); - ethtool_sprintf(&p, "rx_queue_%u_bytes", i); - ethtool_sprintf(&p, "rx_queue_%u_descs", i); - ethtool_sprintf(&p, "rx_queue_%u_uvn_stat_pkt_drop", i); + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + 
snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_descs", i); + p += ETH_GSTRING_LEN; } + + if (xdp_vsi_info) + xdp_ring_num = xdp_vsi_info->ring_num < num_online_cpus() ? + xdp_vsi_info->ring_num : num_online_cpus(); + + for (i = 0; i < xdp_ring_num; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_xdp_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_xdp_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_xdp_descs", i); + p += ETH_GSTRING_LEN; + } + if (!common->is_vf) disp_ops->fill_private_stat_strings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), p); } @@ -201,7 +270,8 @@ static void nbl_priv_flags_fill_strings(struct net_device *netdev, u8 *data) capability_type)) continue; } - ethtool_sprintf(&p, nbl_gstrings_priv_flags[i].flag_name); + snprintf(p, ETH_GSTRING_LEN, "%s", nbl_gstrings_priv_flags[i].flag_name); + p += ETH_GSTRING_LEN; } } @@ -229,17 +299,25 @@ static int nbl_sset_fill_count(struct net_device *netdev) struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - struct nbl_serv_ring_vsi_info *vsi_info; - u32 total_queues, private_len = 0, extra_per_queue_entry = 0; + struct nbl_serv_ring_vsi_info *vsi_info, *xdp_vsi_info; + u32 total_queues = 0, private_len = 0, extra_per_queue_entry = 0; + u32 xdp_queue_num = 0; vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + xdp_vsi_info = &ring_mgt->vsi_info[NBL_VSI_XDP]; total_queues = vsi_info->active_ring_num * 2; if (!common->is_vf) disp_ops->get_private_stat_len(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &private_len); - /* For dvn drop and tx_timeout */ - extra_per_queue_entry = total_queues + vsi_info->active_ring_num; + /* For tx_timeout */ + extra_per_queue_entry = vsi_info->active_ring_num; + + /* xdp queue stat */ + if (xdp_vsi_info) + xdp_queue_num = xdp_vsi_info->ring_num < num_online_cpus() ? 
+ xdp_vsi_info->ring_num : num_online_cpus(); + total_queues += xdp_queue_num; return NBL_GLOBAL_STATS_LEN + total_queues * (sizeof(struct nbl_queue_stats) / sizeof(u64)) + @@ -297,17 +375,19 @@ static void nbl_serv_adjust_interrpt_param(struct nbl_service_mgt *serv_mgt, boo struct nbl_serv_ring_mgt *ring_mgt; struct nbl_dispatch_ops *disp_ops; struct net_device *netdev; + struct nbl_netdev_priv *net_priv; struct nbl_serv_ring_vsi_info *vsi_info; u64 last_tx_packets; u64 last_rx_packets; u64 last_get_stats_jiffies, time_diff; u64 tx_packets, rx_packets; - u64 tx_rates, rx_rates, pkt_rates; + u64 tx_rates, rx_rates, pkt_rates, normalized_pkt_rates; u16 local_vector_id, vector_num; u16 intr_suppress_level; net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); netdev = net_resource_mgt->netdev; + net_priv = netdev_priv(netdev); ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; @@ -315,22 +395,27 @@ static void nbl_serv_adjust_interrpt_param(struct nbl_service_mgt *serv_mgt, boo last_tx_packets = net_resource_mgt->stats.tx_packets; last_rx_packets = net_resource_mgt->stats.rx_packets; last_get_stats_jiffies = net_resource_mgt->get_stats_jiffies; + time_diff = jiffies - last_get_stats_jiffies; disp_ops->get_net_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &net_resource_mgt->stats); /* ethtool -S don't adaptive interrupt suppression param */ - if (!vsi_info->itr_dynamic || ethtool) + if (!vsi_info->itr_dynamic || ethtool || !time_diff) return; tx_packets = net_resource_mgt->stats.tx_packets; rx_packets = net_resource_mgt->stats.rx_packets; - time_diff = jiffies - last_get_stats_jiffies; net_resource_mgt->get_stats_jiffies = jiffies; tx_rates = (tx_packets - last_tx_packets) / time_diff * HZ; rx_rates = (rx_packets - last_rx_packets) / time_diff * HZ; pkt_rates = max_t(u64, tx_rates, rx_rates); + if (netdev->mtu < ETH_DATA_LEN) + normalized_pkt_rates = pkt_rates; + else + normalized_pkt_rates = (netdev->mtu / ETH_DATA_LEN) * pkt_rates; intr_suppress_level = - disp_ops->get_intr_suppress_level(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), pkt_rates, + disp_ops->get_intr_suppress_level(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + normalized_pkt_rates, ring_mgt->vectors->intr_suppress_level); if (intr_suppress_level != ring_mgt->vectors->intr_suppress_level) { local_vector_id = ring_mgt->vectors[vsi_info->ring_offset].local_vector_id; @@ -342,34 +427,106 @@ static void nbl_serv_adjust_interrpt_param(struct nbl_service_mgt *serv_mgt, boo } } -void nbl_serv_update_stats(struct nbl_service_mgt *serv_mgt, bool ethtool) +static int nbl_serv_update_hw_stats(struct nbl_service_mgt *serv_mgt, + u64 last_rx_packets, u64 rx_packets) { - struct nbl_serv_net_resource_mgt *net_resource_mgt; - struct net_device *netdev; - struct nbl_adapter *adapter; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct net_device *netdev = net_resource_mgt->netdev; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + u16 vsi_id = NBL_COMMON_TO_VSI_ID(common); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + u32 *uvn_stat_pkt_drop = NULL; + u64 rx_rates; + u64 time_diff; + int i = 0; + int ret = 
0; - net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - netdev = net_resource_mgt->netdev; - adapter = NBL_NETDEV_TO_ADAPTER(netdev); + if (time_after(jiffies, + net_resource_mgt->hw_stats_jiffies + net_resource_mgt->hw_stats_period)) { + time_diff = jiffies - net_resource_mgt->hw_stats_jiffies; + rx_rates = (rx_packets - last_rx_packets) / time_diff * HZ; + net_resource_mgt->hw_stats_jiffies = jiffies; + if (!common->is_vf || rx_rates > NBL_HW_STATS_RX_RATE_THRESHOLD) { + uvn_stat_pkt_drop = devm_kcalloc(dev, vsi_info->ring_num, + sizeof(*uvn_stat_pkt_drop), GFP_KERNEL); + if (!uvn_stat_pkt_drop) { + ret = -ENOMEM; + goto alloc_uvn_stat_pkt_drop_fail; + } + ret = disp_ops->get_uvn_pkt_drop_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, vsi_info->ring_num, + uvn_stat_pkt_drop); + if (ret) + goto get_uvn_pkt_drop_stats_fail; + for (i = 0; i < vsi_info->ring_num; i++) + net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop[i] += + uvn_stat_pkt_drop[i]; + } + } + + if (!common->is_vf && adapter->init_param.caps.has_ctrl) { + ret = disp_ops->get_ustore_pkt_drop_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) + goto get_ustore_pkt_drop_stats_fail; + } + if (uvn_stat_pkt_drop) { + devm_kfree(dev, uvn_stat_pkt_drop); + uvn_stat_pkt_drop = NULL; + } + return 0; + +get_ustore_pkt_drop_stats_fail: +get_uvn_pkt_drop_stats_fail: + if (uvn_stat_pkt_drop) { + devm_kfree(dev, uvn_stat_pkt_drop); + uvn_stat_pkt_drop = NULL; + } +alloc_uvn_stat_pkt_drop_fail: + return ret; +} + +void nbl_serv_update_stats(struct nbl_service_mgt *serv_mgt, bool ethtool) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_eth_abnormal_stats eth_abnormal_stats = { 0 }; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct net_device *netdev = net_resource_mgt->netdev; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + u64 last_rx_packets = 0; + int ret = 0; if (!test_bit(NBL_RUNNING, adapter->state) || test_bit(NBL_RESETTING, adapter->state)) return; + last_rx_packets = net_resource_mgt->stats.rx_packets; nbl_serv_adjust_interrpt_param(serv_mgt, ethtool); netdev->stats.tx_packets = net_resource_mgt->stats.tx_packets; netdev->stats.tx_bytes = net_resource_mgt->stats.tx_bytes; - netdev->stats.rx_packets = net_resource_mgt->stats.rx_packets; netdev->stats.rx_bytes = net_resource_mgt->stats.rx_bytes; + if (!common->is_vf) + disp_ops->get_eth_abnormal_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(common), ð_abnormal_stats); + + ret = nbl_serv_update_hw_stats(serv_mgt, last_rx_packets, + net_resource_mgt->stats.rx_packets); + /* net_device_stats */ + netdev->stats.multicast = 0; netdev->stats.rx_errors = 0; netdev->stats.tx_errors = 0; + netdev->stats.rx_length_errors = eth_abnormal_stats.rx_length_errors; + netdev->stats.rx_crc_errors = eth_abnormal_stats.rx_crc_errors; + netdev->stats.rx_frame_errors = eth_abnormal_stats.rx_frame_errors; netdev->stats.rx_dropped = 0; netdev->stats.tx_dropped = 0; - netdev->stats.multicast = 0; - netdev->stats.rx_length_errors = 0; } static void @@ -383,27 +540,23 @@ nbl_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u6 struct rtnl_link_stats64 temp_stats; struct rtnl_link_stats64 *net_stats; struct nbl_stats *nbl_stats; - struct nbl_priv_stats *nbl_priv_stats; struct nbl_queue_stats queue_stats = { 0 }; struct nbl_queue_err_stats queue_err_stats = { 0 }; - struct 
nbl_serv_ring_vsi_info *vsi_info; + struct nbl_serv_ring_vsi_info *vsi_info, *xdp_vsi_info; u32 private_len = 0; + u32 xdp_ring_num = 0; char *p = NULL; int i, j, k; vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + xdp_vsi_info = &ring_mgt->vsi_info[NBL_VSI_XDP]; nbl_serv_update_stats(serv_mgt, true); net_stats = dev_get_stats(netdev, &temp_stats); nbl_stats = (struct nbl_stats *)((char *)net_resource_mgt + offsetof(struct nbl_serv_net_resource_mgt, stats)); - nbl_priv_stats = (struct nbl_priv_stats *)((char *)net_resource_mgt + - offsetof(struct nbl_serv_net_resource_mgt, priv_stats)); - i = NBL_GLOBAL_STATS_LEN; - nbl_priv_stats->total_dvn_pkt_drop_cnt = 0; - nbl_priv_stats->total_uvn_stat_pkt_drop = 0; for (j = 0; j < vsi_info->active_ring_num; j++) { disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), j, &queue_stats, true); @@ -412,10 +565,8 @@ nbl_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u6 data[i] = queue_stats.packets; data[i + 1] = queue_stats.bytes; data[i + 2] = queue_stats.descs; - data[i + 3] = queue_err_stats.dvn_pkt_drop_cnt; - data[i + 4] = ring_mgt->tx_rings[vsi_info->ring_offset + j].tx_timeout_count; - nbl_priv_stats->total_dvn_pkt_drop_cnt += queue_err_stats.dvn_pkt_drop_cnt; - i += 5; + data[i + 3] = ring_mgt->tx_rings[vsi_info->ring_offset + j].tx_timeout_count; + i += 4; } for (j = 0; j < vsi_info->active_ring_num; j++) { @@ -426,9 +577,20 @@ nbl_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u6 data[i] = queue_stats.packets; data[i + 1] = queue_stats.bytes; data[i + 2] = queue_stats.descs; - data[i + 3] = queue_err_stats.uvn_stat_pkt_drop; - nbl_priv_stats->total_uvn_stat_pkt_drop += queue_err_stats.uvn_stat_pkt_drop; - i += 4; + i += 3; + } + + if (xdp_vsi_info) + xdp_ring_num = xdp_vsi_info->ring_num < num_online_cpus() ? + xdp_vsi_info->ring_num : num_online_cpus(); + + for (j = 0; j < xdp_ring_num; j++) { + disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + ring_mgt->xdp_ring_offset + j, &queue_stats, true); + data[i] = queue_stats.packets; + data[i + 1] = queue_stats.bytes; + data[i + 2] = queue_stats.descs; + i += 3; } for (k = 0; k < NBL_GLOBAL_STATS_LEN; k++) { @@ -439,9 +601,6 @@ nbl_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u6 case NBL_STATS: p = (char *)nbl_stats + nbl_gstrings_stats[k].stat_offset; break; - case NBL_PRIV_STATS: - p = (char *)nbl_priv_stats + nbl_gstrings_stats[k].stat_offset; - break; default: data[k] = 0; continue; @@ -521,6 +680,7 @@ static int nbl_set_channels(struct net_device *netdev, struct ethtool_channels * { struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_tc_mgt *tc_mgt = NBL_SERV_MGT_TO_TC_MGT(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_common_info *common = NBL_NETDEV_TO_COMMON(netdev); struct nbl_serv_ring_vsi_info *vsi_info; @@ -528,6 +688,11 @@ static int nbl_set_channels(struct net_device *netdev, struct ethtool_channels * vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + if (tc_mgt->num_tc) { + netdev_info(netdev, "Cannot set channels since mqprio is enabled.\n"); + return -EINVAL; + } + /* We don't support separate rx/tx channels. * We don't allow setting 'other' channels. 
*/ @@ -539,11 +704,12 @@ static int nbl_set_channels(struct net_device *netdev, struct ethtool_channels * vsi_info->active_ring_num = queue_pairs; + nbl_serv_cpu_affinity_init(serv_mgt, queue_pairs); netif_set_real_num_tx_queues(netdev, queue_pairs); netif_set_real_num_rx_queues(netdev, queue_pairs); disp_ops->setup_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), queue_pairs); + NBL_COMMON_TO_VSI_ID(common), queue_pairs, true); return 0; } @@ -553,6 +719,168 @@ static u32 nbl_get_link(struct net_device *netdev) return netif_carrier_ok(netdev) ? 1 : 0; } +struct nbl_ethtool_link_ext_state_opcode_mapping { + u32 status_opcode; + enum ethtool_link_ext_state link_ext_state; + u8 link_ext_substate; +}; + +static const struct nbl_ethtool_link_ext_state_opcode_mapping nbl_link_ext_state_opcode_map[] = { + /* States relating to the autonegotiation or issues therein */ + {10, ETHTOOL_LINK_EXT_STATE_AUTONEG, 0}, + {11, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED}, + {12, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED}, + {13, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED}, + {14, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE}, + {15, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE}, + {16, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD}, + + /* Failure during link training */ + {20, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, 0}, + {21, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED}, + {22, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT}, + {23, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY}, + {24, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT}, + + /* Logical mismatch in physical coding sublayer or forward error correction sublayer */ + {30, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, 0}, + {31, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK}, + {32, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK}, + {33, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS}, + {34, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED}, + {35, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED}, + + /* Signal integrity issues */ + {40, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, 0}, + {41, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS}, + {42, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE}, + + {43, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST}, + {44, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS}, + + /* No cable connected */ + {50, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0}, + + /* Failure is related to cable, e.g., unsupported cable */ + {60, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, 0}, + {61, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + 
ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {62, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE}, + + /* Failure is related to EEPROM, e.g., failure during reading or parsing the data */ + {70, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0}, + + /* Failure during calibration algorithm */ + {80, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, 0}, + + /* The hardware is not able to provide the power required from cable or module */ + {90, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0}, + + /* The module is overheated */ + {100, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0}, + + /* module */ + {110, ETHTOOL_LINK_EXT_STATE_MODULE, 0}, + {111, ETHTOOL_LINK_EXT_STATE_MODULE, ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY}, +}; + +static void nbl_set_link_ext_state(struct nbl_ethtool_link_ext_state_opcode_mapping + link_ext_state_mapping, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + switch (link_ext_state_mapping.link_ext_state) { + case ETHTOOL_LINK_EXT_STATE_AUTONEG: + link_ext_state_info->autoneg = link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE: + link_ext_state_info->link_training = link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH: + link_ext_state_info->link_logical_mismatch = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY: + link_ext_state_info->bad_signal_integrity = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE: + link_ext_state_info->cable_issue = link_ext_state_mapping.link_ext_substate; + break; + default: + break; + } + + link_ext_state_info->link_ext_state = link_ext_state_mapping.link_ext_state; +} + +static int nbl_get_link_ext_state(struct net_device *netdev, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_NETDEV_TO_COMMON(netdev); + struct nbl_ethtool_link_ext_state_opcode_mapping link_ext_state_mapping; + u32 status_opcode = 0; + int i = 0; + int ret = 0; + + if (netif_carrier_ok(netdev)) + return -ENODATA; + + ret = disp_ops->get_link_status_opcode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(common), &status_opcode); + if (ret) { + netdev_err(netdev, "Get link status opcode failed %d\n", ret); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(nbl_link_ext_state_opcode_map); i++) { + link_ext_state_mapping = nbl_link_ext_state_opcode_map[i]; + if (link_ext_state_mapping.status_opcode == status_opcode) { + nbl_set_link_ext_state(link_ext_state_mapping, link_ext_state_info); + return 0; + } + } + + return -ENODATA; +} + +static void nbl_get_link_ext_stats(struct net_device *netdev, struct ethtool_link_ext_stats *stats) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u8 eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); + u64 link_down_count = 0; + int ret = 0; + + ret = disp_ops->get_link_down_count(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, &link_down_count); + if (ret) + netdev_err(netdev, "Get link down count failed %d\n", ret); + else + stats->link_down_events = link_down_count; +} + static void nbl_link_modes_to_ethtool(u64 modes, unsigned long *ethtool_modes_map) { if (modes & BIT(NBL_PORT_CAP_AUTONEG)) @@
-581,7 +909,7 @@ static void nbl_link_modes_to_ethtool(u64 modes, unsigned long *ethtool_modes_ma if (modes & BIT(NBL_PORT_CAP_1000BASE_X)) __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, ethtool_modes_map); if (modes & BIT(NBL_PORT_CAP_10GBASE_T)) - __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, ethtool_modes_map); + __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ethtool_modes_map); if (modes & BIT(NBL_PORT_CAP_10GBASE_KR)) __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, ethtool_modes_map); if (modes & BIT(NBL_PORT_CAP_10GBASE_SR)) @@ -628,17 +956,35 @@ static void nbl_link_modes_to_ethtool(u64 modes, unsigned long *ethtool_modes_ma __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, ethtool_modes_map); } +static int nbl_serv_get_port_state(struct nbl_service_mgt *serv_mgt, + struct nbl_port_state *port_state) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int ret; + + ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), port_state); + + if (port_state->module_repluged) + net_resource_mgt->configured_fec = 0; + + return ret; +} + static int nbl_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); struct nbl_port_state port_state = {0}; u32 advertising_speed = 0; int ret = 0; - ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + if (test_bit(NBL_FATAL_ERR, adapter->state)) + return -EIO; + + ret = nbl_serv_get_port_state(serv_mgt, &port_state); if (ret) { netdev_err(netdev, "Get port_state failed %d\n", ret); return -EIO; @@ -656,13 +1002,13 @@ static int nbl_get_ksettings(struct net_device *netdev, struct ethtool_link_kset if (port_state.link_state) { cmd->base.speed = port_state.link_speed; cmd->base.duplex = DUPLEX_FULL; + advertising_speed = port_state.link_speed; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; - } - - advertising_speed = net_resource_mgt->configured_speed ? + advertising_speed = net_resource_mgt->configured_speed ? 
net_resource_mgt->configured_speed : cmd->base.speed; + } switch (port_state.port_type) { case NBL_PORT_TYPE_UNKNOWN: @@ -757,8 +1103,10 @@ static int nbl_set_ksettings(struct net_device *netdev, const struct ethtool_lin struct nbl_service_mgt *serv_mgt; struct nbl_serv_net_resource_mgt *net_resource_mgt; struct nbl_dispatch_ops *disp_ops; + struct nbl_phy_caps *phy_caps; struct nbl_port_state port_state = {0}; struct nbl_port_advertising port_advertising = {0}; + u32 autoneg = 0; u32 speed, fw_speed, module_speed, max_speed; u64 speed_advert = 0; u8 active_fec = 0; @@ -767,9 +1115,9 @@ static int nbl_set_ksettings(struct net_device *netdev, const struct ethtool_lin serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + phy_caps = &net_resource_mgt->phy_caps; - ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + ret = nbl_serv_get_port_state(serv_mgt, &port_state); if (ret) { netdev_err(netdev, "Get port_state failed %d\n", ret); return -EIO; @@ -792,11 +1140,25 @@ static int nbl_set_ksettings(struct net_device *netdev, const struct ethtool_lin return -EOPNOTSUPP; } + autoneg = (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (cmd->base.autoneg == autoneg && cmd->base.speed == port_state.link_speed && + port_state.link_state) { + netdev_info(netdev, "eth configuration is unchanged\n"); + return 0; + } + + if (autoneg == AUTONEG_ENABLE && cmd->base.autoneg == autoneg) { + netdev_err(netdev, "changing eth configuration is not supported when autoneg is enabled\n"); + return -EOPNOTSUPP; + } + + speed = cmd->base.speed; fw_speed = nbl_conver_fw_rate_to_speed(port_state.fw_port_max_speed); module_speed = nbl_conver_portrate_to_speed(port_state.port_max_rate); max_speed = fw_speed > module_speed ?
module_speed : fw_speed; - if (speed == SPEED_UNKNOWN) + if (speed == SPEED_UNKNOWN || cmd->base.autoneg) speed = max_speed; if (speed > max_speed) { @@ -811,10 +1173,7 @@ static int nbl_set_ksettings(struct net_device *netdev, const struct ethtool_lin return -EINVAL; } - if (cmd->base.autoneg) - speed = max_speed; - - if (cmd->base.autoneg) { + if (cmd->base.autoneg || port_state.port_caps & BIT(NBL_PORT_CAP_FEC_AUTONEG)) { switch (net_resource_mgt->configured_fec) { case ETHTOOL_FEC_OFF: active_fec = NBL_PORT_FEC_OFF; @@ -877,16 +1236,24 @@ static void nbl_get_ringparam(struct net_device *netdev, struct ethtool_ringpara struct kernel_ethtool_ringparam *k_ringparam, struct netlink_ext_ack *extack) { + struct nbl_netdev_priv *priv = netdev_priv(netdev); struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dispatch_mgt *disp_mgt = NBL_ADAPTER_TO_DISP_MGT(adapter); struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; u16 max_desc_num; - max_desc_num = disp_ops->get_max_desc_num(disp_mgt); - ringparam->tx_max_pending = max_desc_num; - ringparam->rx_max_pending = max_desc_num; - ringparam->tx_pending = disp_ops->get_tx_desc_num(disp_mgt, 0); - ringparam->rx_pending = disp_ops->get_rx_desc_num(disp_mgt, 0); + if (!priv->rep) { + max_desc_num = disp_ops->get_max_desc_num(disp_mgt); + ringparam->tx_max_pending = max_desc_num; + ringparam->rx_max_pending = max_desc_num; + ringparam->tx_pending = disp_ops->get_tx_desc_num(disp_mgt, 0); + ringparam->rx_pending = disp_ops->get_rx_desc_num(disp_mgt, 0); + } else { + ringparam->tx_max_pending = NBL_REP_QUEUE_MGT_DESC_MAX; + ringparam->rx_max_pending = NBL_REP_QUEUE_MGT_DESC_MAX; + ringparam->tx_pending = NBL_REP_QUEUE_MGT_DESC_NUM; + ringparam->rx_pending = NBL_REP_QUEUE_MGT_DESC_NUM; + } } static int nbl_check_set_ringparam(struct net_device *netdev, @@ -965,6 +1332,7 @@ static int nbl_set_ringparam(struct net_device *netdev, struct ethtool_ringparam struct nbl_serv_ring_vsi_info *vsi_info; u16 max_desc_num, min_desc_num; u16 new_tx_count, new_rx_count; + u16 old_tx_count, old_rx_count; int was_running; int i; int err; @@ -981,6 +1349,8 @@ static int nbl_set_ringparam(struct net_device *netdev, struct ethtool_ringparam if (err <= 0) return err; + old_tx_count = ring_mgt->tx_desc_num; + old_rx_count = ring_mgt->rx_desc_num; new_tx_count = ringparam->tx_pending; new_rx_count = ringparam->rx_pending; @@ -990,7 +1360,7 @@ static int nbl_set_ringparam(struct net_device *netdev, struct ethtool_ringparam if (was_running) { err = nbl_serv_netdev_stop(netdev); - if (err) { + if (err && err != -EBUSY) { netdev_err(netdev, "Netdev stop failed while setting ringparam\n"); clear_bit(NBL_RESETTING, adapter->state); return err; @@ -1011,6 +1381,8 @@ static int nbl_set_ringparam(struct net_device *netdev, struct ethtool_ringparam if (err) { netdev_err(netdev, "Netdev open failed after setting ringparam\n"); clear_bit(NBL_RESETTING, adapter->state); + ring_mgt->tx_desc_num = old_tx_count; + ring_mgt->rx_desc_num = old_rx_count; return err; } } @@ -1020,45 +1392,1057 @@ static int nbl_set_ringparam(struct net_device *netdev, struct ethtool_ringparam return 0; } +static int nbl_fd_translate_cls_rule(u16 type, u16 length, u8 *val, void *data) +{ + struct ethtool_rxnfc *cmd = (struct ethtool_rxnfc *)(data); + struct ethtool_rx_flow_spec *fs = &cmd->fs; + u64 udf_val, udf_mask; + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + u16 ring, vf, vsi; + + switch (type) { + case NBL_CHAN_FDIR_KEY_SRC_MAC: + 
ether_addr_copy(fs->h_u.ether_spec.h_source, val); + ether_addr_copy(fs->m_u.ether_spec.h_source, val + 6); + break; + case NBL_CHAN_FDIR_KEY_DST_MAC: + if (flow_type == ETHER_FLOW) { + ether_addr_copy(fs->h_u.ether_spec.h_dest, val); + ether_addr_copy(fs->m_u.ether_spec.h_dest, val + 6); + } else { + ether_addr_copy(fs->h_ext.h_dest, val); + ether_addr_copy(fs->m_ext.h_dest, val + 6); + fs->flow_type |= FLOW_MAC_EXT; + } + break; + case NBL_CHAN_FDIR_KEY_PROTO: + if (flow_type == ETHER_FLOW) { + fs->h_u.ether_spec.h_proto = *(u16 *)val; + fs->m_u.ether_spec.h_proto = *(u16 *)(val + 2); + } + break; + case NBL_CHAN_FDIR_KEY_SRC_IPv4: + if (flow_type == IPV4_USER_FLOW) { + fs->h_u.usr_ip4_spec.ip4src = *(u32 *)val; + fs->m_u.usr_ip4_spec.ip4src = *(u32 *)(val + 4); + } else { + fs->h_u.tcp_ip4_spec.ip4src = *(u32 *)val; + fs->m_u.tcp_ip4_spec.ip4src = *(u32 *)(val + 4); + } + break; + case NBL_CHAN_FDIR_KEY_DST_IPv4: + if (flow_type == IPV4_USER_FLOW) { + fs->h_u.usr_ip4_spec.ip4dst = *(u32 *)val; + fs->m_u.usr_ip4_spec.ip4dst = *(u32 *)(val + 4); + } else { + fs->h_u.tcp_ip4_spec.ip4dst = *(u32 *)val; + fs->m_u.tcp_ip4_spec.ip4dst = *(u32 *)(val + 4); + } + break; + case NBL_CHAN_FDIR_KEY_L4PROTO: + if (flow_type == IPV4_USER_FLOW) { + fs->h_u.usr_ip4_spec.proto = *(u8 *)val; + fs->m_u.usr_ip4_spec.proto = *(u8 *)(val + 1); + } else if (flow_type == IPV6_USER_FLOW) { + fs->h_u.usr_ip6_spec.l4_proto = *(u8 *)val; + fs->m_u.usr_ip6_spec.l4_proto = *(u8 *)(val + 1); + } + break; + case NBL_CHAN_FDIR_KEY_SRC_IPv6: + if (flow_type == IPV6_USER_FLOW) { + memcpy(&fs->h_u.usr_ip6_spec.ip6src, val, + sizeof(fs->h_u.usr_ip6_spec.ip6src)); + memcpy(&fs->m_u.usr_ip6_spec.ip6src, val + 16, + sizeof(fs->m_u.usr_ip6_spec.ip6src)); + } else { + memcpy(&fs->h_u.tcp_ip6_spec.ip6src, val, + sizeof(fs->h_u.tcp_ip6_spec.ip6src)); + memcpy(&fs->m_u.tcp_ip6_spec.ip6src, val + 16, + sizeof(fs->m_u.tcp_ip6_spec.ip6src)); + } + break; + case NBL_CHAN_FDIR_KEY_DST_IPv6: + if (flow_type == IPV6_USER_FLOW) { + memcpy(&fs->h_u.usr_ip6_spec.ip6dst, val, + sizeof(fs->h_u.usr_ip6_spec.ip6dst)); + memcpy(&fs->m_u.usr_ip6_spec.ip6dst, val + 16, + sizeof(fs->m_u.usr_ip6_spec.ip6dst)); + } else { + memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, val, + sizeof(fs->h_u.tcp_ip6_spec.ip6dst)); + memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, val + 16, + sizeof(fs->m_u.tcp_ip6_spec.ip6dst)); + } + break; + case NBL_CHAN_FDIR_KEY_SPORT: + if (flow_type == TCP_V4_FLOW || flow_type == UDP_V4_FLOW) { + fs->h_u.tcp_ip4_spec.psrc = *(u16 *)val; + fs->m_u.tcp_ip4_spec.psrc = *(u16 *)(val + 2); + } else if (flow_type == TCP_V6_FLOW || flow_type == UDP_V6_FLOW) { + fs->h_u.tcp_ip6_spec.psrc = *(u16 *)val; + fs->m_u.tcp_ip6_spec.psrc = *(u16 *)(val + 2); + } + break; + case NBL_CHAN_FDIR_KEY_DPORT: + if (flow_type == TCP_V4_FLOW || flow_type == UDP_V4_FLOW) { + fs->h_u.tcp_ip4_spec.pdst = *(u16 *)val; + fs->m_u.tcp_ip4_spec.pdst = *(u16 *)(val + 2); + } else if (flow_type == TCP_V6_FLOW || flow_type == UDP_V6_FLOW) { + fs->h_u.tcp_ip6_spec.pdst = *(u16 *)val; + fs->m_u.tcp_ip6_spec.pdst = *(u16 *)(val + 2); + } + break; + case NBL_CHAN_FDIR_KEY_UDF: + udf_val = cpu_to_be64p((u64 *)val); + udf_mask = cpu_to_be64p((u64 *)(val + 8)); + + memcpy(fs->h_ext.data, &udf_val, sizeof(udf_val)); + memcpy(fs->m_ext.data, &udf_mask, sizeof(udf_mask)); + fs->flow_type |= FLOW_EXT; + break; + case NBL_CHAN_FDIR_ACTION_QUEUE: + ring = *(u16 *)val; + vf = *(u16 *)(val + 2); + fs->ring_cookie = (u64)ring | (u64)vf << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + break; + case 
NBL_CHAN_FDIR_ACTION_VSI: + vsi = *(u16 *)(val + 4); + if (vsi == 0xFFFF) + fs->ring_cookie = RX_CLS_FLOW_DISC; + break; + default: + break; + } + + return 0; +} + +static void nbl_fd_flow_type_translate(enum nbl_chan_fdir_flow_type flow_type, + struct ethtool_rxnfc *cmd) +{ + switch (flow_type) { + case NBL_CHAN_FDIR_FLOW_FULL: + case NBL_CHAN_FDIR_FLOW_ETHER: + cmd->fs.flow_type = ETHER_FLOW; + break; + case NBL_CHAN_FDIR_FLOW_IPv4: + cmd->fs.flow_type = IPV4_USER_FLOW; + cmd->fs.h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + cmd->fs.m_u.usr_ip4_spec.ip_ver = 0xFF; + break; + case NBL_CHAN_FDIR_FLOW_IPv6: + cmd->fs.flow_type = IPV6_USER_FLOW; + break; + case NBL_CHAN_FDIR_FLOW_TCP_IPv4: + cmd->fs.flow_type = TCP_V4_FLOW; + break; + case NBL_CHAN_FDIR_FLOW_TCP_IPv6: + cmd->fs.flow_type = TCP_V6_FLOW; + break; + case NBL_CHAN_FDIR_FLOW_UDP_IPv4: + cmd->fs.flow_type = UDP_V4_FLOW; + break; + case NBL_CHAN_FDIR_FLOW_UDP_IPv6: + cmd->fs.flow_type = UDP_V6_FLOW; + break; + default: + break; + } +} + +static int nbl_get_rss_hash_opt(struct net_device *netdev, struct ethtool_rxnfc *nfc) +{ + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + break; + default: + return -EOPNOTSUPP; + } + + nfc->data = 0; + nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; + + return 0; +} + +static int nbl_set_rss_hash_opt(struct net_device *netdev, struct ethtool_rxnfc *nfc) +{ + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + break; + default: + return -EOPNOTSUPP; + } + + if (nfc->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return 0; + else + return -EOPNOTSUPP; +} + static int nbl_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_serv_ring_vsi_info *vsi_info; - int ret = -EOPNOTSUPP; + struct nbl_chan_param_fdir_replace *info; + struct nbl_chan_param_get_fd_flow_all param; + u32 *locs_tmp = NULL; + int ret = 0, start = 0, num = 0, total_num = 0, i; vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: cmd->data = vsi_info->active_ring_num; - ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + ret = disp_ops->get_fd_flow_cnt(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_CHAN_FDIR_RULE_NORMAL, + NBL_COMMON_TO_VSI_ID(common)); + if (ret < 0) + return ret; + + cmd->rule_cnt = ret; + return 0; + case ETHTOOL_GRXCLSRULE: + info = kzalloc(NBL_CHAN_FDIR_FLOW_RULE_SIZE, GFP_KERNEL); + if (!info) + return -ENOMEM; + ret = disp_ops->get_fd_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), + cmd->fs.location, + NBL_CHAN_FDIR_RULE_NORMAL, + info); + if (!ret) { + nbl_fd_flow_type_translate(info->flow_type, cmd); + cmd->fs.location = info->location; + nbl_flow_direct_parse_tlv_data(info->tlv, info->tlv_length, + nbl_fd_translate_cls_rule, cmd); + } + kfree(info); + break; + case ETHTOOL_GRXCLSRLALL: + total_num = cmd->rule_cnt; + + locs_tmp = kcalloc(NBL_CHAN_GET_FD_LOCS_MAX, sizeof(*locs_tmp), GFP_KERNEL); + if (!locs_tmp) + return -ENOMEM; + + while (total_num > 0) { + num = total_num > NBL_CHAN_GET_FD_LOCS_MAX ? 
NBL_CHAN_GET_FD_LOCS_MAX + : total_num; + param.rule_type = NBL_CHAN_FDIR_RULE_NORMAL; + param.start = start; + param.num = num; + param.vsi_id = NBL_COMMON_TO_VSI_ID(common); + ret = disp_ops->get_fd_flow_all(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + ¶m, locs_tmp); + if (ret) { + kfree(locs_tmp); + return ret; + } + + for (i = 0; i < num; i++) + rule_locs[start + i] = locs_tmp[i]; + + start += num; + total_num -= num; + } + + cmd->data = disp_ops->get_fd_flow_max(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + kfree(locs_tmp); + break; + case ETHTOOL_GRXFH: + ret = nbl_get_rss_hash_opt(netdev, cmd); break; default: + ret = -EOPNOTSUPP; break; } return ret; } -static u32 nbl_get_rxfh_indir_size(struct net_device *netdev) +static int nbl_format_flow_ext_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) { - struct nbl_service_mgt *serv_mgt; - struct nbl_dispatch_ops *disp_ops; - struct nbl_common_info *common; - u32 rxfh_indir_size = 0; + u64 udf_value = be64_to_cpup((__force __be64 *)fs->h_ext.data); + u64 udf_mask = be64_to_cpup((__force __be64 *)fs->m_ext.data); + u8 *tlv_start = info->tlv + *offset; + u16 tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 16; - serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + if (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci) + return -EINVAL; - disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size); + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; - return rxfh_indir_size; + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_UDF; + *(u16 *)(tlv_start + 2) = 16; + memcpy(tlv_start + 4, &udf_value, sizeof(udf_value)); + memcpy(tlv_start + 12, &udf_mask, sizeof(udf_mask)); + *offset += tlv_length; + + return 0; } -static u32 nbl_get_rxfh_key_size(struct net_device *netdev) +static int nbl_format_flow_mac_ext_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + u8 *tlv_start = info->tlv + *offset; + u16 tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 2 * ETH_ALEN; + + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_MAC; + *(u16 *)(tlv_start + 2) = 2 * ETH_ALEN; + ether_addr_copy(tlv_start + 4, fs->h_ext.h_dest); + ether_addr_copy(tlv_start + 10, fs->m_ext.h_dest); + *offset += tlv_length; + + return 0; +} + +static int nbl_format_ether_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethhdr *ether_spec = &fs->h_u.ether_spec; + struct ethhdr *ether_mask = &fs->m_u.ether_spec; + u8 *tlv_start; + u16 tlv_length; + bool valid = 0; + + if (!is_zero_ether_addr(ether_mask->h_dest)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 2 * ETH_ALEN; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_MAC; + *(u16 *)(tlv_start + 2) = 2 * ETH_ALEN; + ether_addr_copy(tlv_start + 4, ether_spec->h_dest); + ether_addr_copy(tlv_start + 10, ether_mask->h_dest); + *offset += tlv_length; + valid = 1; + } + + if (!is_zero_ether_addr(ether_mask->h_source)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 2 * ETH_ALEN; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_MAC; + *(u16 *)(tlv_start 
+ 2) = 2 * ETH_ALEN; + ether_addr_copy(tlv_start + 4, ether_spec->h_source); + ether_addr_copy(tlv_start + 10, ether_mask->h_source); + *offset += tlv_length; + valid = 1; + } + + if (ether_mask->h_proto) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = ether_spec->h_proto; + *(u16 *)(tlv_start + 6) = ether_mask->h_proto; + *offset += tlv_length; + valid = 1; + } + + if (!valid) + return -EINVAL; + + return 0; +} + +static int nbl_format_ipv4_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_usrip4_spec *usr_ip4_spec = &fs->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *usr_ip4_mask = &fs->m_u.usr_ip4_spec; + u8 *tlv_start; + u16 tlv_length; + + if (usr_ip4_mask->ip4src) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv4; + *(u16 *)(tlv_start + 2) = 8; + *(u32 *)(tlv_start + 4) = usr_ip4_spec->ip4src; + *(u32 *)(tlv_start + 8) = usr_ip4_mask->ip4src; + *offset += tlv_length; + } + + if (usr_ip4_mask->ip4dst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv4; + *(u16 *)(tlv_start + 2) = 8; + *(u32 *)(tlv_start + 4) = usr_ip4_spec->ip4dst; + *(u32 *)(tlv_start + 8) = usr_ip4_mask->ip4dst; + *offset += tlv_length; + } + + if (usr_ip4_mask->proto) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = usr_ip4_spec->proto; + *(u8 *)(tlv_start + 5) = usr_ip4_mask->proto; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IP); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + return 0; +} + +static int nbl_format_ipv6_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_usrip6_spec *usr_ip6_spec = &fs->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *usr_ip6_mask = &fs->m_u.usr_ip6_spec; + u8 *tlv_start; + u16 tlv_length; + + if (!ipv6_addr_any((struct in6_addr *)usr_ip6_mask->ip6src)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, usr_ip6_spec->ip6src, sizeof(usr_ip6_spec->ip6src)); + memcpy(tlv_start + 20, usr_ip6_mask->ip6src, sizeof(usr_ip6_mask->ip6src)); + *offset += tlv_length; + } + + if (!ipv6_addr_any((struct in6_addr *)usr_ip6_mask->ip6dst)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + 
return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, usr_ip6_spec->ip6dst, sizeof(usr_ip6_spec->ip6dst)); + memcpy(tlv_start + 20, usr_ip6_mask->ip6dst, sizeof(usr_ip6_mask->ip6dst)); + *offset += tlv_length; + } + + if (usr_ip6_mask->l4_proto) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = usr_ip6_spec->l4_proto; + *(u8 *)(tlv_start + 5) = usr_ip6_mask->l4_proto; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IPV6); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + return 0; +} + +static int nbl_format_tcpv4_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_tcpip4_spec *tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *tcp_ip4_mask = &fs->m_u.tcp_ip4_spec; + u8 *tlv_start; + u16 tlv_length; + + if (tcp_ip4_mask->ip4src) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv4; + *(u16 *)(tlv_start + 2) = 8; + *(u32 *)(tlv_start + 4) = tcp_ip4_spec->ip4src; + *(u32 *)(tlv_start + 8) = tcp_ip4_mask->ip4src; + *offset += tlv_length; + } + + if (tcp_ip4_mask->ip4dst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv4; + *(u16 *)(tlv_start + 2) = 8; + *(u32 *)(tlv_start + 4) = tcp_ip4_spec->ip4dst; + *(u32 *)(tlv_start + 8) = tcp_ip4_mask->ip4dst; + *offset += tlv_length; + } + + if (tcp_ip4_mask->psrc) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = tcp_ip4_spec->psrc; + *(u16 *)(tlv_start + 6) = tcp_ip4_mask->psrc; + *offset += tlv_length; + } + + if (tcp_ip4_mask->pdst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = tcp_ip4_spec->pdst; + *(u16 *)(tlv_start + 6) = tcp_ip4_mask->pdst; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = IPPROTO_TCP; + *(u8 *)(tlv_start + 5) = 0xFF; + *offset += tlv_length; + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + 
*(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IP); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + + return 0; +} + +static int nbl_format_tcpv6_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_tcpip6_spec *tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *tcp_ip6_mask = &fs->m_u.tcp_ip6_spec; + u8 *tlv_start; + u16 tlv_length; + + if (!ipv6_addr_any((struct in6_addr *)tcp_ip6_mask->ip6src)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, tcp_ip6_spec->ip6src, sizeof(tcp_ip6_spec->ip6src)); + memcpy(tlv_start + 20, tcp_ip6_mask->ip6src, sizeof(tcp_ip6_mask->ip6src)); + *offset += tlv_length; + } + + if (!ipv6_addr_any((struct in6_addr *)tcp_ip6_mask->ip6dst)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, tcp_ip6_spec->ip6dst, sizeof(tcp_ip6_spec->ip6dst)); + memcpy(tlv_start + 20, tcp_ip6_mask->ip6dst, sizeof(tcp_ip6_mask->ip6dst)); + *offset += tlv_length; + } + + if (tcp_ip6_mask->psrc) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = tcp_ip6_spec->psrc; + *(u16 *)(tlv_start + 6) = tcp_ip6_mask->psrc; + *offset += tlv_length; + } + + if (tcp_ip6_mask->pdst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = tcp_ip6_spec->pdst; + *(u16 *)(tlv_start + 6) = tcp_ip6_mask->pdst; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = IPPROTO_TCP; + *(u8 *)(tlv_start + 5) = 0xFF; + *offset += tlv_length; + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IPV6); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + + return 0; +} + +static int nbl_format_udpv4_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_tcpip4_spec *udp_ip4_spec = &fs->h_u.udp_ip4_spec; + struct ethtool_tcpip4_spec *udp_ip4_mask = &fs->m_u.udp_ip4_spec; + u8 *tlv_start; + u16 tlv_length; + + if (udp_ip4_mask->ip4src) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv4; + *(u16 *)(tlv_start + 2) 
= 8; + *(u32 *)(tlv_start + 4) = udp_ip4_spec->ip4src; + *(u32 *)(tlv_start + 8) = udp_ip4_mask->ip4src; + *offset += tlv_length; + } + + if (udp_ip4_mask->ip4dst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv4; + *(u16 *)(tlv_start + 2) = 8; + *(u32 *)(tlv_start + 4) = udp_ip4_spec->ip4dst; + *(u32 *)(tlv_start + 8) = udp_ip4_mask->ip4dst; + *offset += tlv_length; + } + + if (udp_ip4_mask->psrc) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = udp_ip4_spec->psrc; + *(u16 *)(tlv_start + 6) = udp_ip4_mask->psrc; + *offset += tlv_length; + } + + if (udp_ip4_mask->pdst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = udp_ip4_spec->pdst; + *(u16 *)(tlv_start + 6) = udp_ip4_mask->pdst; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = IPPROTO_UDP; + *(u8 *)(tlv_start + 5) = 0xFF; + *offset += tlv_length; + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IP); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + + return 0; +} + +static int nbl_format_udpv6_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_tcpip6_spec *udp_ip6_spec = &fs->h_u.udp_ip6_spec; + struct ethtool_tcpip6_spec *udp_ip6_mask = &fs->m_u.udp_ip6_spec; + u8 *tlv_start; + u16 tlv_length; + + if (!ipv6_addr_any((struct in6_addr *)udp_ip6_mask->ip6src)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, udp_ip6_spec->ip6src, sizeof(udp_ip6_spec->ip6src)); + memcpy(tlv_start + 20, udp_ip6_mask->ip6src, sizeof(udp_ip6_mask->ip6src)); + *offset += tlv_length; + } + + if (!ipv6_addr_any((struct in6_addr *)udp_ip6_mask->ip6dst)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, udp_ip6_spec->ip6dst, sizeof(udp_ip6_spec->ip6dst)); + memcpy(tlv_start + 20, udp_ip6_mask->ip6dst, sizeof(udp_ip6_mask->ip6dst)); + *offset += tlv_length; + } + + if (udp_ip6_mask->psrc) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; 
+ + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = udp_ip6_spec->psrc; + *(u16 *)(tlv_start + 6) = udp_ip6_mask->psrc; + *offset += tlv_length; + } + + if (udp_ip6_mask->pdst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = udp_ip6_spec->pdst; + *(u16 *)(tlv_start + 6) = udp_ip6_mask->pdst; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = IPPROTO_UDP; + *(u8 *)(tlv_start + 5) = 0xFF; + *offset += tlv_length; + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IPV6); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + + return 0; +} + +static struct nbl_chan_param_fdir_replace *nbl_format_fdir_rule(struct ethtool_rx_flow_spec *fs) +{ + struct nbl_chan_param_fdir_replace *info; + int ret = 0, offset = 0; + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + + info = kzalloc(NBL_CHAN_FDIR_FLOW_RULE_SIZE, GFP_KERNEL); + if (!info) + return NULL; + + if (fs->flow_type & FLOW_RSS) { + ret = -EINVAL; + goto check_failed; + } + + if (fs->flow_type & FLOW_EXT) { + ret = nbl_format_flow_ext_rule(fs, info, &offset); + if (ret) + goto check_failed; + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ret = nbl_format_flow_mac_ext_rule(fs, info, &offset); + if (ret) + goto check_failed; + } + + switch (flow_type) { + case ETHER_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_ETHER; + ret = nbl_format_ether_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case IPV4_USER_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_IPv4; + ret = nbl_format_ipv4_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case IPV6_USER_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_IPv6; + ret = nbl_format_ipv6_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case TCP_V4_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_TCP_IPv4; + ret = nbl_format_tcpv4_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case TCP_V6_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_TCP_IPv6; + ret = nbl_format_tcpv6_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case UDP_V4_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_UDP_IPv4; + ret = nbl_format_udpv4_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case UDP_V6_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_UDP_IPv6; + ret = nbl_format_udpv6_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + default: + ret = -EOPNOTSUPP; + goto check_failed; + } + + info->rule_type = NBL_CHAN_FDIR_RULE_NORMAL; + info->order = 1; + info->tlv_length = offset; + info->base_length = sizeof(*info); + info->location = fs->location; + return info; + +check_failed: + kfree(info); + return NULL; +} + +static int nbl_format_fdir_action(struct nbl_chan_param_fdir_replace *info, + u16 ring, u16 
vf_id, u16 dport, u16 global_queue_id) +{ + u8 *tlv_start; + u16 tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + + if (info->tlv_length > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + tlv_start = info->tlv + info->tlv_length; + if (dport != 0xFFFF) + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_ACTION_QUEUE; + else + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_ACTION_VSI; + + *(u16 *)(tlv_start + 2) = 8; + *(u16 *)(tlv_start + 4) = info->ring = ring; + *(u16 *)(tlv_start + 6) = info->vf = vf_id; + *(u16 *)(tlv_start + 8) = info->dport = dport; + *(u16 *)(tlv_start + 10) = info->global_queue_id = global_queue_id; + + info->tlv_length += tlv_length; + return 0; +} + +static int nbl_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + struct nbl_chan_param_fdir_replace *info; + u64 ring_cookie = cmd->fs.ring_cookie; + int ret = -EOPNOTSUPP; + u32 ring = 0; + u16 vf = 0; + u16 vsi_id = NBL_COMMON_TO_VSI_ID(common); + u16 global_queue_id = NBL_INVALID_QUEUE_ID, dport = 0xFFFF; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + if (common->is_vf) + return -EOPNOTSUPP; + if (ring_cookie == RX_CLS_FLOW_WAKE) + return -EINVAL; + + if (ring_cookie != RX_CLS_FLOW_DISC) { + dport = vsi_id; + ring = ethtool_get_flow_spec_ring(cmd->fs.ring_cookie); + vf = ethtool_get_flow_spec_ring_vf(cmd->fs.ring_cookie); + + if (vf == 0 && (ring < vsi_info->ring_offset || + ring >= vsi_info->ring_offset + vsi_info->active_ring_num)) + return -EINVAL; + + /* vf = real_vf_idx + 1, 0 means direct to rx queue. 
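The ring_cookie packs both fields: ethtool_get_flow_spec_ring() extracts the queue and ethtool_get_flow_spec_ring_vf() the vf index, as done above.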
*/ + if (vf > net_resource_mgt->total_vfs) + return -EINVAL; + + if (vf) + dport = disp_ops->get_vf_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, vf - 1); + global_queue_id = disp_ops->get_vsi_global_queue_id + (NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), dport, ring); + } + + info = nbl_format_fdir_rule(&cmd->fs); + if (!info) + return -EINVAL; + + info->vsi = vsi_id; + ret = nbl_format_fdir_action(info, ring, vf, dport, global_queue_id); + if (ret) { + kfree(info); + return ret; + } + ret = disp_ops->replace_fd_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), info); + kfree(info); + break; + case ETHTOOL_SRXCLSRLDEL: + if (common->is_vf) + return -EOPNOTSUPP; + ret = disp_ops->remove_fd_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_CHAN_FDIR_RULE_NORMAL, + cmd->fs.location, vsi_id); + break; + case ETHTOOL_SRXFH: + ret = nbl_set_rss_hash_opt(netdev, cmd); + break; + default: + break; + } + + return ret; +} + +static u32 nbl_get_rxfh_indir_size(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u32 rxfh_indir_size = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size); + + return rxfh_indir_size; +} + +static u32 nbl_get_rxfh_key_size(struct net_device *netdev) { struct nbl_service_mgt *serv_mgt; struct nbl_dispatch_ops *disp_ops; @@ -1095,11 +2479,54 @@ static int nbl_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfun disp_ops->get_rxfh_rss_key(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), key, rxfh_key_size); if (hfunc) disp_ops->get_rxfh_rss_alg_sel(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - hfunc, NBL_COMMON_TO_ETH_ID(serv_mgt->common)); + NBL_COMMON_TO_VSI_ID(common), hfunc); return 0; } +static int nbl_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + u32 rxfh_indir_size = 0; + int ret = 0; + + if (indir) { + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size); + ret = disp_ops->set_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), + indir, rxfh_indir_size); + if (ret) { + netdev_err(netdev, "set RSS indirection table failed %d\n", ret); + return ret; + } + if (!ring_mgt->rss_indir_user) { + ring_mgt->rss_indir_user = devm_kcalloc(dev, rxfh_indir_size, + sizeof(u32), GFP_KERNEL); + if (!ring_mgt->rss_indir_user) + return -ENOMEM; + } + memcpy(ring_mgt->rss_indir_user, indir, rxfh_indir_size * sizeof(u32)); + } + if (key) { + netdev_err(netdev, "RSS key modification is not supported\n"); + return -EOPNOTSUPP; + } + if (hfunc) { + ret = disp_ops->set_rxfh_rss_alg_sel(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), hfunc); + if (ret) { + netdev_err(netdev, "set RSS hash function failed %d\n", ret); + return ret; + } + } + return 0; +} + static u32 nbl_get_msglevel(struct net_device *netdev) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); @@ -1147,6 +2574,7 @@ static int 
nbl_get_per_queue_coalesce(struct net_device *netdev, struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_serv_ring_vsi_info *vsi_info; u16 local_vector_id, configured_usecs; + struct nbl_chan_param_get_coalesce coalesce_param = {0}; vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; @@ -1157,7 +2585,13 @@ static int nbl_get_per_queue_coalesce(struct net_device *netdev, local_vector_id = ring_mgt->vectors[q_num + vsi_info->ring_offset].local_vector_id; configured_usecs = ring_mgt->vectors[q_num + vsi_info->ring_offset].intr_rate_usecs; - disp_ops->get_coalesce(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), local_vector_id, ec); + disp_ops->get_coalesce(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + local_vector_id, &coalesce_param); + + NBL_SET_INTR_COALESCE(ec, coalesce_param.tx_coalesce_usecs, + coalesce_param.tx_max_coalesced_frames, + coalesce_param.rx_coalesce_usecs, + coalesce_param.rx_max_coalesced_frames); if (vsi_info->itr_dynamic) { ec->use_adaptive_tx_coalesce = 1; @@ -1202,7 +2636,8 @@ static int __nbl_set_per_queue_coalesce(struct net_device *netdev, if (ec->tx_max_coalesced_frames != ec->rx_max_coalesced_frames || ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) { - netdev_err(netdev, "tx and rx using the same interrupt, rx params should equal to tx params\n"); + netdev_err(netdev, "tx and rx use the same interrupt, rx params must equal tx params\n"); return -EINVAL; } @@ -1459,11 +2894,16 @@ static u64 nbl_loopback_test(struct net_device *netdev) struct nbl_netdev_priv *priv = netdev_priv(netdev); struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); struct nbl_serv_ring_mgt *ring_mgt = &serv_mgt->ring_mgt; struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; u8 origin_num_txq, origin_num_rxq, origin_active_q; u64 result = 0; + u32 rxfh_indir_size = 0; + u32 *indir = NULL; + int i = 0; /* In loopback test, we only need one queue */ origin_num_txq = ring_mgt->tx_ring_num; @@ -1472,6 +2912,16 @@ static u64 nbl_loopback_test(struct net_device *netdev) ring_mgt->tx_ring_num = NBL_SELF_TEST_Q_NUM; ring_mgt->rx_ring_num = NBL_SELF_TEST_Q_NUM; + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size); + indir = devm_kcalloc(dev, rxfh_indir_size, sizeof(u32), GFP_KERNEL); + if (!indir) + return -ENOMEM; + for (i = 0; i < rxfh_indir_size; i++) + indir[i] = i % NBL_SELF_TEST_Q_NUM; + disp_ops->set_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), indir, rxfh_indir_size); + if (nbl_loopback_setup_rings(adapter, netdev)) { netdev_err(netdev, "Fail to setup rings"); result |= BIT(NBL_LB_ERR_RING_SETUP); @@ -1499,6 +2949,16 @@ static u64 nbl_loopback_test(struct net_device *netdev) ring_mgt->rx_ring_num = origin_num_rxq; vsi_info->active_ring_num = origin_active_q; + if (ring_mgt->rss_indir_user) { + memcpy(indir, ring_mgt->rss_indir_user, rxfh_indir_size * sizeof(u32)); + } else { + for (i = 0; i < rxfh_indir_size; i++) + indir[i] = i % vsi_info->active_ring_num; + } + disp_ops->set_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), indir, rxfh_indir_size); + devm_kfree(dev, indir); + return result; } @@ -1665,6 +3125,7 @@ static int 
nbl_set_pause_param(struct net_device *netdev, struct ethtool_pausepa struct nbl_service_mgt *serv_mgt; struct nbl_serv_net_resource_mgt *net_resource_mgt; struct nbl_dispatch_ops *disp_ops; + struct nbl_phy_caps *phy_caps; struct nbl_port_state port_state = {0}; struct nbl_port_advertising port_advertising = {0}; u32 autoneg = 0; @@ -1675,9 +3136,9 @@ static int nbl_set_pause_param(struct net_device *netdev, struct ethtool_pausepa serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + phy_caps = &net_resource_mgt->phy_caps; - ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + ret = nbl_serv_get_port_state(serv_mgt, &port_state); if (ret) { netdev_err(netdev, "Get port_state failed %d\n", ret); return -EIO; @@ -1727,12 +3188,10 @@ static int nbl_set_pause_param(struct net_device *netdev, struct ethtool_pausepa static void nbl_get_pause_param(struct net_device *netdev, struct ethtool_pauseparam *param) { struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_port_state port_state = {0}; int ret = 0; - ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + ret = nbl_serv_get_port_state(serv_mgt, &port_state); if (ret) { netdev_err(netdev, "Get port_state failed %d\n", ret); return; @@ -1743,6 +3202,142 @@ static void nbl_get_pause_param(struct net_device *netdev, struct ethtool_pausep param->tx_pause = !!(port_state.active_fc & NBL_PORT_TX_PAUSE); } +static void nbl_get_eth_ctrl_stats(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *eth_ctrl_stats) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_eth_ctrl_stats eth_ctrl_stats_info = {0}; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->get_eth_ctrl_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, &eth_ctrl_stats_info, + sizeof(struct nbl_eth_ctrl_stats)); + if (ret) { + netdev_err(netdev, "Get eth_ctrl_stats failed %d\n", ret); + return; + } + + eth_ctrl_stats->MACControlFramesTransmitted = + eth_ctrl_stats_info.macctrl_frames_txd_ok; + eth_ctrl_stats->MACControlFramesReceived = eth_ctrl_stats_info.macctrl_frames_rxd; + eth_ctrl_stats->UnsupportedOpcodesReceived = + eth_ctrl_stats_info.unsupported_opcodes_rx; +} + +static void nbl_get_pause_stats(struct net_device *netdev, struct ethtool_pause_stats *pause_stats) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_pause_stats pause_stats_info = {0}; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->get_pause_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, &pause_stats_info, + sizeof(struct nbl_pause_stats)); + if (ret) { + netdev_err(netdev, "Get pause_stats failed %d\n", ret); + return; + } + + pause_stats->rx_pause_frames = pause_stats_info.rx_pause_frames; + pause_stats->tx_pause_frames = pause_stats_info.tx_pause_frames; +} + +static void nbl_get_eth_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *eth_mac_stats) +{ + struct nbl_service_mgt *serv_mgt = 
NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops; + struct nbl_eth_mac_stats eth_mac_stats_info = {0}; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->get_eth_mac_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, &eth_mac_stats_info, + sizeof(struct nbl_eth_mac_stats)); + if (ret) { + netdev_err(netdev, "Get eth_mac_stats failed %d\n", ret); + return; + } + + eth_mac_stats->FramesTransmittedOK = eth_mac_stats_info.frames_txd_ok; + eth_mac_stats->FramesReceivedOK = eth_mac_stats_info.frames_rxd_ok; + eth_mac_stats->OctetsTransmittedOK = eth_mac_stats_info.octets_txd_ok; + eth_mac_stats->OctetsReceivedOK = eth_mac_stats_info.octets_rxd_ok; + eth_mac_stats->MulticastFramesXmittedOK = eth_mac_stats_info.multicast_frames_txd_ok; + eth_mac_stats->BroadcastFramesXmittedOK = eth_mac_stats_info.broadcast_frames_txd_ok; + eth_mac_stats->MulticastFramesReceivedOK = eth_mac_stats_info.multicast_frames_rxd_ok; + eth_mac_stats->BroadcastFramesReceivedOK = eth_mac_stats_info.broadcast_frames_rxd_ok; +} + +static const struct ethtool_rmon_hist_range rmon_ranges[] = { + { 0, 64}, + { 65, 127}, + { 128, 255}, + { 256, 511}, + { 512, 1023}, + { 1024, 1518}, + { 1519, 2047}, + { 2048, 65535}, + {}, +}; + +static void nbl_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **range) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_rmon_stats rmon_stats_info = {0}; + struct nbl_dispatch_ops *disp_ops; + u64 *rx = rmon_stats_info.rmon_rx_range; + u64 *tx = rmon_stats_info.rmon_tx_range; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->get_rmon_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, &rmon_stats_info, + sizeof(struct nbl_rmon_stats)); + if (ret) { + netdev_err(netdev, "Get rmon_stats failed %d\n", ret); + return; + } + rmon_stats->undersize_pkts = rmon_stats_info.undersize_frames_rxd_goodfcs; + rmon_stats->oversize_pkts = rmon_stats_info.oversize_frames_rxd_goodfcs; + rmon_stats->fragments = rmon_stats_info.undersize_frames_rxd_badfcs; + rmon_stats->jabbers = rmon_stats_info.oversize_frames_rxd_badfcs; + + rmon_stats->hist[0] = rx[ETHER_STATS_PKTS_64_OCTETS]; + rmon_stats->hist[1] = rx[ETHER_STATS_PKTS_65_TO_127_OCTETS]; + rmon_stats->hist[2] = rx[ETHER_STATS_PKTS_128_TO_255_OCTETS]; + rmon_stats->hist[3] = rx[ETHER_STATS_PKTS_256_TO_511_OCTETS]; + rmon_stats->hist[4] = rx[ETHER_STATS_PKTS_512_TO_1023_OCTETS]; + rmon_stats->hist[5] = rx[ETHER_STATS_PKTS_1024_TO_1518_OCTETS]; + rmon_stats->hist[6] = rx[ETHER_STATS_PKTS_1519_TO_2047_OCTETS]; + rmon_stats->hist[7] = rx[ETHER_STATS_PKTS_2048_TO_MAX_OCTETS]; + + rmon_stats->hist_tx[0] = tx[ETHER_STATS_PKTS_64_OCTETS]; + rmon_stats->hist_tx[1] = tx[ETHER_STATS_PKTS_65_TO_127_OCTETS]; + rmon_stats->hist_tx[2] = tx[ETHER_STATS_PKTS_128_TO_255_OCTETS]; + rmon_stats->hist_tx[3] = tx[ETHER_STATS_PKTS_256_TO_511_OCTETS]; + rmon_stats->hist_tx[4] = tx[ETHER_STATS_PKTS_512_TO_1023_OCTETS]; + rmon_stats->hist_tx[5] = tx[ETHER_STATS_PKTS_1024_TO_1518_OCTETS]; + rmon_stats->hist_tx[6] = tx[ETHER_STATS_PKTS_1519_TO_2047_OCTETS]; + rmon_stats->hist_tx[7] = tx[ETHER_STATS_PKTS_2048_TO_MAX_OCTETS]; + *range = rmon_ranges; +} + + static int nbl_set_fec_param(struct net_device *netdev, struct ethtool_fecparam *fec) { struct 
nbl_service_mgt *serv_mgt; @@ -1759,8 +3354,7 @@ static int nbl_set_fec_param(struct net_device *netdev, struct ethtool_fecparam disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + ret = nbl_serv_get_port_state(serv_mgt, &port_state); if (ret) { netdev_err(netdev, "Get port_state failed %d\n", ret); return -EIO; @@ -1771,16 +3365,19 @@ static int nbl_set_fec_param(struct net_device *netdev, struct ethtool_fecparam return -EINVAL; } - autoneg = (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ? - AUTONEG_ENABLE : AUTONEG_DISABLE; + if (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) { + netdev_err(netdev, "cannot set fec mode when autoneg is enabled\n"); + return -EOPNOTSUPP; + } - if (fec_mode == ETHTOOL_FEC_OFF) - fec_mode = ETHTOOL_FEC_NONE; + autoneg = ((port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) || + (port_state.port_caps & BIT(NBL_PORT_CAP_FEC_AUTONEG))) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; /* check if the fec mode is supported */ - if (fec_mode == ETHTOOL_FEC_NONE) { + if (fec_mode == ETHTOOL_FEC_OFF) { active_fec = NBL_PORT_FEC_OFF; - if (!(port_state.port_caps & BIT(NBL_PORT_CAP_FEC_NONE))) { + if (!(port_state.port_caps & BIT(NBL_PORT_CAP_FEC_OFF))) { netdev_err(netdev, "unsupported fec mode off\n"); return -EOPNOTSUPP; } @@ -1813,17 +3410,17 @@ static int nbl_set_fec_param(struct net_device *netdev, struct ethtool_fecparam } if (fec_mode == ETHTOOL_FEC_RS) { - if (port_state.link_speed == 10000) { + if ((port_state.link_speed == SPEED_10000 && port_state.link_state) || + net_resource_mgt->configured_speed == SPEED_10000) { netdev_err(netdev, "speed 10G cannot set fec RS, only can set fec baseR\n"); return -EINVAL; } } - net_resource_mgt->configured_fec = fec_mode; - port_advertising.eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); port_advertising.active_fec = active_fec; - port_advertising.autoneg = autoneg; + port_advertising.autoneg = (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; /* update fec mode */ ret = disp_ops->set_port_advertising(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), @@ -1833,21 +3430,22 @@ static int nbl_set_fec_param(struct net_device *netdev, struct ethtool_fecparam return ret; } + net_resource_mgt->configured_fec = fec_mode; + return 0; } static int nbl_get_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) { struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); struct nbl_port_state port_state = {0}; u32 fec = 0; u32 active_fec = 0; + u8 autoneg = 0; int ret = 0; - ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + ret = nbl_serv_get_port_state(serv_mgt, &port_state); if (ret) { netdev_err(netdev, "Get port_state failed %d\n", ret); return -EIO; @@ -1858,6 +3456,9 @@ static int nbl_get_fec_param(struct net_device *netdev, struct ethtool_fecparam return -EINVAL; } + autoneg = ((port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) || + (port_state.port_caps & BIT(NBL_PORT_CAP_FEC_AUTONEG))) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; if (port_state.active_fec == NBL_PORT_FEC_OFF) active_fec = ETHTOOL_FEC_OFF; @@ -1868,15 +3469,45 @@ static int nbl_get_fec_param(struct net_device *netdev, struct ethtool_fecparam if (net_resource_mgt->configured_fec) fec = net_resource_mgt->configured_fec; + else if (autoneg) + fec = ETHTOOL_FEC_AUTO; else fec = active_fec; + if (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) + fec = ETHTOOL_FEC_AUTO; + fecparam->fec = fec; fecparam->active_fec = active_fec; return 0; } +static void nbl_get_fec_stats(struct net_device *netdev, struct ethtool_fec_stats *fec_stats) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_fec_stats fec_stats_info = {0}; + unsigned int i; + int ret; + + ret = disp_ops->get_fec_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), &fec_stats_info); + if (ret) { + netdev_err(netdev, "Get fec_stats failed %d\n", ret); + return; + } + fec_stats->corrected_blocks.total = fec_stats_info.corrected_blocks; + fec_stats->uncorrectable_blocks.total = fec_stats_info.uncorrectable_blocks; + fec_stats->corrected_bits.total = fec_stats_info.corrected_bits; + + for (i = 0; i < NBL_LEONIS_LANE_NUM; i++) { + fec_stats->corrected_blocks.lanes[i] = fec_stats_info.corrected_lane[i]; + fec_stats->uncorrectable_blocks.lanes[i] = fec_stats_info.uncorrectable_lane[i]; + fec_stats->corrected_bits.lanes[i] = fec_stats_info.corrected_bits_lane[i]; + } +} + static int nbl_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct nbl_service_mgt *serv_mgt; @@ -1923,8 +3554,7 @@ static int nbl_nway_reset(struct net_device *netdev) eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + ret = nbl_serv_get_port_state(serv_mgt, &port_state); if (ret) { netdev_err(netdev, "Get port_state failed %d\n", ret); return -EIO; @@ -1942,6 +3572,227 @@ static int nbl_nway_reset(struct net_device *netdev) return disp_ops->nway_reset(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id); } +static void nbl_rep_stats_fill_strings(struct net_device *netdev, u8 *data) +{ + char *p = (char *)data; + + snprintf(p, ETH_GSTRING_LEN, "tx_packets"); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_bytes"); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_packets"); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_bytes"); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_dropped"); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_dropped"); + p += ETH_GSTRING_LEN; +} + +static void nbl_rep_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + nbl_rep_stats_fill_strings(netdev, data); +} + +static int nbl_rep_get_sset_count(struct net_device *netdev, int sset) +{ + u32 total_queues = 0; + + if (sset == ETH_SS_STATS) { + total_queues = NBL_REP_PER_VSI_QUEUE_NUM * 2; + return total_queues * (sizeof(struct nbl_rep_stats) / sizeof(u64)); + } else { + return -EOPNOTSUPP; + } +} + +static void +nbl_rep_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_netdev_priv 
*priv = netdev_priv(netdev); + struct nbl_rep_stats rep_stats = {0}; + int i = 0; + + disp_ops->get_rep_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + priv->rep->rep_vsi_id, &rep_stats, true); + data[i++] = rep_stats.packets; + data[i++] = rep_stats.bytes; + disp_ops->get_rep_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + priv->rep->rep_vsi_id, &rep_stats, false); + data[i++] = rep_stats.packets; + data[i++] = rep_stats.bytes; + nbl_serv_get_rep_drop_stats(serv_mgt, priv->rep->rep_vsi_id, &rep_stats); + data[i] = rep_stats.dropped; +} + +static int nbl_flash_device(struct net_device *netdev, struct ethtool_flash *flash) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + const struct firmware *fw; + int ret = 0; + + if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) + return -EOPNOTSUPP; + + if (!adapter->init_param.caps.has_ctrl) + return -EOPNOTSUPP; + + ret = request_firmware_direct(&fw, flash->data, &netdev->dev); + if (ret) + return ret; + + dev_hold(netdev); + rtnl_unlock(); + + ret = nbl_serv_update_firmware(serv_mgt, fw, NULL); + release_firmware(fw); + + rtnl_lock(); + dev_put(netdev); + + return ret; +} + +static int nbl_diag_fill_device_name(struct nbl_service_mgt *serv_mgt, void *buff) +{ + struct nbl_common_info *info = serv_mgt->common; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + + snprintf(buff, NBL_DEV_NAME_SZ, "%s:%s", pci_name(info->pdev), + net_resource_mgt->netdev->name); + + return NBL_DEV_NAME_SZ; +} + +static int nbl_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u32 extra_len = 0; + + if (!adapter->init_param.caps.has_ctrl) + return -EOPNOTSUPP; + + dump->version = NBL_DIAG_DUMP_VERSION; + dump->flag = serv_mgt->net_resource_mgt->dump_flag; + + if (dump->flag & NBL_DIAG_FLAG_PERFORMANCE) { + u32 length = disp_ops->get_perf_dump_length(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + + serv_mgt->net_resource_mgt->dump_perf_len = length; + extra_len += length ? 
DIAG_BLK_SZ(length) : 0; + } + + dump->len = sizeof(struct nbl_diag_dump) + DIAG_BLK_SZ(NBL_DRV_VER_SZ) + + DIAG_BLK_SZ(NBL_DEV_NAME_SZ) + extra_len; + + return 0; +} + +static int nbl_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, void *buffer) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_diag_dump *dump_hdr = buffer; + struct nbl_diag_blk *dump_blk; + + if (!adapter->init_param.caps.has_ctrl) + return -EOPNOTSUPP; + + memset(buffer, 0, dump->len); + dump_hdr->version = NBL_DIAG_DUMP_VERSION; + dump_hdr->flag = 0; + dump_hdr->num_blocks = 0; + dump_hdr->total_length = 0; + + /* Dump driver version */ + dump_blk = DIAG_GET_NEXT_BLK(dump_hdr); + dump_blk->type = NBL_DIAG_DRV_VERSION; + disp_ops->get_driver_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), dump_blk->data, + NBL_DRV_VER_SZ); + dump_blk->length = NBL_DRV_VER_SZ; + dump_hdr->total_length += DIAG_BLK_SZ(dump_blk->length); + dump_hdr->num_blocks++; + + /* Dump device name */ + dump_blk = DIAG_GET_NEXT_BLK(dump_hdr); + dump_blk->type = NBL_DIAG_DEVICE_NAME; + dump_blk->length = nbl_diag_fill_device_name(serv_mgt, &dump_blk->data); + dump_hdr->total_length += DIAG_BLK_SZ(dump_blk->length); + dump_hdr->num_blocks++; + + /* Dump performance registers */ + if (net_resource_mgt->dump_flag & NBL_DIAG_FLAG_PERFORMANCE) { + dump_blk = DIAG_GET_NEXT_BLK(dump_hdr); + dump_blk->type = NBL_DIAG_PERFORMANCE; + dump_blk->length = disp_ops->get_perf_dump_data(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + dump_blk->data, + net_resource_mgt->dump_perf_len); + dump_hdr->total_length += DIAG_BLK_SZ(dump_blk->length); + dump_hdr->num_blocks++; + dump_hdr->flag |= NBL_DIAG_FLAG_PERFORMANCE; + } + + return 0; +} + +static int nbl_set_dump(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + + if (!adapter->init_param.caps.has_ctrl) + return -EOPNOTSUPP; + + serv_mgt->net_resource_mgt->dump_flag = dump->flag; + + return 0; +} + +static void nbl_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + if (adapter->init_param.caps.is_ocp) { + wol->supported = WAKE_MAGIC; + wol->wolopts = common->wol_ena ? WAKE_MAGIC : 0; + } else { + wol->supported = 0; + wol->wolopts = 0; + } +} + +static int nbl_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + if (!adapter->init_param.caps.is_ocp) + return -EOPNOTSUPP; + + if (wol->wolopts && wol->wolopts != WAKE_MAGIC) + return -EOPNOTSUPP; + + if (common->wol_ena != !!wol->wolopts) { + common->wol_ena = !!wol->wolopts; + device_set_wakeup_enable(&common->pdev->dev, common->wol_ena); + netdev_dbg(netdev, "Wol magic packet %sabled", common->wol_ena ? "en" : "dis"); + } + + return 0; +} + /* NBL_SERV_ETHTOOL_OPS_TBL(ops_name, func) * * Use X Macros to reduce setup and remove codes. 
@@ -1966,9 +3817,11 @@ do { \ NBL_SERV_SET_ETHTOOL_OPS(get_coalesce, nbl_get_coalesce); \ NBL_SERV_SET_ETHTOOL_OPS(set_coalesce, nbl_set_coalesce); \ NBL_SERV_SET_ETHTOOL_OPS(get_rxnfc, nbl_get_rxnfc); \ + NBL_SERV_SET_ETHTOOL_OPS(set_rxnfc, nbl_set_rxnfc); \ NBL_SERV_SET_ETHTOOL_OPS(get_rxfh_indir_size, nbl_get_rxfh_indir_size); \ NBL_SERV_SET_ETHTOOL_OPS(get_rxfh_key_size, nbl_get_rxfh_key_size); \ NBL_SERV_SET_ETHTOOL_OPS(get_rxfh, nbl_get_rxfh); \ + NBL_SERV_SET_ETHTOOL_OPS(set_rxfh, nbl_set_rxfh); \ NBL_SERV_SET_ETHTOOL_OPS(get_msglevel, nbl_get_msglevel); \ NBL_SERV_SET_ETHTOOL_OPS(set_msglevel, nbl_set_msglevel); \ NBL_SERV_SET_ETHTOOL_OPS(get_regs_len, nbl_get_regs_len); \ @@ -1985,6 +3838,15 @@ do { \ NBL_SERV_SET_ETHTOOL_OPS(get_ts_info, ethtool_op_get_ts_info); \ NBL_SERV_SET_ETHTOOL_OPS(set_phys_id, nbl_set_phys_id); \ NBL_SERV_SET_ETHTOOL_OPS(nway_reset, nbl_nway_reset); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rep_strings, nbl_rep_get_strings); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rep_sset_count, nbl_rep_get_sset_count); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rep_ethtool_stats, nbl_rep_get_ethtool_stats); \ + NBL_SERV_SET_ETHTOOL_OPS(flash_device, nbl_flash_device); \ + NBL_SERV_SET_ETHTOOL_OPS(get_dump_flag, nbl_get_dump_flag); \ + NBL_SERV_SET_ETHTOOL_OPS(get_dump_data, nbl_get_dump_data); \ + NBL_SERV_SET_ETHTOOL_OPS(set_dump, nbl_set_dump); \ + NBL_SERV_SET_ETHTOOL_OPS(set_wol, nbl_set_wol); \ + NBL_SERV_SET_ETHTOOL_OPS(get_wol, nbl_get_wol); \ } while (0) void nbl_serv_setup_ethtool_ops(struct nbl_service_ops *serv_ops) @@ -1992,4 +3854,11 @@ void nbl_serv_setup_ethtool_ops(struct nbl_service_ops *serv_ops) #define NBL_SERV_SET_ETHTOOL_OPS(name, func) do {serv_ops->NBL_NAME(name) = func; ; } while (0) NBL_SERV_ETHTOOL_OPS_TBL; #undef NBL_SERV_SET_ETHTOOL_OPS + serv_ops->get_eth_ctrl_stats = nbl_get_eth_ctrl_stats; + serv_ops->get_pause_stats = nbl_get_pause_stats; + serv_ops->get_eth_mac_stats = nbl_get_eth_mac_stats; + serv_ops->get_fec_stats = nbl_get_fec_stats; + serv_ops->get_link_ext_state = nbl_get_link_ext_state; + serv_ops->get_link_ext_stats = nbl_get_link_ext_stats; + serv_ops->get_rmon_stats = nbl_get_rmon_stats; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h index 23bcf51688ff0ab20d29e86c8aeb92fabc1aada2..205db0bce5eeccc5882a3e1d34f06abffcd1eca7 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h @@ -38,5 +38,7 @@ enum nbl_ethtool_lb_test_err_code { void nbl_serv_update_stats(struct nbl_service_mgt *serv_mgt, bool ethtool); void nbl_serv_setup_ethtool_ops(struct nbl_service_ops *serv_ops_tbl); +int nbl_serv_update_firmware(struct nbl_service_mgt *serv_mgt, const struct firmware *fw, + struct netlink_ext_ack *extack); #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c index 5089a50f3132d2fdf381e1b324bbd1f524774ce3..b806ece27061084e28159a73fc1456520063846d 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c @@ -34,52 +34,49 @@ static int nbl_hwmon_read(struct device *dev, enum hwmon_sensor_types type, struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); u8 eth_id = NBL_COMMON_TO_ETH_ID(common); + enum nbl_hwmon_type hwmon_type; u32 temp; 
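+ /* Resolve the hwmon attribute to the driver's hwmon_type first, so each channel ends in a single serv_ops temperature query. */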
switch (channel) { case NBL_HWMON_CHIP_SENSOR: switch (attr) { case hwmon_temp_input: - temp = serv_ops->get_chip_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); - *val = (temp & NBL_HWMON_TEMP_MAP) * NBL_HWMON_TEMP_UNIT; - return 0; + hwmon_type = NBL_HWMON_TEMP_INPUT; + break; case hwmon_temp_max: - temp = serv_ops->get_chip_temperature_max - (NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); - *val = temp * NBL_HWMON_TEMP_UNIT; - return 0; + hwmon_type = NBL_HWMON_TEMP_MAX; + break; case hwmon_temp_crit: - temp = serv_ops->get_chip_temperature_crit - (NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); - *val = temp * NBL_HWMON_TEMP_UNIT; - return 0; + hwmon_type = NBL_HWMON_TEMP_CRIT; + break; case hwmon_temp_highest: - temp = serv_ops->get_chip_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); - *val = (temp >> NBL_HWMON_TEMP_OFF) * NBL_HWMON_TEMP_UNIT; - return 0; + hwmon_type = NBL_HWMON_TEMP_HIGHEST; + break; default: return -EOPNOTSUPP; } + temp = serv_ops->get_chip_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + hwmon_type, channel); + *val = temp; + return 0; case NBL_HWMON_LIGHT_MODULE: switch (attr) { case hwmon_temp_input: - temp = serv_ops->get_module_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - eth_id, NBL_MODULE_TEMP); - *val = temp * NBL_HWMON_TEMP_UNIT; - return 0; + hwmon_type = NBL_HWMON_TEMP_INPUT; + break; case hwmon_temp_max: - temp = serv_ops->get_module_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - eth_id, NBL_MODULE_TEMP_MAX); - *val = temp * NBL_HWMON_TEMP_UNIT; - return 0; + hwmon_type = NBL_HWMON_TEMP_MAX; + break; case hwmon_temp_crit: - temp = serv_ops->get_module_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - eth_id, NBL_MODULE_TEMP_CRIT); - *val = temp * NBL_HWMON_TEMP_UNIT; - return 0; + hwmon_type = NBL_HWMON_TEMP_CRIT; + break; default: return -EOPNOTSUPP; } + temp = serv_ops->get_module_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + eth_id, hwmon_type); + *val = temp; + return 0; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h index 61f7ef29731ddcd291e465983a2cc09a1c05d769..5f22de0220231119255497916dcb8a1efd69fe1c 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h @@ -9,8 +9,6 @@ #include "nbl_dev.h" -#define NBL_HWMON_TEMP_MAP 0x000001FF -#define NBL_HWMON_TEMP_UNIT 1000 #define NBL_HWMON_TEMP_OFF 16 #define NBL_HWMON_VISIBLE 0444 #define NBL_HWMON_CHIP_SENSOR 0 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.c new file mode 100644 index 0000000000000000000000000000000000000000..119694bf6fcc3d22b41ae1567c24d1a6907d8262 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.c @@ -0,0 +1,579 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_ipsec.h" +static int nbl_validate_xfrm_state(struct net_device *netdev, struct xfrm_state *x) +{ + if (x->id.proto != IPPROTO_ESP) { + netdev_err(netdev, "Only ESP xfrm state may be offloaded\n"); + return -EINVAL; + } + + if (x->props.aalgo != SADB_AALG_NONE) { + netdev_err(netdev, "Cannot offload authenticated xfrm states\n"); + return -EINVAL; + } + + if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV8 && + x->props.ealgo != SADB_X_EALG_AES_GCM_ICV12 && + x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) { + netdev_err(netdev, "Only aes-gcm/sm4 xfrm state may be offloaded\n"); + return -EINVAL; + } + + if (x->props.family != AF_INET && x->props.family != AF_INET6) { + netdev_err(netdev, "Only IPv4/6 xfrm state may be offloaded\n"); + return -EINVAL; + } + + if (x->props.mode != XFRM_MODE_TRANSPORT && x->props.mode != XFRM_MODE_TUNNEL) { + netdev_err(netdev, "Only transport and tunnel xfrm state may be offloaded\n"); + return -EINVAL; + } + + if (!x->aead) { + netdev_err(netdev, "Cannot offload xfrm state without aead\n"); + return -EINVAL; + } + + if (x->aead->alg_key_len != NBL_IPSEC_AES_128_ALG_LEN && + x->aead->alg_key_len != NBL_IPSEC_AES_256_ALG_LEN) { + netdev_err(netdev, "Cannot offload xfrm key length other than 128/256 bit\n"); + return -EINVAL; + } + + if (x->aead->alg_icv_len != NBL_IPSEC_ICV_LEN_64 && + x->aead->alg_icv_len != NBL_IPSEC_ICV_LEN_96 && + x->aead->alg_icv_len != NBL_IPSEC_ICV_LEN_128) { + netdev_err(netdev, "Cannot offload xfrm icv length other than 64/96/128 bit\n"); + return -EINVAL; + } + + if (x->replay_esn && x->replay_esn->replay_window && + x->replay_esn->replay_window != NBL_IPSEC_WINDOW_32 && + x->replay_esn->replay_window != NBL_IPSEC_WINDOW_64 && + x->replay_esn->replay_window != NBL_IPSEC_WINDOW_128 && + x->replay_esn->replay_window != NBL_IPSEC_WINDOW_256) { + netdev_err(netdev, + "Cannot offload xfrm replay_window other than 32/64/128/256 bit\n"); + return -EINVAL; + } + + if (!(x->props.flags & XFRM_STATE_ESN) && x->props.replay_window && + x->props.replay_window != NBL_IPSEC_WINDOW_32 && + x->props.replay_window != NBL_IPSEC_WINDOW_64 && + x->props.replay_window != NBL_IPSEC_WINDOW_128 && + x->props.replay_window != NBL_IPSEC_WINDOW_256) { + netdev_err(netdev, + "Cannot offload xfrm replay_window other than 32/64/128/256 bit\n"); + return -EINVAL; + } + + if (!x->geniv) { + netdev_err(netdev, "Cannot offload xfrm state without geniv\n"); + return -EINVAL; + } + + if (strcmp(x->geniv, "seqiv")) { + netdev_err(netdev, "Cannot offload xfrm state with geniv other than seqiv\n"); + return -EINVAL; + } + + if ((x->lft.hard_byte_limit != XFRM_INF || x->lft.soft_byte_limit != XFRM_INF) && + (x->lft.hard_packet_limit != XFRM_INF || x->lft.soft_packet_limit != XFRM_INF)) { + netdev_err(netdev, + "Offloaded xfrm state does not support both byte & packet limits\n"); + return -EINVAL; + } + + if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit && + x->lft.soft_byte_limit != XFRM_INF) { + netdev_err(netdev, "Hard byte limit must be greater than soft limit\n"); + return -EINVAL; + } + + if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit && + x->lft.soft_packet_limit != XFRM_INF) { + netdev_err(netdev, "Hard packet limit must be greater than soft limit\n"); + return -EINVAL; + } + + return 0; +} + +static void nbl_ipsec_update_esn_state(struct xfrm_state *x, struct nbl_ipsec_esn_state *esn_state) +{ + bool esn = !!(x->props.flags & XFRM_STATE_ESN); + bool inbound = (x->xso.dir == XFRM_DEV_OFFLOAD_IN); + + u32 bottom = 0; + + if 
(!esn) { + esn_state->enable = 0; + if (!inbound) { + esn_state->sn = x->replay.oseq + 1; + esn_state->wrap_en = (x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP); + return; + } + + esn_state->sn = x->replay.seq + 1; + if (x->props.replay_window) { + esn_state->window_en = 1; + esn_state->option = ilog2(x->props.replay_window / NBL_IPSEC_WINDOW_32); + } + return; + } + + esn_state->enable = 1; + if (!inbound) { + esn_state->sn = x->replay_esn->oseq + 1; + esn_state->esn = x->replay_esn->oseq_hi; + return; + } + + if (x->replay_esn->seq >= x->replay_esn->replay_window) + bottom = x->replay_esn->seq - x->replay_esn->replay_window + 1; + + if (x->replay_esn->seq < NBL_IPSEC_REPLAY_MID_SEQ) + esn_state->overlap = 1; + + esn_state->sn = x->replay_esn->seq + 1; + esn_state->esn = xfrm_replay_seqhi(x, htonl(bottom)); + if (x->replay_esn->replay_window) { + esn_state->window_en = 1; + esn_state->option = ilog2(x->replay_esn->replay_window / NBL_IPSEC_WINDOW_32); + } +} + +static void nbl_ipsec_init_cfg_info(struct xfrm_state *x, struct nbl_ipsec_cfg_info *cfg_info) +{ + cfg_info->sa_key.family = x->props.family; + cfg_info->sa_key.mark = x->mark.v & x->mark.m; + cfg_info->sa_key.spi = x->id.spi; + cfg_info->vld = true; + memcpy(&cfg_info->sa_key.daddr, x->id.daddr.a6, sizeof(x->id.daddr.a6)); + + if (x->lft.hard_byte_limit != XFRM_INF) { + cfg_info->limit_type = NBL_IPSEC_LIFETIME_BYTE; + cfg_info->hard_limit = x->lft.hard_byte_limit; + if (x->lft.soft_byte_limit != XFRM_INF) + cfg_info->soft_limit = x->lft.soft_byte_limit; + } + + if (x->lft.hard_packet_limit != XFRM_INF) { + cfg_info->limit_type = NBL_IPSEC_LIFETIME_PACKET; + cfg_info->hard_limit = x->lft.hard_packet_limit; + if (x->lft.soft_packet_limit != XFRM_INF) + cfg_info->soft_limit = x->lft.soft_packet_limit; + } + + if (cfg_info->hard_limit == 0) + return; + if (cfg_info->soft_limit == 0) + cfg_info->soft_limit = NBL_GET_SOFT_BY_HARD(cfg_info->hard_limit); + + cfg_info->limit_enable = 1; + cfg_info->hard_round = cfg_info->hard_limit >> NBL_IPSEC_LIFETIME_ROUND; + cfg_info->hard_remain = cfg_info->hard_limit & NBL_IPSEC_LIFETIME_REMAIN; + cfg_info->soft_round = cfg_info->soft_limit >> NBL_IPSEC_LIFETIME_ROUND; + cfg_info->soft_remain = cfg_info->soft_limit & NBL_IPSEC_LIFETIME_REMAIN; + + if (cfg_info->hard_round <= 1) { + cfg_info->lft_cnt = cfg_info->hard_limit; + cfg_info->lft_diff = cfg_info->hard_limit - cfg_info->soft_limit; + cfg_info->hard_round = 0; + cfg_info->soft_round = 0; + } else { + cfg_info->lft_cnt = (1 << NBL_IPSEC_LIFETIME_ROUND) + cfg_info->soft_remain; + cfg_info->lft_diff = (1 << NBL_IPSEC_LIFETIME_ROUND); + } +} + +static void nbl_ipsec_build_accel_xfrm_attrs(struct xfrm_state *x, + struct nbl_accel_esp_xfrm_attrs *attrs) +{ + struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm; + struct aead_geniv_ctx *geniv_ctx; + unsigned int key_len, icv_len; + int i; + u8 key[NBL_IPSEC_KEY_LEN_TOTAL] = {0}; + __be32 salt; + + /* key */ + key_len = NBL_GET_KEYLEN_BY_ALG(x->aead->alg_key_len); + for (i = 0; i < key_len; i++) + key[key_len - i - 1] = x->aead->alg_key[i]; + memcpy(aes_gcm->aes_key, key, key_len); + if (strncmp(x->aead->alg_name, "rfc4106(gcm(aes))", sizeof(x->aead->alg_name)) == 0) { + if (key_len == NBL_IPSEC_AES128_KEY_LEN) + aes_gcm->crypto_type = NBL_IPSEC_AES_GCM_128; + else + aes_gcm->crypto_type = NBL_IPSEC_AES_GCM_256; + } else { + aes_gcm->crypto_type = NBL_IPSEC_SM4_GCM; + } + + /* salt and seq_iv */ + geniv_ctx = crypto_aead_ctx(x->data); + memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, sizeof(u64)); + 
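/* rfc4106-style keys carry a 4-byte salt after the key material. */ + 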
memcpy(&salt, x->aead->alg_key + key_len, sizeof(u32)); + aes_gcm->salt = be32_to_cpu(salt); + + /* icv len */ + icv_len = x->aead->alg_icv_len; + if (icv_len == NBL_IPSEC_ICV_LEN_64) + aes_gcm->icv_len = NBL_IPSEC_ICV_64_TYPE; + else if (icv_len == NBL_IPSEC_ICV_LEN_96) + aes_gcm->icv_len = NBL_IPSEC_ICV_96_TYPE; + else + aes_gcm->icv_len = NBL_IPSEC_ICV_128_TYPE; + + /* tunnel mode */ + attrs->tunnel_mode = x->props.mode; + /* spi */ + attrs->spi = be32_to_cpu(x->id.spi); + + /* nat traversal */ + if (x->encap) { + attrs->nat_flag = 1; + attrs->sport = be16_to_cpu(x->encap->encap_sport); + attrs->dport = be16_to_cpu(x->encap->encap_dport); + } + + /* source, destination ips */ + memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr)); + memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr)); + attrs->is_ipv6 = (x->props.family != AF_INET); +} + +static void nbl_ipsec_free_tx_index(struct net_device *netdev, u32 index) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + netdev_info(netdev, "nbl ipsec egress free index %u\n", index); + disp_ops->free_ipsec_tx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index); +} + +static void nbl_ipsec_free_rx_index(struct net_device *netdev, u32 index) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + netdev_info(netdev, "nbl ipsec ingress free index %u\n", index); + disp_ops->free_ipsec_rx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index); +} + +static int nbl_ipsec_alloc_tx_index(struct net_device *netdev, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_common_info *common; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + cfg_info->vsi = NBL_COMMON_TO_VSI_ID(common); + + return disp_ops->alloc_ipsec_tx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), cfg_info); +} + +static int nbl_ipsec_alloc_rx_index(struct net_device *netdev, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_common_info *common; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + cfg_info->vsi = NBL_COMMON_TO_VSI_ID(common); + + return disp_ops->alloc_ipsec_rx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), cfg_info); +} + +static void nbl_ipsec_cfg_tx_sad(struct net_device *netdev, u32 index, + struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->cfg_ipsec_tx_sad(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, sa_entry); +} + +static void nbl_ipsec_cfg_rx_sad(struct net_device *netdev, u32 index, + struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->cfg_ipsec_rx_sad(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, sa_entry); +} + +static int nbl_ipsec_add_rx_flow(struct net_device *netdev, u32 index, + struct nbl_accel_esp_xfrm_attrs *attrs) +{ + struct nbl_service_mgt 
*serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u32 data[NBL_IPSEC_SPI_DIP__LEN] = {0}; + u32 dip[NBL_IPSEC_FLOW_IP_LEN] = {0}; + int i; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + memcpy(data, &attrs->spi, sizeof(attrs->spi)); + if (attrs->is_ipv6) { + for (i = 0; i < NBL_IPSEC_FLOW_IP_LEN; i++) + dip[i] = ntohl(attrs->daddr.a6[NBL_IPSEC_FLOW_IP_LEN - 1 - i]); + } else { + dip[0] = ntohl(attrs->daddr.a4); + } + memcpy(data + 1, dip, sizeof(dip)); + + return disp_ops->add_ipsec_rx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, data, + NBL_COMMON_TO_VSI_ID(common)); +} + +static int nbl_ipsec_add_tx_flow(struct net_device *netdev, u32 index, struct xfrm_selector *sel) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u32 data[NBL_IPSEC_FLOW_TOTAL_LEN] = {0}; + u32 sip[NBL_IPSEC_FLOW_IP_LEN] = {0}; + u32 dip[NBL_IPSEC_FLOW_IP_LEN] = {0}; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + switch (sel->family) { + case AF_INET: + data[0] = AF_INET; + data[NBL_IPSEC_FLOW_SIP_OFF] = ntohl(sel->saddr.a4); + data[NBL_IPSEC_FLOW_DIP_OFF] = ntohl(sel->daddr.a4); + break; + case AF_INET6: + data[0] = AF_INET6; + be32_to_cpu_array(sip, sel->saddr.a6, NBL_IPSEC_FLOW_IP_LEN); + be32_to_cpu_array(dip, sel->daddr.a6, NBL_IPSEC_FLOW_IP_LEN); + memcpy(data + NBL_IPSEC_FLOW_SIP_OFF, sip, sizeof(sip)); + memcpy(data + NBL_IPSEC_FLOW_DIP_OFF, dip, sizeof(dip)); + break; + default: + return -EINVAL; + } + + return disp_ops->add_ipsec_tx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, data, + NBL_COMMON_TO_VSI_ID(common)); +} + +static int nbl_xfrm_add_state(struct xfrm_state *x, struct netlink_ext_ack *extack) +{ + struct nbl_ipsec_sa_entry *sa_entry; + struct net_device *netdev = x->xso.dev; + int index; + int ret = 0; + + if (nbl_validate_xfrm_state(netdev, x)) + return -EINVAL; + + sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL); + if (!sa_entry) + return -ENOMEM; + + nbl_ipsec_init_cfg_info(x, &sa_entry->cfg_info); + nbl_ipsec_update_esn_state(x, &sa_entry->esn_state); + nbl_ipsec_build_accel_xfrm_attrs(x, &sa_entry->attrs); + + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) { + index = nbl_ipsec_alloc_rx_index(netdev, &sa_entry->cfg_info); + if (index < 0) { + netdev_err(netdev, "Not enough rx session resources\n"); + kfree(sa_entry); + return -ENOSPC; + } + netdev_info(netdev, "nbl ipsec ingress index %d\n", index); + + ret = nbl_ipsec_add_rx_flow(netdev, index, &sa_entry->attrs); + if (ret) { + netdev_err(netdev, "Not enough rx flow resources for %d\n", index); + nbl_ipsec_free_rx_index(netdev, index); + kfree(sa_entry); + return -ENOSPC; + } + nbl_ipsec_cfg_rx_sad(netdev, index, sa_entry); + } else { + index = nbl_ipsec_alloc_tx_index(netdev, &sa_entry->cfg_info); + if (index < 0) { + netdev_err(netdev, "Not enough tx session resources\n"); + kfree(sa_entry); + return -ENOSPC; + } + netdev_info(netdev, "nbl ipsec egress index %d\n", index); + + ret = nbl_ipsec_add_tx_flow(netdev, index, &x->sel); + if (ret) { + netdev_err(netdev, "Not enough tx flow resources for %d\n", index); + nbl_ipsec_free_tx_index(netdev, index); + kfree(sa_entry); + return -ENOSPC; + } + nbl_ipsec_cfg_tx_sad(netdev, index, sa_entry); + } + + sa_entry->index = (u32)index; + x->xso.offload_handle = (unsigned long)sa_entry; + + return 0; +} + 
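+/* + * The helpers below undo nbl_xfrm_add_state(): del_ipsec_{tx,rx}_flow removes + * the hardware flow entry, while the SAD index and sa_entry are released in + * the free_state callback. + */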
+static void nbl_ipsec_del_tx_flow(struct net_device *netdev, u32 index) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->del_ipsec_tx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index); +} + +static void nbl_ipsec_del_rx_flow(struct net_device *netdev, u32 index) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->del_ipsec_rx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index); +} + +static void nbl_xfrm_del_state(struct xfrm_state *x) +{ + struct nbl_ipsec_sa_entry *sa_entry = (struct nbl_ipsec_sa_entry *)x->xso.offload_handle; + struct net_device *netdev = x->xso.dev; + + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) + nbl_ipsec_del_rx_flow(netdev, sa_entry->index); + else + nbl_ipsec_del_tx_flow(netdev, sa_entry->index); +} + +static void nbl_xfrm_free_state(struct xfrm_state *x) +{ + struct nbl_ipsec_sa_entry *sa_entry = (struct nbl_ipsec_sa_entry *)x->xso.offload_handle; + struct net_device *netdev = x->xso.dev; + + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) + nbl_ipsec_free_rx_index(netdev, sa_entry->index); + else + nbl_ipsec_free_tx_index(netdev, sa_entry->index); + + kfree(sa_entry); +} + +static bool nbl_offload_ok(struct sk_buff *skb, struct xfrm_state *x) +{ + /* an IHL of 5 32-bit words (20 bytes) means the IPv4 header carries no options */ +#define NBL_IP_HEADER_LEN 5 + if (x->props.family == AF_INET) { + if (ip_hdr(skb)->ihl != NBL_IP_HEADER_LEN) + return false; + } else { + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) + return false; + } + + return true; +} + +static void nbl_xfrm_advance_esn_state(struct xfrm_state *x) +{ + /* nothing to do */ +} + +static bool nbl_check_ipsec_status(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + return disp_ops->check_ipsec_status(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_handle_dipsec_lft_event(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + union nbl_ipsec_lft_info lft_info = {0}; + + lft_info.data = disp_ops->get_dipsec_lft_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (lft_info.soft_vld) + disp_ops->handle_dipsec_soft_expire(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + lft_info.soft_sad_index); + + if (lft_info.hard_vld) + disp_ops->handle_dipsec_hard_expire(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + lft_info.hard_sad_index); +} + +static void nbl_handle_uipsec_lft_event(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + union nbl_ipsec_lft_info lft_info = {0}; + + lft_info.data = disp_ops->get_uipsec_lft_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (lft_info.soft_vld) + disp_ops->handle_uipsec_soft_expire(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + lft_info.soft_sad_index); + + if (lft_info.hard_vld) + disp_ops->handle_uipsec_hard_expire(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + lft_info.hard_sad_index); +} + +static void nbl_handle_ipsec_event(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + + nbl_handle_dipsec_lft_event(serv_mgt); + nbl_handle_uipsec_lft_event(serv_mgt); +} + +#define NBL_SERV_XFRM_OPS_TBL \ +do { \ + NBL_SERV_SET_XFRM_OPS(add_xdo_dev_state, nbl_xfrm_add_state); \ + NBL_SERV_SET_XFRM_OPS(delete_xdo_dev_state, nbl_xfrm_del_state); \ + 
NBL_SERV_SET_XFRM_OPS(free_xdo_dev_state, nbl_xfrm_free_state); \ + NBL_SERV_SET_XFRM_OPS(xdo_dev_offload_ok, nbl_offload_ok); \ + NBL_SERV_SET_XFRM_OPS(xdo_dev_state_advance_esn, nbl_xfrm_advance_esn_state); \ + NBL_SERV_SET_XFRM_OPS(check_ipsec_status, nbl_check_ipsec_status); \ + NBL_SERV_SET_XFRM_OPS(handle_ipsec_event, nbl_handle_ipsec_event); \ +} while (0) + +void nbl_serv_setup_xfrm_ops(struct nbl_service_ops *serv_ops) +{ +#define NBL_SERV_SET_XFRM_OPS(name, func) do { serv_ops->NBL_NAME(name) = func; } while (0) + NBL_SERV_XFRM_OPS_TBL; +#undef NBL_SERV_SET_XFRM_OPS +} + diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.h new file mode 100644 index 0000000000000000000000000000000000000000..3156d51d2f4b8bdcdd40ea96baf1ff2bff4fb9cc --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_IPSEC_H +#define _NBL_IPSEC_H + +#include +#include +#include +#include +#include "nbl_service.h" +#include +#include +#include +#include +#include + +#define NBL_IPSEC_AES_128_ALG_LEN (128 + 32) +#define NBL_IPSEC_AES_256_ALG_LEN (256 + 32) + +#define NBL_IPSEC_ICV_LEN_64 64 +#define NBL_IPSEC_ICV_LEN_96 96 +#define NBL_IPSEC_ICV_LEN_128 128 + +#define NBL_IPSEC_WINDOW_32 32 +#define NBL_IPSEC_WINDOW_64 64 +#define NBL_IPSEC_WINDOW_128 128 +#define NBL_IPSEC_WINDOW_256 256 + +#define NBL_IPSEC_LIFETIME_BYTE 0 +#define NBL_IPSEC_LIFETIME_PACKET 1 +#define NBL_IPSEC_LIFETIME_ROUND 31 +#define NBL_IPSEC_LIFETIME_REMAIN (0x7fffffff) +#define NBL_IPSEC_REPLAY_MID_SEQ (0x80000000L) +#define NBL_GET_SOFT_BY_HARD(hard) (((hard) >> 2) * 3) + +#define NBL_GET_KEYLEN_BY_ALG(alg_key_len) ((((alg_key_len) + 7) / 8) - 4) +#define NBL_IPSEC_KEY_LEN_TOTAL 32 +#define NBL_IPSEC_AES128_KEY_LEN 16 +#define NBL_IPSEC_AES_GCM_128 0 +#define NBL_IPSEC_AES_GCM_256 1 +#define NBL_IPSEC_SM4_GCM 2 + +#define NBL_IPSEC_ICV_64_TYPE 0 +#define NBL_IPSEC_ICV_96_TYPE 1 +#define NBL_IPSEC_ICV_128_TYPE 2 + +#define NBL_IPSEC_SPI_DIP_LEN 5 +#define NBL_IPSEC_FLOW_TOTAL_LEN 12 +#define NBL_IPSEC_FLOW_IP_LEN 4 +#define NBL_IPSEC_FLOW_SIP_OFF 1 +#define NBL_IPSEC_FLOW_DIP_OFF 5 + +#define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2 + +void nbl_serv_setup_xfrm_ops(struct nbl_service_ops *serv_ops_tbl); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.c new file mode 100644 index 0000000000000000000000000000000000000000..afdc850102a3bdee9d86ffc0933d3aa2eb5effe8 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.c @@ -0,0 +1,391 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_ktls.h" + +static void nbl_ktls_free_tx_index(struct net_device *netdev, u32 index) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->free_ktls_tx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index); +} + +static void nbl_ktls_free_rx_index(struct net_device *netdev, u32 index) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->free_ktls_rx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index); +} + +static int nbl_ktls_alloc_tx_index(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u16 vsi; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + vsi = NBL_COMMON_TO_VSI_ID(common); + + return disp_ops->alloc_ktls_tx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi); +} + +static int nbl_ktls_alloc_rx_index(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u16 vsi; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + vsi = NBL_COMMON_TO_VSI_ID(common); + + return disp_ops->alloc_ktls_rx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi); +} + +static void nbl_ktls_cfg_tx_keymat(struct net_device *netdev, u32 index, + struct tls_crypto_info *crypto_info, + struct nbl_ktls_offload_context_tx *priv_tx) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct tls12_crypto_info_aes_gcm_128 *crypto_info_aes_128; + struct tls12_crypto_info_aes_gcm_256 *crypto_info_aes_256; + struct tls12_crypto_info_sm4_gcm *crypto_info_sm4; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + switch (crypto_info->cipher_type) { + case TLS_CIPHER_AES_GCM_128: + crypto_info_aes_128 = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + + disp_ops->cfg_ktls_tx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, + NBL_KTLS_AES_GCM_128, + crypto_info_aes_128->salt, + crypto_info_aes_128->key, + TLS_CIPHER_AES_GCM_128_KEY_SIZE); + memcpy(priv_tx->iv, crypto_info_aes_128->iv, NBL_KTLS_IV_LEN); + memcpy(priv_tx->rec_num, crypto_info_aes_128->rec_seq, NBL_KTLS_REC_LEN); + break; + case TLS_CIPHER_AES_GCM_256: + crypto_info_aes_256 = (struct tls12_crypto_info_aes_gcm_256 *)crypto_info; + + disp_ops->cfg_ktls_tx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, + NBL_KTLS_AES_GCM_256, + crypto_info_aes_256->salt, + crypto_info_aes_256->key, + TLS_CIPHER_AES_GCM_256_KEY_SIZE); + memcpy(priv_tx->iv, crypto_info_aes_256->iv, NBL_KTLS_IV_LEN); + memcpy(priv_tx->rec_num, crypto_info_aes_256->rec_seq, NBL_KTLS_REC_LEN); + break; + case TLS_CIPHER_SM4_GCM: + crypto_info_sm4 = (struct tls12_crypto_info_sm4_gcm *)crypto_info; + + disp_ops->cfg_ktls_tx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, + NBL_KTLS_SM4_GCM, + crypto_info_sm4->salt, + crypto_info_sm4->key, + TLS_CIPHER_SM4_GCM_KEY_SIZE); + memcpy(priv_tx->iv, crypto_info_sm4->iv, NBL_KTLS_IV_LEN); + memcpy(priv_tx->rec_num, crypto_info_sm4->rec_seq, NBL_KTLS_REC_LEN); + break; + } +} + +static int nbl_ktls_add_tx(struct net_device *netdev, struct sock *sk, 
+ struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn) +{ + struct tls_context *tls_ctx; + struct nbl_ktls_offload_context_tx *priv_tx; + struct nbl_ktls_offload_context_tx **ctx; + int index; + + priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL); + if (!priv_tx) + return -ENOMEM; + + /* get unused index */ + index = nbl_ktls_alloc_tx_index(netdev); + if (index < 0) { + netdev_err(netdev, "Not enough tx session resources\n"); + kfree(priv_tx); + return -ENOSPC; + } + + netdev_info(netdev, "nbl ktls egress index %d, start seq %u\n", + index, start_offload_tcp_sn); + nbl_ktls_cfg_tx_keymat(netdev, index, crypto_info, priv_tx); + + priv_tx->index = (u32)index; + priv_tx->expected_tcp = start_offload_tcp_sn; + tls_ctx = tls_get_ctx(sk); + priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx); + priv_tx->ctx_post_pending = true; + ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + *ctx = priv_tx; + + return 0; +} + +static void nbl_ktls_cfg_rx_keymat(struct net_device *netdev, u32 index, + struct tls_crypto_info *crypto_info, + struct nbl_ktls_offload_context_rx *priv_rx) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct tls12_crypto_info_aes_gcm_128 *crypto_info_aes_128; + struct tls12_crypto_info_aes_gcm_256 *crypto_info_aes_256; + struct tls12_crypto_info_sm4_gcm *crypto_info_sm4; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + switch (crypto_info->cipher_type) { + case TLS_CIPHER_AES_GCM_128: + crypto_info_aes_128 = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + + disp_ops->cfg_ktls_rx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, + NBL_KTLS_AES_GCM_128, + crypto_info_aes_128->salt, + crypto_info_aes_128->key, + TLS_CIPHER_AES_GCM_128_KEY_SIZE); + memcpy(priv_rx->rec_num, crypto_info_aes_128->rec_seq, NBL_KTLS_REC_LEN); + break; + case TLS_CIPHER_AES_GCM_256: + crypto_info_aes_256 = (struct tls12_crypto_info_aes_gcm_256 *)crypto_info; + + disp_ops->cfg_ktls_rx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, + NBL_KTLS_AES_GCM_256, + crypto_info_aes_256->salt, + crypto_info_aes_256->key, + TLS_CIPHER_AES_GCM_256_KEY_SIZE); + memcpy(priv_rx->rec_num, crypto_info_aes_256->rec_seq, NBL_KTLS_REC_LEN); + break; + case TLS_CIPHER_SM4_GCM: + crypto_info_sm4 = (struct tls12_crypto_info_sm4_gcm *)crypto_info; + + disp_ops->cfg_ktls_rx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, + NBL_KTLS_SM4_GCM, + crypto_info_sm4->salt, + crypto_info_sm4->key, + TLS_CIPHER_SM4_GCM_KEY_SIZE); + memcpy(priv_rx->rec_num, crypto_info_sm4->rec_seq, NBL_KTLS_REC_LEN); + break; + } +} + +static void nbl_ktls_cfg_rx_record(struct net_device *netdev, u32 index, + u32 tcp_sn, u64 rec_num, bool init) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + netdev_info(netdev, "nbl ktls cfg index %u, tcp_seq %u, rec_num %llu, init %u.\n", + index, tcp_sn, rec_num, init); + disp_ops->cfg_ktls_rx_record(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, + tcp_sn, rec_num, init); +} + +static int nbl_ktls_add_rx_flow(struct net_device *netdev, u32 index, struct sock *sk) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u32 data[NBL_KTLS_FLOW_TOTAL_LEN] = {0}; + u32 sip[NBL_KTLS_FLOW_IP_LEN] = {0}; + u32 dip[NBL_KTLS_FLOW_IP_LEN] = {0}; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common 
= NBL_SERV_MGT_TO_COMMON(serv_mgt); + + /* rx flow key layout: word 0 holds the address family; the peer/local IPs + * and ports follow at the NBL_KTLS_FLOW_*_OFF word offsets + */ + switch (sk->sk_family) { + case AF_INET: + data[NBL_KTLS_FLOW_TYPE_OFF] = AF_INET; + data[NBL_KTLS_FLOW_SIP_OFF] = ntohl(inet_sk(sk)->inet_daddr); + data[NBL_KTLS_FLOW_DIP_OFF] = ntohl(inet_sk(sk)->inet_rcv_saddr); + break; + case AF_INET6: + data[NBL_KTLS_FLOW_TYPE_OFF] = AF_INET6; + be32_to_cpu_array(sip, sk->sk_v6_daddr.s6_addr32, NBL_KTLS_FLOW_IP_LEN); + be32_to_cpu_array(dip, inet6_sk(sk)->saddr.s6_addr32, NBL_KTLS_FLOW_IP_LEN); + memcpy(data + NBL_KTLS_FLOW_SIP_OFF, sip, sizeof(sip)); + memcpy(data + NBL_KTLS_FLOW_DIP_OFF, dip, sizeof(dip)); + break; + default: + return -EINVAL; + } + + data[NBL_KTLS_FLOW_DPORT_OFF] = ntohs(inet_sk(sk)->inet_dport); + data[NBL_KTLS_FLOW_SPORT_OFF] = ntohs(inet_sk(sk)->inet_sport); + + return disp_ops->add_ktls_rx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, data, + NBL_COMMON_TO_VSI_ID(common)); +} + +static int nbl_ktls_add_rx(struct net_device *netdev, struct sock *sk, + struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn) +{ + struct nbl_ktls_offload_context_rx *priv_rx; + struct nbl_ktls_offload_context_rx **ctx; + struct tls_context *tls_ctx; + int index; + u64 rec_num; + int ret = 0; + + priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL); + if (!priv_rx) { + netdev_err(netdev, "Not enough rx memory available\n"); + return -ENOMEM; + } + + /* get unused index */ + index = nbl_ktls_alloc_rx_index(netdev); + if (index < 0) { + netdev_err(netdev, "Not enough rx session resources\n"); + kfree(priv_rx); + return -ENOSPC; + } + + netdev_info(netdev, "nbl ktls ingress index %d, expected seq %u\n", + index, start_offload_tcp_sn); + ret = nbl_ktls_add_rx_flow(netdev, index, sk); + if (ret) { + netdev_err(netdev, "Not enough rx flow resources for %d\n", index); + nbl_ktls_free_rx_index(netdev, index); + kfree(priv_rx); + return -ENOSPC; + } + nbl_ktls_cfg_rx_keymat(netdev, index, crypto_info, priv_rx); + rec_num = be64_to_cpu(*(__be64 *)priv_rx->rec_num) - 1; + nbl_ktls_cfg_rx_record(netdev, index, start_offload_tcp_sn, rec_num, true); + + priv_rx->index = (u32)index; + tls_ctx = tls_get_ctx(sk); + ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); + *ctx = priv_rx; + tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ); + return 0; +} + +static int nbl_ktls_add(struct net_device *netdev, struct sock *sk, + enum tls_offload_ctx_dir direction, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn) +{ + int err = 0; + + if (crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128 && + crypto_info->cipher_type != TLS_CIPHER_SM4_GCM && + crypto_info->cipher_type != TLS_CIPHER_AES_GCM_256) { + netdev_info(netdev, "Unsupported cipher type %u\n", crypto_info->cipher_type); + return -EOPNOTSUPP; + } + + if (direction == TLS_OFFLOAD_CTX_DIR_TX) + err = nbl_ktls_add_tx(netdev, sk, crypto_info, start_offload_tcp_sn); + else + err = nbl_ktls_add_rx(netdev, sk, crypto_info, start_offload_tcp_sn); + + return err; +} + +static void nbl_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx) +{ + struct nbl_ktls_offload_context_tx **ctx = + __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + struct nbl_ktls_offload_context_tx *priv_tx = *ctx; + + netdev_info(netdev, "nbl ktls egress free index %u\n", priv_tx->index); + nbl_ktls_free_tx_index(netdev, priv_tx->index); + kfree(priv_tx); +} + +static void nbl_ktls_del_rx_flow(struct net_device *netdev, u32 index) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + 
disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->del_ktls_rx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index); +} + +static void nbl_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) +{ + struct nbl_ktls_offload_context_rx **ctx = + __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); + struct nbl_ktls_offload_context_rx *priv_rx = *ctx; + + netdev_info(netdev, "nbl ktls ingress free index %u\n", priv_rx->index); + nbl_ktls_free_rx_index(netdev, priv_rx->index); + nbl_ktls_del_rx_flow(netdev, priv_rx->index); + kfree(priv_rx); +} + +static void nbl_ktls_del(struct net_device *netdev, struct tls_context *tls_ctx, + enum tls_offload_ctx_dir direction) +{ + if (direction == TLS_OFFLOAD_CTX_DIR_TX) + nbl_ktls_del_tx(netdev, tls_ctx); + else + nbl_ktls_del_rx(netdev, tls_ctx); +} + +static void nbl_ktls_rx_resync(struct net_device *netdev, struct sock *sk, + u32 tcp_seq, u8 *rec_num) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct nbl_ktls_offload_context_rx **ctx = + __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); + struct nbl_ktls_offload_context_rx *priv = *ctx; + + nbl_ktls_cfg_rx_record(netdev, priv->index, priv->tcp_seq, + be64_to_cpu(*(__be64 *)rec_num), false); +} + +static int nbl_ktls_resync(struct net_device *netdev, struct sock *sk, + u32 tcp_seq, u8 *rec_num, + enum tls_offload_ctx_dir direction) +{ + if (direction != TLS_OFFLOAD_CTX_DIR_RX) + return -1; + + nbl_ktls_rx_resync(netdev, sk, tcp_seq, rec_num); + return 0; +} + +#define NBL_SERV_KTLS_OPS_TBL \ +do { \ + NBL_SERV_SET_KTLS_OPS(add_tls_dev, nbl_ktls_add); \ + NBL_SERV_SET_KTLS_OPS(del_tls_dev, nbl_ktls_del); \ + NBL_SERV_SET_KTLS_OPS(resync_tls_dev, nbl_ktls_resync); \ +} while (0) + +void nbl_serv_setup_ktls_ops(struct nbl_service_ops *serv_ops) +{ +#define NBL_SERV_SET_KTLS_OPS(name, func) do { serv_ops->NBL_NAME(name) = func; } while (0) + NBL_SERV_KTLS_OPS_TBL; +#undef NBL_SERV_SET_KTLS_OPS +} + diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.h new file mode 100644 index 0000000000000000000000000000000000000000..02c70f6456cf006a25c0aff3db5d242613d63bb9 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_KTLS_H +#define _NBL_KTLS_H + +#include "nbl_service.h" +#include +#include +#include +#include +#include + +#define NBL_KTLS_AES_GCM_128 0 +#define NBL_KTLS_AES_GCM_256 1 +#define NBL_KTLS_SM4_GCM 2 +#define NBL_KTLS_FLOW_TYPE_OFF 0 +#define NBL_KTLS_FLOW_SIP_OFF 1 +#define NBL_KTLS_FLOW_DIP_OFF 5 +#define NBL_KTLS_FLOW_DPORT_OFF 9 +#define NBL_KTLS_FLOW_SPORT_OFF 10 +#define NBL_KTLS_FLOW_IP_LEN 4 +#define NBL_KTLS_FLOW_TOTAL_LEN 12 + +void nbl_serv_setup_ktls_ops(struct nbl_service_ops *serv_ops_tbl); + +#endif /* _NBL_KTLS_H */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.c new file mode 100644 index 0000000000000000000000000000000000000000..5f878b823802c13d9ea7001a666d12c27d49db5d --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.c @@ -0,0 +1,1248 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_lag.h" +#include "nbl_dev.h" + +struct list_head lag_resource_head; +/* mutex for lag resource */ +struct mutex nbl_lag_mutex; + +static inline void init_lag_instance(struct nbl_lag_instance *lag_info, struct net_device *bond_dev) +{ + lag_info->bond_netdev = bond_dev; + INIT_LIST_HEAD(&lag_info->mem_list_head); + lag_info->linkup = 0; + lag_info->lag_enable = 0; + lag_info->lag_id = NBL_INVALID_LAG_ID; + memset(&lag_info->lag_upper_info, 0, sizeof(lag_info->lag_upper_info)); +} + +static struct nbl_lag_instance *find_lag_by_lagid(u32 board_key, u8 lag_id) +{ + struct nbl_lag_resource *find_resource = NULL; + struct nbl_lag_resource *lag_resource_tmp; + struct nbl_lag_instance *lag_tmp, *lag_info = NULL; + + if (!nbl_lag_id_valid(lag_id)) + goto ret; + + /* find the lag resource by the bus id, identify a card */ + list_for_each_entry(lag_resource_tmp, &lag_resource_head, resource_node) { + if (lag_resource_tmp->board_key == board_key) { + find_resource = lag_resource_tmp; + break; + } + } + + if (!find_resource) + goto ret; + + /* find the lag instance by lag_id */ + list_for_each_entry(lag_tmp, &find_resource->lag_instance_head, instance_node) { + if (lag_tmp->lag_id == lag_id) { + lag_info = lag_tmp; + break; + } + } + +ret: + return lag_info; +} + +static struct nbl_lag_instance *find_lag_by_bonddev(u32 board_key, struct net_device *bond_dev) +{ + struct nbl_lag_resource *find_resource = NULL; + struct nbl_lag_resource *lag_resource_tmp; + struct nbl_lag_instance *lag_tmp, *lag_info = NULL; + + if (!bond_dev) + goto ret; + + /* find the lag resource by the bus id, identify a card */ + list_for_each_entry(lag_resource_tmp, &lag_resource_head, resource_node) { + if (lag_resource_tmp->board_key == board_key) { + find_resource = lag_resource_tmp; + break; + } + } + + if (!find_resource) + goto ret; + + /* find the lag instance by bonddev */ + list_for_each_entry(lag_tmp, &find_resource->lag_instance_head, instance_node) { + if (lag_tmp->bond_netdev == bond_dev) { + lag_info = lag_tmp; + break; + } + } + +ret: + return lag_info; +} + +static struct nbl_lag_instance *alloc_lag_instance(u32 board_key, struct net_device *bond_dev, + struct nbl_lag_resource **find_resource) +{ + struct nbl_lag_resource *lag_resource_tmp; + struct nbl_lag_instance *lag_tmp, *lag_info = NULL; + + /* find the lag resource by the bus id, identify a card */ + list_for_each_entry(lag_resource_tmp, &lag_resource_head, resource_node) { + if (lag_resource_tmp->board_key == board_key) { + *find_resource = lag_resource_tmp; + break; + } + } + + if (!(*find_resource)) + goto ret; + + /* find the lag instance by bond_dev */ + list_for_each_entry(lag_tmp, &(*find_resource)->lag_instance_head, instance_node) { + /* mark the idle lag instance */ + if (!lag_info && !lag_tmp->bond_netdev) + lag_info = lag_tmp; + if (lag_tmp->bond_netdev == bond_dev) { + lag_info = lag_tmp; + break; + } + } + /* if not found and no idle lag instance, then alloc a new lag instance */ + if (!lag_info) { + lag_info = kzalloc(sizeof(*lag_info), GFP_KERNEL); + if (!lag_info) + goto ret; + + init_lag_instance(lag_info, bond_dev); + list_add_tail(&lag_info->instance_node, &(*find_resource)->lag_instance_head); + } + +ret: + return lag_info; +} + +static void nbl_display_lag_info(struct nbl_dev_mgt *dev_mgt, u8 lag_id) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct net_device *current_netdev; + const char *member_name, *upper_name; + struct nbl_lag_member *mem_tmp; + struct nbl_lag_instance *lag_info = 
NULL; + u32 board_key; + + board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | + dev_mgt->common->pdev->bus->number; + lag_info = find_lag_by_lagid(board_key, lag_id); + + if (!lag_info) + return; + + current_netdev = net_dev->netdev; + upper_name = lag_info->bond_netdev ? netdev_name(lag_info->bond_netdev) : "unset"; + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "bond dev %s: enabled is %u, lag_id is %u.\n", + upper_name, lag_info->lag_enable, lag_info->lag_id); + + if (lag_info && lag_info->lag_enable) { + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "bond dev %s: tx_type is %d, hash_type is %d.\n", upper_name, + lag_info->lag_upper_info.tx_type, + lag_info->lag_upper_info.hash_type); + + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) { + member_name = current_netdev ? + netdev_name(current_netdev) : "unset"; + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "%s(%s): lag_id: %d, eth_id: %d, bonded: %d, linkup: %d, tx_enabled: %d.\n", + upper_name, member_name, mem_tmp->lag_id, + mem_tmp->logic_eth_id, mem_tmp->bonded, + mem_tmp->lower_state.link_up, + mem_tmp->lower_state.tx_enabled); + } + } +} + +static void nbl_lag_create_bond_adev(struct nbl_dev_mgt *dev_mgt, + struct nbl_lag_instance *lag_info) +{ + struct nbl_event_param event_data; + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_lag_member *mem_tmp, *notify_mem = NULL; + struct nbl_lag_member_list_param *list_param = &event_data.param; + struct nbl_rdma_register_param register_param = {0}; + int mem_num = 0; + int i = 0; + + memset(&event_data, 0, sizeof(event_data)); + + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) { + event_data.param.member_list[mem_num].vsi_id = mem_tmp->vsi_id; + event_data.param.member_list[mem_num].eth_id = mem_tmp->eth_id; + mem_num++; + if (!notify_mem || notify_mem->eth_id > mem_tmp->eth_id) + notify_mem = mem_tmp; + } + + if (!notify_mem) { + nbl_err(common, NBL_DEBUG_MAIN, + "notify to create the bond adev failed, member count %u.\n", mem_num); + return; + } + event_data.param.lag_num = mem_num; + + /* Checking if we can support and create the rdma bond */ + serv_ops->register_rdma_bond(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + list_param, ®ister_param); + + if (!register_param.has_rdma) { + nbl_warn(common, NBL_DEBUG_MAIN, + "Can not support to create rdma bond, vsi %u.\n", notify_mem->vsi_id); + return; + } + + for (i = 0; i < mem_num; i++) { + event_data.subevent = NBL_SUBEVENT_RELEASE_ADEV; + /* Notify the dev to release the rdma adev first. */ + nbl_event_notify(NBL_EVENT_RDMA_BOND_UPDATE, &event_data, + event_data.param.member_list[i].vsi_id, + NBL_COMMON_TO_BOARD_ID(common)); + } + + /* Notify the rdma dev to create the bond adev. 
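The member with the lowest eth_id (notify_mem above) owns the bond adev.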
*/ + event_data.subevent = NBL_SUBEVENT_CREATE_BOND_ADEV; + event_data.param.bond_netdev = lag_info->bond_netdev; + event_data.param.lag_id = lag_info->lag_id; + event_data.param.lag_num = mem_num; + + nbl_event_notify(NBL_EVENT_RDMA_BOND_UPDATE, &event_data, notify_mem->vsi_id, + NBL_COMMON_TO_BOARD_ID(common)); + + notify_mem->is_bond_adev = true; +} + +static void nbl_lag_member_recover_adev(struct nbl_dev_mgt *dev_mgt, + struct nbl_lag_instance *lag_info, + struct nbl_lag_member *lag_mem) +{ + struct nbl_event_param event_data; + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_lag_member *mem_tmp, *adev_mem = NULL; + int i = 0, has_self = 0, mem_num = 0; + + memset(&event_data, 0, sizeof(event_data)); + + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) + if (mem_tmp == lag_mem) + has_self = 1; + + if (!has_self) + return; + + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) { + event_data.param.member_list[mem_num].vsi_id = mem_tmp->vsi_id; + event_data.param.member_list[mem_num].eth_id = mem_tmp->eth_id; + mem_num++; + + if (mem_tmp->is_bond_adev) + adev_mem = mem_tmp; + } + + /* If we cannot find a member with adev, then we have nothing to do, return */ + if (!adev_mem) + return; + + /* Notify the rdma dev to delete the bond adev. */ + event_data.subevent = NBL_SUBEVENT_RELEASE_BOND_ADEV; + event_data.param.bond_netdev = lag_info->bond_netdev; + event_data.param.lag_id = lag_info->lag_id; + event_data.param.lag_num = mem_num; + + nbl_event_notify(NBL_EVENT_RDMA_BOND_UPDATE, &event_data, adev_mem->vsi_id, + NBL_COMMON_TO_BOARD_ID(common)); + + for (i = 0; i < mem_num; i++) { + event_data.subevent = NBL_SUBEVENT_CREATE_ADEV; + /* Notify the dev to restore the rdma adev. */ + nbl_event_notify(NBL_EVENT_RDMA_BOND_UPDATE, &event_data, + event_data.param.member_list[i].vsi_id, + NBL_COMMON_TO_BOARD_ID(common)); + } + + adev_mem->is_bond_adev = false; +} + +static void update_lag_member_list(struct nbl_dev_mgt *dev_mgt, + u8 lag_id, + struct nbl_lag_instance *lag_info, + struct nbl_lag_member *lag_mem) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_lag_member *mem_tmp; + struct nbl_event_param event_data; + struct nbl_lag_member_list_param mem_list_param = {0}; + u16 mem_id, tx_enabled_id = U16_MAX; + u8 fwd; + + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) { + if (nbl_lag_mem_is_active(mem_tmp)) + tx_enabled_id = mem_tmp->eth_id; + } + + memset(&event_data, 0, sizeof(event_data)); + mem_id = 0; + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) { + if (mem_id < NBL_LAG_MAX_PORTS) { + /* The member list is mainly for dup-arp/nd cfg. + * If we only use port_list, which only contains active eth_id, the + * following problem will occur: + * 1. Add pf0 & pf1 to bond + * 2. pf0 up, pf0 cfg member_list, right now only pf0 is active, so + * port_list contains only eth0 + * 3. pf1 up, pf1 cfg member_list, now both pf0 and pf1 are up, so + * port_list contains eth0 & eth1 + * In this case, pf1 knows that it should dup-arp to two ports, but + * pf0 is unaware, so if kernel use pf0 to send pkts, it cannot dup. 
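+ * Keeping the full member list on every member avoids this asymmetry.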
+ */ + mem_list_param.member_list[mem_id].eth_id = mem_tmp->eth_id; + mem_list_param.member_list[mem_id].vsi_id = mem_tmp->vsi_id; + + if (nbl_lag_mem_is_active(mem_tmp)) { + mem_list_param.port_list[mem_id] = mem_tmp->eth_id; + mem_list_param.member_list[mem_id].active = true; + } else if (tx_enabled_id < U16_MAX) { + mem_list_param.port_list[mem_id] = tx_enabled_id; + } + } + mem_id++; + } + mem_list_param.lag_num = mem_id; + if (lag_info->lag_upper_info.tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && mem_id > 1) + mem_list_param.duppkt_enable = true; + + if (tx_enabled_id < U16_MAX) + for ( ; mem_id < NBL_LAG_MAX_PORTS; ) + mem_list_param.port_list[mem_id++] = tx_enabled_id; + + mem_list_param.lag_id = lag_id; + serv_ops->cfg_lag_member_list(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &mem_list_param); + + serv_ops->cfg_lag_member_up_attr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + lag_mem->eth_id, lag_id, lag_mem->bonded ? true : false); + if (!lag_mem->bonded) { + fwd = NBL_LAG_MEM_FWD_DROP; + serv_ops->cfg_lag_member_fwd(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + lag_mem->eth_id, lag_id, fwd); + } + + mem_list_param.bond_netdev = lag_info->bond_netdev; + memcpy(&event_data.param, &mem_list_param, sizeof(event_data.param)); + event_data.subevent = NBL_SUBEVENT_UPDATE_BOND_MEMBER; + + /* Make sure only notify the dev who has been created the rdma bond adev to update the + * bond member list info. + */ + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) + if (mem_tmp->is_bond_adev) + nbl_event_notify(NBL_EVENT_RDMA_BOND_UPDATE, &event_data, mem_tmp->vsi_id, + NBL_COMMON_TO_BOARD_ID(common)); +} + +static void nbl_update_lag_cfg(struct nbl_lag_member *lag_mem, u8 lag_id, u32 flag) +{ + struct nbl_dev_mgt *dev_mgt = NBL_NETDEV_TO_DEV_MGT(lag_mem->netdev); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_dev_vsi *vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_DATA]; + u16 eth_id; + u8 fwd; + const char *upper_name; + struct nbl_lag_instance *lag_info = NULL; + bool sfp_tx_enable, lag_enable; + u32 board_key; + + eth_id = lag_mem->eth_id; + board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | + dev_mgt->common->pdev->bus->number; + + if (flag & NBL_LAG_UPDATE_LACP_PKT) { + lag_enable = lag_mem->bonded ? true : false; + serv_ops->enable_lag_protocol(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + eth_id, lag_enable); + vsi->feature.has_lacp = lag_enable; + + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "%s lag protocol for lag_id: %d.\n", + lag_enable ? "enable" : "disable", lag_id); + } + + if (flag & NBL_LAG_UPDATE_SFP_TX) { + if (lag_mem->bonded) + sfp_tx_enable = lag_mem->lower_state.link_up; + else + sfp_tx_enable = true; + serv_ops->set_sfp_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), lag_mem->netdev, + (u8)eth_id, sfp_tx_enable, true); + } + + if (!nbl_lag_id_valid(lag_id)) { + nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "lag_id: %d is invalid, flag: 0x%08x.\n", lag_id, flag); + return; + } + + lag_info = find_lag_by_lagid(board_key, lag_id); + + if (!lag_info) + return; + + upper_name = lag_info->bond_netdev ? netdev_name(lag_info->bond_netdev) : "unset"; + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "bond dev %s: lag_id: %d, eth_id: %u, enabled: %d, linkup: %s, flag: 0x%08x.\n", + upper_name, lag_id, lag_mem->logic_eth_id, lag_info->lag_enable, + lag_info->linkup ? 
"up" : "down", flag); + + if (flag & NBL_LAG_UPDATE_HASH) + serv_ops->cfg_lag_hash_algorithm(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), eth_id, + lag_id, lag_info->lag_upper_info.hash_type); + + if (flag & NBL_LAG_UPDATE_LINK) { + if (lag_mem->bonded) { + fwd = NBL_LAG_MEM_FWD_DROP; + if (lag_info->linkup) + fwd = nbl_lag_mem_is_active(lag_mem) ? + NBL_LAG_MEM_FWD_NORMAL : NBL_LAG_MEM_FWD_DROP; + serv_ops->cfg_lag_member_fwd(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + eth_id, lag_id, fwd); + } + } + + if (flag & NBL_LAG_UPDATE_MEMBER) + update_lag_member_list(dev_mgt, lag_id, lag_info, lag_mem); +} + +static int del_lag_member(struct nbl_dev_mgt *dev_mgt, + struct nbl_lag_instance *lag_info, + struct netdev_notifier_changeupper_info *info) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_lag_member *lag_mem; + u8 mem_count = 0; + struct nbl_lag_member *mem_tmp = NULL; + + lag_mem = net_dev->lag_mem; + + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) { + mem_count++; + if (lag_mem == mem_tmp) + break; + } + + if (nbl_list_entry_is_head(mem_tmp, &lag_info->mem_list_head, mem_list_node)) + return -ENOENT; + + if (mem_count == 0 || mem_count > NBL_LAG_MAX_PORTS) { + nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "lag member device has been deleted.\n"); + return -1; + } + + lag_mem->bonded = 0; + lag_mem->lag_id = NBL_INVALID_LAG_ID; + memset(&lag_mem->lower_state, 0, sizeof(lag_mem->lower_state)); + list_del(&lag_mem->mem_list_node); + + return 0; +} + +static int add_lag_member(struct nbl_dev_mgt *dev_mgt, + struct nbl_lag_instance *lag_info, + u8 lag_id, + struct netdev_notifier_changeupper_info *info) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_lag_member *lag_mem; + u8 mem_count = 0; + struct netdev_lag_upper_info *upper_info; + struct nbl_lag_member *mem_tmp = NULL; + + lag_mem = net_dev->lag_mem; + upper_info = info->upper_info; + + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) { + mem_count++; + if (lag_mem == mem_tmp) + return 0; + } + + if (mem_count < NBL_LAG_MAX_PORTS) { + lag_mem->bonded = 1; + lag_mem->lag_id = lag_id; + list_add_tail(&lag_mem->mem_list_node, &lag_info->mem_list_head); + } else { + nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "no available lag member resource.\n"); + return -1; + } + return 0; +} + +static bool is_lag_can_offload(struct nbl_dev_mgt *dev_mgt, + const struct nbl_lag_instance *lag_info) +{ + struct nbl_lag_resource *lag_resource_tmp; + struct nbl_lag_instance *lag_info_tmp; + u32 count = 0; + + if (!(lag_info->lag_upper_info.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP || + lag_info->lag_upper_info.tx_type == NETDEV_LAG_TX_TYPE_HASH)) { + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "bond dev %s tx_type %d is not allowed.\n", + netdev_name(lag_info->bond_netdev), lag_info->lag_upper_info.tx_type); + return false; + } + + /* if the lag instance's all lag members only belong to one card, this lag can offload */ + list_for_each_entry(lag_resource_tmp, &lag_resource_head, resource_node) { + list_for_each_entry(lag_info_tmp, + &lag_resource_tmp->lag_instance_head, instance_node) { + if (lag_info_tmp->bond_netdev == lag_info->bond_netdev && + !list_empty(&lag_info_tmp->mem_list_head)) + count++; + } + } + + return (count == 1) ? 
true : false; +} + +static int enable_lag_instance(struct nbl_lag_resource *lag_resource, + struct nbl_lag_instance *lag_info) +{ + u8 lag_id; + struct nbl_lag_member *lag_mem; + + if (lag_info->lag_enable) + return 0; + + /* enable the lag instance, and distribute a lag id, then updating all members' lag id */ + lag_id = find_first_zero_bit(lag_resource->lag_id_bitmap, NBL_LAG_MAX_NUM); + if (!nbl_lag_id_valid(lag_id)) + return -1; + + set_bit(lag_id, lag_resource->lag_id_bitmap); + + list_for_each_entry(lag_mem, &lag_info->mem_list_head, mem_list_node) + lag_mem->lag_id = lag_id; + + lag_info->lag_id = lag_id; + lag_info->lag_enable = 1; + return 0; +} + +static void disable_lag_instance(struct nbl_lag_resource *lag_resource, + struct nbl_lag_instance *lag_info) +{ + u8 lag_id; + + /* retrieving the lag id resource, then disable and init the lag instance. + * don't free the lag instance for reusing later if needed, all lag instance + * resource will be freed in lag dinit function. + */ + lag_id = lag_info->lag_id; + clear_bit(lag_id, lag_resource->lag_id_bitmap); + + init_lag_instance(lag_info, NULL); +} + +static void nbl_lag_changeupper_event(struct nbl_dev_mgt *dev_mgt, void *ptr, u32 *flag) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_lag_member *lag_mem, *mem_tmp; + struct nbl_lag_resource *lag_resource = NULL; + struct netdev_notifier_changeupper_info *info; + struct netdev_lag_upper_info *upper_info; + struct net_device *netdev; + struct nbl_lag_instance *lag_info; + const char *upper_name, *device_name; + struct net_device *current_netdev; + u8 lag_id = NBL_INVALID_LAG_ID; + u32 board_key; + int ret; + + info = ptr; + netdev = netdev_notifier_info_to_dev(ptr); + + lag_mem = net_dev->lag_mem; + current_netdev = net_dev->netdev; + board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | + dev_mgt->common->pdev->bus->number; + + /* not for this netdev */ + if (netdev != current_netdev) + return; + + device_name = netdev ? netdev_name(netdev) : "unset"; + + if (!info->upper_dev) { + nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "changeupper(%s) event received, but upper dev is null\n", device_name); + return; + } + + upper_info = info->upper_info; + upper_name = netdev_name(info->upper_dev); + + nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "changeupper(%s) event bond %s, linking: %d, master: %d, tx_type: %d, hash_type: %d.\n", + device_name, + upper_name, info->linking, info->master, + upper_info ? upper_info->tx_type : 0, + upper_info ? upper_info->hash_type : 0); + + if (!netif_is_lag_master(info->upper_dev)) { + nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "changeupper(%s) event received, but not master.\n", device_name); + return; + } + + lag_info = alloc_lag_instance(board_key, info->upper_dev, &lag_resource); + if (!lag_info || !lag_resource) { + nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "changeupper(%s) event received, but have no lag resource for board_key 0x%x.\n", + device_name, board_key); + return; + } + + lag_id = lag_info->lag_id; + if (info->linking) { + ret = add_lag_member(dev_mgt, lag_info, lag_id, info); + if (!ret) { + /* updating the lag info when the first device bonding to this lag */ + if (nbl_list_is_first(&lag_mem->mem_list_node, &lag_info->mem_list_head)) { + lag_info->bond_netdev = info->upper_dev; + lag_info->linkup = (lag_info->bond_netdev->flags & IFF_UP) ? 
1 : 0; + lag_info->lag_upper_info.tx_type = upper_info->tx_type; + lag_info->lag_upper_info.hash_type = upper_info->hash_type; + } else if (is_lag_can_offload(dev_mgt, lag_info)) { + /* if the lag can offload after the second device bonding to it, + * will enable the lag instance and assign a lag id for this lag, + * then update the offloading configuration. + */ + if (enable_lag_instance(lag_resource, lag_info)) + nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "enable lag failed, lag-bitmap: %lx.\n", + lag_resource->lag_id_bitmap[0]); + else + nbl_lag_create_bond_adev(dev_mgt, lag_info); + } + if (lag_info->lag_enable) { + *flag |= NBL_LAG_UPDATE_HASH | NBL_LAG_UPDATE_MEMBER | + NBL_LAG_UPDATE_LINK; + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, + mem_list_node) + nbl_update_lag_cfg(mem_tmp, mem_tmp->lag_id, *flag); + *flag = 0; + } + *flag = NBL_LAG_UPDATE_LACP_PKT; + serv_ops->set_lag_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + lag_info->bond_netdev, lag_info->lag_id); + } + } else { + nbl_lag_member_recover_adev(dev_mgt, lag_info, lag_mem); + + ret = del_lag_member(dev_mgt, lag_info, info); + if (!ret) { + /* updating the offloading configuration if the lag enabled. If all + * members unbonded, will disable and init this lag instance, and + * retrieve the lag id resource. + */ + if (lag_info->lag_enable) { + *flag |= NBL_LAG_UPDATE_MEMBER; + nbl_update_lag_cfg(lag_mem, lag_id, *flag); + } + if (list_empty(&lag_info->mem_list_head)) + disable_lag_instance(lag_resource, lag_info); + *flag = NBL_LAG_UPDATE_LACP_PKT | NBL_LAG_UPDATE_SFP_TX; + serv_ops->unset_lag_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + } + } +} + +static void nbl_lag_changelower_event(struct nbl_dev_mgt *dev_mgt, void *ptr, u32 *flag) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_lag_member *lag_mem; + struct netdev_notifier_changelowerstate_info *info; + struct netdev_lag_lower_state_info *lower_stat_info; + struct net_device *netdev; + const char *device_name; + struct net_device *current_netdev; + struct nbl_lag_instance *lag_info; + u8 lag_id; + u32 board_key; + + info = ptr; + netdev = netdev_notifier_info_to_dev(ptr); + lower_stat_info = info->lower_state_info; + if (!lower_stat_info) + return; + + device_name = netdev ? 
netdev_name(netdev) : "unset"; + + lag_mem = net_dev->lag_mem; + current_netdev = net_dev->netdev; + + /* not for this netdev */ + if (netdev != current_netdev) + return; + + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "changelower(%s) event link_up: %d, tx_enabled: %d.\n", + device_name, + lower_stat_info->link_up, + lower_stat_info->tx_enabled); + + board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | + dev_mgt->common->pdev->bus->number; + if (lag_mem->bonded) { + lag_mem->lower_state.link_up = lower_stat_info->link_up; + lag_mem->lower_state.tx_enabled = lower_stat_info->tx_enabled; + lag_id = lag_mem->lag_id; + lag_info = find_lag_by_lagid(board_key, lag_id); + if (!lag_info) { + nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "changelower(%s) event received, but have no lag resource for board_key 0x%x.\n", + device_name, board_key); + return; + } + + if (lag_info->lag_enable) + *flag |= NBL_LAG_UPDATE_MEMBER | NBL_LAG_UPDATE_LINK; + } +} + +static void nbl_lag_info_event(struct nbl_dev_mgt *dev_mgt, void *ptr, u32 *flag) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_lag_member *lag_mem; + struct net_device *netdev; + struct netdev_notifier_bonding_info *info; + struct netdev_bonding_info *bonding_info; + const char *lag_mem_name; + struct net_device *current_netdev; + struct nbl_lag_instance *lag_info; + u8 lag_id; + u32 board_key; + + info = ptr; + netdev = netdev_notifier_info_to_dev(ptr); + bonding_info = &info->bonding_info; + lag_mem = net_dev->lag_mem; + current_netdev = net_dev->netdev; + + if (!current_netdev || netdev != current_netdev) + return; + + lag_mem_name = netdev_name(current_netdev); + + nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "bondinfo(%s) event, bond_mode: %d, num_slaves: %d, miimon: %d.\n", + lag_mem_name, bonding_info->master.bond_mode, + bonding_info->master.num_slaves, + bonding_info->master.miimon); + + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "bondinfo(%s) event, slave_id: %d, slave_name: %s, link: %d, state: %d, failure_count: %d.\n", + lag_mem_name, bonding_info->slave.slave_id, + bonding_info->slave.slave_name, bonding_info->slave.link, + bonding_info->slave.state, bonding_info->slave.link_failure_count); + + if (bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) { + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "Bonding event received, but mode is not active-backup.\n"); + return; + } + + if (bonding_info->slave.state == BOND_STATE_BACKUP) { + if (lag_mem->bonded) { + lag_mem->lower_state.tx_enabled = 0; + board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | + dev_mgt->common->pdev->bus->number; + lag_id = lag_mem->lag_id; + lag_info = find_lag_by_lagid(board_key, lag_id); + if (!lag_info) { + nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "bondinfo(%s) event received, but have no lag resource for board_key 0x%x.\n", + lag_mem_name, board_key); + return; + } + if (lag_info->lag_enable) + *flag |= NBL_LAG_UPDATE_MEMBER | NBL_LAG_UPDATE_LINK; + } + } +} + +static void nbl_lag_updown_event(struct nbl_dev_mgt *dev_mgt, void *ptr, bool is_up, u32 *flag) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_lag_member *lag_mem; + struct net_device *event_netdev = NULL; + struct nbl_lag_instance *lag_info = NULL; + const char *device_name; + u8 linkup; + u32 board_key; + + event_netdev = netdev_notifier_info_to_dev(ptr); + device_name = 
netdev_name(event_netdev); + lag_mem = net_dev->lag_mem; + if (!lag_mem->bonded) + return; + + board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | + dev_mgt->common->pdev->bus->number; + lag_info = find_lag_by_bonddev(board_key, event_netdev); + + if (!(lag_info || net_dev->netdev == event_netdev)) + return; + + linkup = is_up ? 1 : 0; + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "netdev%s(%s) event received.\n", linkup ? "up" : "down", device_name); + + /* bond dev up/down event */ + if (lag_info) { + lag_info->linkup = linkup; + /* if the lag link changes, update the member's fwd type */ + if (lag_info->lag_enable) { + *flag |= NBL_LAG_UPDATE_LINK; + if (linkup) + *flag |= NBL_LAG_UPDATE_MEMBER; + } + } else { /* lag member dev up/down event */ + lag_mem->lower_state.link_up = linkup; + *flag |= NBL_LAG_UPDATE_SFP_TX; + } +} + +static void nbl_lag_change_event(struct nbl_dev_mgt *dev_mgt, void *ptr, u32 *flag) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_lag_member *lag_mem; + struct net_device *lag_netdev = NULL; + struct bonding *bond; + enum netdev_lag_hash new_hash; + struct nbl_lag_instance *lag_info = NULL; + const char *device_name; + u32 board_key; + + lag_netdev = netdev_notifier_info_to_dev(ptr); + + device_name = lag_netdev ? netdev_name(lag_netdev) : "unset"; + lag_mem = net_dev->lag_mem; + if (!lag_mem->bonded) + return; + + board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | + dev_mgt->common->pdev->bus->number; + + lag_info = find_lag_by_bonddev(board_key, lag_netdev); + + if (!lag_info) + return; + + bond = netdev_priv(lag_netdev); + + switch (bond->params.xmit_policy) { + case BOND_XMIT_POLICY_LAYER2: + new_hash = NETDEV_LAG_HASH_L2; + break; + case BOND_XMIT_POLICY_LAYER34: + new_hash = NETDEV_LAG_HASH_L34; + break; + case BOND_XMIT_POLICY_LAYER23: + new_hash = NETDEV_LAG_HASH_L23; + break; + case BOND_XMIT_POLICY_ENCAP23: + new_hash = NETDEV_LAG_HASH_E23; + break; + case BOND_XMIT_POLICY_ENCAP34: + new_hash = NETDEV_LAG_HASH_E34; + break; + case BOND_XMIT_POLICY_VLAN_SRCMAC: + new_hash = NETDEV_LAG_HASH_VLAN_SRCMAC; + break; + default: + new_hash = NETDEV_LAG_HASH_UNKNOWN; + break; + } + + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "netdevchange(%s) event received, old hash: %d, new hash: %d.\n", + device_name, lag_info->lag_upper_info.hash_type, new_hash); + + if (lag_info->lag_upper_info.hash_type != new_hash) { + lag_info->lag_upper_info.hash_type = new_hash; + if (lag_info->lag_enable) + *flag |= NBL_LAG_UPDATE_HASH; + } +} + +static int +nbl_lag_event_handler(struct notifier_block *notify_blk, unsigned long event, void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct nbl_lag_member *lag_mem; + struct nbl_dev_mgt *dev_mgt; + u32 update_flag = 0; + u8 lag_id = NBL_INVALID_LAG_ID; + + lag_mem = container_of(notify_blk, struct nbl_lag_member, notify_block); + + dev_mgt = (struct nbl_dev_mgt *)NBL_NETDEV_TO_DEV_MGT(lag_mem->netdev); + + nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, "nbl kernel(%s) received event: %s.\n", + netdev_name(netdev), netdev_cmd_to_name(event)); + + mutex_lock(&nbl_lag_mutex); + /* record the bonded slave's lag_id */ + if (lag_mem->bonded) + lag_id = lag_mem->lag_id; + + switch (event) { + case NETDEV_CHANGEUPPER: + nbl_lag_changeupper_event(dev_mgt, ptr, &update_flag); + 
break; + case NETDEV_CHANGELOWERSTATE: + nbl_lag_changelower_event(dev_mgt, ptr, &update_flag); + break; + case NETDEV_BONDING_INFO: + nbl_lag_info_event(dev_mgt, ptr, &update_flag); + break; + case NETDEV_DOWN: + nbl_lag_updown_event(dev_mgt, ptr, false, &update_flag); + break; + case NETDEV_UP: + nbl_lag_updown_event(dev_mgt, ptr, true, &update_flag); + break; + case NETDEV_CHANGE: + case NETDEV_FEAT_CHANGE: + nbl_lag_change_event(dev_mgt, ptr, &update_flag); + break; + default: + goto unlock; + } + /* update the new slave's lag_id */ + if (!nbl_lag_id_valid(lag_id)) + lag_id = lag_mem->lag_id; + + if (update_flag) { + nbl_update_lag_cfg(lag_mem, lag_id, update_flag); + nbl_display_lag_info(dev_mgt, lag_id); + } + +unlock: + mutex_unlock(&nbl_lag_mutex); + + return NOTIFY_DONE; +} + +u32 nbl_lag_get_other_active_members(struct nbl_dev_mgt *dev_mgt, + u16 eth_list[], u32 array_size) +{ + u32 active_count = 0; + + /* Not implemented yet: no other active members are reported. */ + return active_count; +} + +static void nbl_unregister_lag_handler(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_lag_member *lag_mem; + struct notifier_block *notif_blk; + struct netdev_net_notifier *netdevice_nn; + + lag_mem = net_dev->lag_mem; + notif_blk = &lag_mem->notify_block; + if (notif_blk->notifier_call) { + netdevice_nn = &lag_mem->netdevice_nn; + unregister_netdevice_notifier_dev_net(net_dev->netdev, notif_blk, netdevice_nn); + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "nbl lag event handler unregistered.\n"); + } +} + +static int nbl_register_lag_handler(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct notifier_block *notify_blk; + struct nbl_lag_member *lag_mem; + struct netdev_net_notifier *netdevice_nn; + + lag_mem = net_dev->lag_mem; + notify_blk = &lag_mem->notify_block; + + /* register the lag related event handler function for each device */ + if (!notify_blk->notifier_call) { + notify_blk->notifier_call = nbl_lag_event_handler; + netdevice_nn = &lag_mem->netdevice_nn; + if 
(register_netdevice_notifier_dev_net(net_dev->netdev, + notify_blk, netdevice_nn)) { + notify_blk->notifier_call = NULL; + nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "Failed to register nbl lag event handler!\n"); + return -EINVAL; + } + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "nbl lag event handler registered.\n"); + } + return 0; +} + +static int nbl_lag_alloc_resource(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_lag_resource *lag_resource_tmp; + u32 lag_resource_num = 0; + u32 board_key; + + board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | + dev_mgt->common->pdev->bus->number; + /* find the lag resource with the bus_id first, increasing the refcount if found */ + list_for_each_entry(lag_resource_tmp, &lag_resource_head, resource_node) { + lag_resource_num++; + if (lag_resource_tmp->board_key == board_key) { + kref_get(&lag_resource_tmp->kref); + goto ret_ok; + } + } + + /* check against the max number of cards we support */ + if (lag_resource_num >= NBL_LAG_MAX_RESOURCE_NUM) { + nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "Lag resource num %u exceeds the max num %u.\n", + lag_resource_num, NBL_LAG_MAX_RESOURCE_NUM); + goto ret_fail; + } + + /* alloc the lag resource when the card's first device registers */ + lag_resource_tmp = kzalloc(sizeof(*lag_resource_tmp), GFP_KERNEL); + if (!lag_resource_tmp) { + nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "Lag resource alloc failed.\n"); + goto ret_fail; + } + kref_init(&lag_resource_tmp->kref); + lag_resource_tmp->board_key = board_key; + INIT_LIST_HEAD(&lag_resource_tmp->lag_instance_head); + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "Alloc lag resource for board_key 0x%x, refcount %u.\n", + board_key, kref_read(&lag_resource_tmp->kref)); + /* add the new lag resource into the resource list */ + list_add_tail(&lag_resource_tmp->resource_node, &lag_resource_head); + +ret_ok: + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "Return lag resource for board_key 0x%x, refcount %u.\n", + board_key, kref_read(&lag_resource_tmp->kref)); + return 0; +ret_fail: + return -1; +} + +static void delete_and_free_lag_resource(struct kref *kref) +{ + struct nbl_lag_resource *lag_resource_tmp; + struct nbl_lag_instance *lag_info, *lag_tmp; + + lag_resource_tmp = container_of(kref, struct nbl_lag_resource, kref); + + /* release all lag instances first */ + list_for_each_entry_safe(lag_info, lag_tmp, + &lag_resource_tmp->lag_instance_head, instance_node) { + list_del(&lag_info->instance_node); + kfree(lag_info); + } + + list_del(&lag_resource_tmp->resource_node); + kfree(lag_resource_tmp); +} + +static void nbl_lag_free_resource(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_lag_resource *lag_resource, *lag_resource_tmp; + int ret; + u32 board_key; + + board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | + dev_mgt->common->pdev->bus->number; + list_for_each_entry_safe(lag_resource, lag_resource_tmp, + &lag_resource_head, resource_node) { + if (lag_resource->board_key == board_key) { + /* release the lag resource */ + ret = kref_put(&lag_resource->kref, delete_and_free_lag_resource); + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "Release the lag resource for board_key 0x%x, refcount %d.\n", + board_key, ret ? 
-1 : kref_read(&lag_resource->kref)); + } + } +} + +int nbl_init_lag(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param) +{ + int ret = 0; + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_lag_member *lag_mem; + u8 lag_id; + + if (!param->caps.support_lag) + return 0; + + lag_mem = net_dev->lag_mem; + if (!lag_mem) { + lag_mem = devm_kzalloc(NBL_DEV_MGT_TO_DEV(dev_mgt), + sizeof(*net_dev->lag_mem), GFP_KERNEL); + if (!lag_mem) + return -ENOMEM; + } + + lag_mem->bonded = 0; + lag_mem->lower_state.link_up = 0; + lag_mem->lower_state.tx_enabled = 0; + memset(&lag_mem->notify_block, 0, sizeof(lag_mem->notify_block)); + lag_mem->vsi_id = NBL_COMMON_TO_VSI_ID(NBL_DEV_MGT_TO_COMMON(dev_mgt)); + lag_mem->lag_id = NBL_INVALID_LAG_ID; + lag_mem->eth_id = NBL_DEV_MGT_TO_COMMON(dev_mgt)->eth_id; + lag_mem->logic_eth_id = NBL_DEV_MGT_TO_COMMON(dev_mgt)->logic_eth_id; + lag_mem->netdev = net_dev->netdev; + net_dev->lag_mem = lag_mem; + + mutex_lock(&nbl_lag_mutex); + ret = nbl_lag_alloc_resource(dev_mgt); + if (ret) { + nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "Failed to alloc lag resource.\n"); + goto err_alloc; + } + + for (lag_id = 0; nbl_lag_id_valid(lag_id); lag_id++) + nbl_display_lag_info(dev_mgt, lag_id); + + mutex_unlock(&nbl_lag_mutex); + + ret = nbl_register_lag_handler(dev_mgt); + if (ret) { + nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "Failed to register nbl lag event handler\n"); + goto err_register; + } + + ret = serv_ops->register_indr_dev_tc_offload(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + net_dev->netdev); + if (ret) + goto err_reg_lag_tc_offload; + + net_dev->lag_inited = 1; + + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, "Initialized the nbl lag successfully.\n"); + return 0; + +err_reg_lag_tc_offload: + nbl_unregister_lag_handler(dev_mgt); +err_register: + devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), net_dev->lag_mem); + net_dev->lag_mem = NULL; + mutex_lock(&nbl_lag_mutex); + nbl_lag_free_resource(dev_mgt); + mutex_unlock(&nbl_lag_mutex); + return ret; +err_alloc: + devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), net_dev->lag_mem); + net_dev->lag_mem = NULL; + mutex_unlock(&nbl_lag_mutex); + return ret; +} + +int nbl_deinit_lag(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (!net_dev->lag_inited) + return 0; + + serv_ops->unregister_indr_dev_tc_offload(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + net_dev->netdev); + nbl_unregister_lag_handler(dev_mgt); + + mutex_lock(&nbl_lag_mutex); + nbl_lag_free_resource(dev_mgt); + mutex_unlock(&nbl_lag_mutex); + + if (net_dev->lag_mem) + devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), net_dev->lag_mem); + net_dev->lag_mem = NULL; + + return 0; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.h new file mode 100644 index 0000000000000000000000000000000000000000..de913244391063157b52ec01f16718e294602f67 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_LAG_H +#define _NBL_LAG_H +#include <linux/netdevice.h> +#include "nbl_dev.h" + +#define NBL_INVALID_LAG_ID 0xf +#define nbl_lag_id_valid(lag_id) ((lag_id) < NBL_LAG_MAX_NUM) + +#define NBL_LAG_ENABLE BIT(0) +#define NBL_LAG_DISABLE BIT(1) +#define NBL_LAG_UPDATE_HASH BIT(2) +#define NBL_LAG_UPDATE_MEMBER BIT(3) +#define NBL_LAG_UPDATE_LINK BIT(4) +#define NBL_LAG_UPDATE_SFP_TX BIT(5) +#define NBL_LAG_UPDATE_LACP_PKT BIT(6) + +enum nbl_lag_mem_fwd { + NBL_LAG_MEM_FWD_DROP = 0, + NBL_LAG_MEM_FWD_NORMAL = 1, +}; + +struct nbl_lag_instance { + struct net_device *bond_netdev; + struct netdev_lag_upper_info lag_upper_info; + struct list_head mem_list_head; + struct list_head instance_node; + u8 linkup; + u8 lag_enable; + u8 lag_id; +}; + +struct nbl_lag_resource { + struct kref kref; + struct list_head resource_node; + u32 board_key; /* domain << 16 | bus_id */ + DECLARE_BITMAP(lag_id_bitmap, NBL_LAG_MAX_NUM); + struct list_head lag_instance_head; +}; + +static inline bool nbl_lag_mem_is_active(const struct nbl_lag_member *lag_mem) +{ + return lag_mem->bonded && lag_mem->lower_state.link_up && lag_mem->lower_state.tx_enabled; +} + +int nbl_init_lag(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param); +int nbl_deinit_lag(struct nbl_dev_mgt *dev_mgt); +u32 nbl_lag_get_other_active_members(struct nbl_dev_mgt *dev_mgt, + u16 eth_list[], u32 array_size); +#endif /* _NBL_LAG_H */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_p4_version.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_p4_version.h new file mode 100644 index 0000000000000000000000000000000000000000..2fbe7c7643861421eb4c4520b656b78f788b19ba --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_p4_version.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_P4_VERSION_H_ +#define _NBL_P4_VERSION_H_ + +#define NBL_SINGLE_VXLAN_TOE_ENHANCE_P4_MD5 "fc61c22894eb17f688dff153b7c29efe" +#define NBL_DUAL_VXLAN_TOE_ENHANCE_P4_MD5 "64fff3eeebdb53990c201ec70a430a55" +#define NBL_QUAD_VXLAN_TOE_ENHANCE_P4_MD5 "9b8ab0508834436e1df1eac537934485" + +#define NBL_SINGLE_PORT_HG_P4_MD5 "44757bab80dc985bffc04fe9a6d66bc1" +#define NBL_DUAL_PORT_HG_P4_MD5 "74e95394bc348b9cc6ebe5f9c28c2b8a" +#define NBL_QUAD_PORT_HG_P4_MD5 "009e209c4a3cab358bc76d5e06e3338b" + +#define NBL_SINGLE_PORT_LG_P4_MD5 "bfb18a8db52d82d2708920d0d3efc231" +#define NBL_DUAL_PORT_LG_P4_MD5 "32da40ac96884d520ebfe4179db2d7fb" +#define NBL_QUAD_PORT_LG_P4_MD5 "07453cc77b7c714c285038b05f5b53d7" + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c index fa17690c2b3811131fc717593f1c767aa1ffff4c..082916e2036135431fa11ca263f90922546fe616 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c @@ -5,8 +5,14 @@ */ #include "nbl_ethtool.h" +#include "nbl_ktls.h" +#include "nbl_ipsec.h" +#include "nbl_p4_version.h" +#include "nbl_tc.h" +#include <net/pkt_cls.h> static void nbl_serv_set_link_state(struct nbl_service_mgt *serv_mgt, struct net_device *netdev); +static int nbl_serv_update_default_vlan(struct nbl_service_mgt *serv_mgt, u16 vid); static void nbl_serv_set_queue_param(struct nbl_serv_ring *ring, u16 desc_num, struct nbl_txrx_queue_param *param, u16 vsi_id, @@ -29,7 +35,8 @@ static void nbl_serv_set_queue_param(struct nbl_serv_ring *ring, u16 desc_num, * txrx_registers only based on tx_ring, so the rx_info needs * to be delivered first before the tx_info can be delivered. 
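 * For example, for queue pair i the rx ring info must be
 * delivered before the tx ring info; the two loops below
 * keep exactly this order.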
*/ -int nbl_serv_setup_queues(struct nbl_service_mgt *serv_mgt, struct nbl_serv_ring_vsi_info *vsi_info) +static int +nbl_serv_setup_queues(struct nbl_service_mgt *serv_mgt, struct nbl_serv_ring_vsi_info *vsi_info) { struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); @@ -37,10 +44,14 @@ int nbl_serv_setup_queues(struct nbl_service_mgt *serv_mgt, struct nbl_serv_ring struct nbl_serv_ring *ring; struct nbl_serv_vector *vector; u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num; + int vector_offset = 0; int i, ret = 0; + if (vsi_info->vsi_index == NBL_VSI_XDP) + vector_offset = ring_mgt->xdp_ring_offset; + for (i = start; i < end; i++) { - vector = &ring_mgt->vectors[i]; + vector = &ring_mgt->vectors[i - vector_offset]; ring = &ring_mgt->rx_rings[i]; nbl_serv_set_queue_param(ring, ring_mgt->rx_desc_num, &param, vsi_info->vsi_id, vector->global_vector_id); @@ -51,7 +62,7 @@ int nbl_serv_setup_queues(struct nbl_service_mgt *serv_mgt, struct nbl_serv_ring } for (i = start; i < end; i++) { - vector = &ring_mgt->vectors[i]; + vector = &ring_mgt->vectors[i - vector_offset]; ring = &ring_mgt->tx_rings[i]; nbl_serv_set_queue_param(ring, ring_mgt->tx_desc_num, &param, @@ -65,7 +76,8 @@ int nbl_serv_setup_queues(struct nbl_service_mgt *serv_mgt, struct nbl_serv_ring return 0; } -void nbl_serv_flush_rx_queues(struct nbl_service_mgt *serv_mgt, u16 ring_offset, u16 ring_num) +static void +nbl_serv_flush_rx_queues(struct nbl_service_mgt *serv_mgt, u16 ring_offset, u16 ring_num) { struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); int i; @@ -74,8 +86,8 @@ void nbl_serv_flush_rx_queues(struct nbl_service_mgt *serv_mgt, u16 ring_offset, disp_ops->kick_rx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); } -int nbl_serv_setup_rings(struct nbl_service_mgt *serv_mgt, struct net_device *netdev, - struct nbl_serv_ring_vsi_info *vsi_info, bool use_napi) +static int nbl_serv_setup_rings(struct nbl_service_mgt *serv_mgt, struct net_device *netdev, + struct nbl_serv_ring_vsi_info *vsi_info, bool use_napi) { struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); @@ -121,8 +133,8 @@ int nbl_serv_setup_rings(struct nbl_service_mgt *serv_mgt, struct net_device *ne return ret; } -void nbl_serv_stop_rings(struct nbl_service_mgt *serv_mgt, - struct nbl_serv_ring_vsi_info *vsi_info) +static void nbl_serv_stop_rings(struct nbl_service_mgt *serv_mgt, + struct nbl_serv_ring_vsi_info *vsi_info) { struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num; @@ -179,6 +191,55 @@ static void nbl_serv_remove_rx_ring(struct nbl_serv_ring_mgt *ring_mgt, struct d ring_mgt->rx_rings = NULL; } +static int nbl_serv_register_xdp_rxq(struct nbl_service_mgt *serv_mgt, + struct nbl_serv_ring_mgt *ring_mgt) +{ + u16 ring_num; + int i, j; + int ret; + struct nbl_dispatch_ops *disp_ops; + struct nbl_serv_ring_vsi_info *vsi_info; + + if (ring_mgt->xdp_ring_offset == ring_mgt->tx_ring_num) + return 0; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + ring_num = vsi_info->ring_num; + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + for (i = 0; i < ring_num; i++) { + ret = disp_ops->register_xdp_rxq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); + if (ret) + goto register_xdp_err; + } + + return 0; +register_xdp_err: + for (j = 0; j < 
i; j++) + disp_ops->unregister_xdp_rxq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), j); + + return ret; +} + +static void nbl_serv_unregister_xdp_rxq(struct nbl_service_mgt *serv_mgt, + struct nbl_serv_ring_mgt *ring_mgt) +{ + u16 ring_num; + int i; + struct nbl_dispatch_ops *disp_ops; + struct nbl_serv_ring_vsi_info *vsi_info; + + if (ring_mgt->xdp_ring_offset == ring_mgt->tx_ring_num) + return; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + ring_num = vsi_info->ring_num; + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + for (i = 0; i < ring_num; i++) + disp_ops->unregister_xdp_rxq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); +} + static int nbl_serv_set_vectors(struct nbl_service_mgt *serv_mgt, struct net_device *netdev, struct device *dev) { @@ -187,17 +248,18 @@ static int nbl_serv_set_vectors(struct nbl_service_mgt *serv_mgt, struct nbl_resource_pt_ops *pt_ops = NBL_ADAPTER_TO_RES_PT_OPS(adapter); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); int i; - u16 ring_num = ring_mgt->rx_ring_num; + u16 ring_num = ring_mgt->xdp_ring_offset; ring_mgt->vectors = devm_kcalloc(dev, ring_num, sizeof(*ring_mgt->vectors), GFP_KERNEL); if (!ring_mgt->vectors) return -ENOMEM; for (i = 0; i < ring_num; i++) { - ring_mgt->vectors[i].napi = + ring_mgt->vectors[i].nbl_napi = disp_ops->get_vector_napi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); - netif_napi_add(netdev, ring_mgt->vectors[i].napi, pt_ops->napi_poll); + netif_napi_add(netdev, &ring_mgt->vectors[i].nbl_napi->napi, pt_ops->napi_poll); ring_mgt->vectors[i].netdev = netdev; + cpumask_clear(&ring_mgt->vectors[i].cpumask); } return 0; @@ -206,15 +268,49 @@ static int nbl_serv_set_vectors(struct nbl_service_mgt *serv_mgt, static void nbl_serv_remove_vectors(struct nbl_serv_ring_mgt *ring_mgt, struct device *dev) { int i; - u16 ring_num = ring_mgt->rx_ring_num; + u16 ring_num = ring_mgt->xdp_ring_offset; for (i = 0; i < ring_num; i++) - netif_napi_del(ring_mgt->vectors[i].napi); + netif_napi_del(&ring_mgt->vectors[i].nbl_napi->napi); devm_kfree(dev, ring_mgt->vectors); ring_mgt->vectors = NULL; } +static void nbl_serv_check_flow_table_spec(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + int ret; + + if (!flow_mgt->force_promisc) + return; + + ret = disp_ops->check_flow_table_spec(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + flow_mgt->vlan_list_cnt, + flow_mgt->unicast_mac_cnt + 1, + flow_mgt->multi_mac_cnt); + + if (!ret) { + flow_mgt->force_promisc = 0; + flow_mgt->pending_async_work = 1; + } +} + +static bool nbl_serv_check_need_flow_rule(u8 *mac, u16 promisc) +{ + if (promisc & (BIT(NBL_USER_FLOW) | BIT(NBL_MIRROR))) + return false; + + if (!is_multicast_ether_addr(mac) && (promisc & BIT(NBL_PROMISC))) + return false; + + if (is_multicast_ether_addr(mac) && (promisc & BIT(NBL_ALLMULTI))) + return false; + + return true; +} + static struct nbl_serv_vlan_node *nbl_serv_alloc_vlan_node(void) { struct nbl_serv_vlan_node *vlan_node = NULL; @@ -224,6 +320,10 @@ static struct nbl_serv_vlan_node *nbl_serv_alloc_vlan_node(void) return NULL; INIT_LIST_HEAD(&vlan_node->node); + vlan_node->ref_cnt = 1; + vlan_node->primary_mac_effective = 0; + vlan_node->sub_mac_effective = 0; + return vlan_node; } @@ -241,6 +341,8 @@ static struct nbl_serv_submac_node *nbl_serv_alloc_submac_node(void) return NULL; INIT_LIST_HEAD(&submac_node->node); + submac_node->effective = 0; + return submac_node; } @@ -249,6 +351,226 @@ 
static void nbl_serv_free_submac_node(struct nbl_serv_submac_node *submac_node) kfree(submac_node); } +static int nbl_serv_update_submac_node_effective(struct nbl_service_mgt *serv_mgt, + struct nbl_serv_submac_node *submac_node, + bool effective, + u16 vsi) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct net_device *dev = net_resource_mgt->netdev; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_vlan_node *vlan_node; + bool force_promisc = 0; + int ret = 0; + + if (submac_node->effective == effective) + return 0; + + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + if (!vlan_node->sub_mac_effective) + continue; + + if (effective) { + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, vlan_node->vid, vsi); + if (ret) + goto del_macvlan_node; + } else { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, vlan_node->vid, vsi); + } + } + submac_node->effective = effective; + if (effective) + flow_mgt->active_submac_list++; + else + flow_mgt->active_submac_list--; + + return 0; + +del_macvlan_node: + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + if (vlan_node->sub_mac_effective) + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, vlan_node->vid, vsi); + } + + if (ret) { + force_promisc = 1; + if (flow_mgt->force_promisc ^ force_promisc) { + flow_mgt->force_promisc = force_promisc; + flow_mgt->pending_async_work = 1; + netdev_info(dev, "Reached MAC filter limit, forcing promisc/allmulti mode\n"); + } + } + + return 0; +} + +static int nbl_serv_update_vlan_node_effective(struct nbl_service_mgt *serv_mgt, + struct nbl_serv_vlan_node *vlan_node, + bool effective, + u16 vsi) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct net_device *dev = net_resource_mgt->netdev; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_submac_node *submac_node; + bool force_promisc = 0; + int ret = 0, i = 0; + + if (vlan_node->primary_mac_effective == effective && + vlan_node->sub_mac_effective == effective) + return 0; + + if (effective && !vlan_node->primary_mac_effective) { + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + flow_mgt->mac, vlan_node->vid, vsi); + if (ret) + goto check_ret; + } else if (!effective && vlan_node->primary_mac_effective) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + flow_mgt->mac, vlan_node->vid, vsi); + } + + vlan_node->primary_mac_effective = effective; + + for (i = 0; i < NBL_SUBMAC_MAX; i++) + list_for_each_entry(submac_node, &flow_mgt->submac_list[i], node) { + if (!submac_node->effective) + continue; + + if (effective && !vlan_node->sub_mac_effective) { + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, vlan_node->vid, vsi); + if (ret) + goto del_macvlan_node; + } else if (!effective && vlan_node->sub_mac_effective) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, vlan_node->vid, vsi); + } + } + + vlan_node->sub_mac_effective = effective; + + return 0; + +del_macvlan_node: + for (i = 0; i < NBL_SUBMAC_MAX; i++) + list_for_each_entry(submac_node, &flow_mgt->submac_list[i], node) { + if (submac_node->effective) + 
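/* unwind: drop the macvlan entries installed before the failure */ +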
disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, vlan_node->vid, vsi); + } +check_ret: + if (ret) { + force_promisc = 1; + if (flow_mgt->force_promisc ^ force_promisc) { + flow_mgt->force_promisc = force_promisc; + flow_mgt->pending_async_work = 1; + netdev_info(dev, "Reached VLAN filter limit, forcing promisc/allmulti mode\n"); + } + } + + if (vlan_node->primary_mac_effective == effective) + return 0; + + if (!NBL_COMMON_TO_VF_CAP(NBL_SERV_MGT_TO_COMMON(serv_mgt))) + return 0; + + return ret; +} + +static void nbl_serv_del_submac_node(struct nbl_service_mgt *serv_mgt, u8 *mac, u16 vsi) +{ + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_submac_node *submac_node, *submac_node_safe; + struct list_head *submac_head; + + if (is_multicast_ether_addr(mac)) + submac_head = &flow_mgt->submac_list[NBL_SUBMAC_MULTI]; + else + submac_head = &flow_mgt->submac_list[NBL_SUBMAC_UNICAST]; + + list_for_each_entry_safe(submac_node, submac_node_safe, submac_head, node) + if (ether_addr_equal(submac_node->mac, mac)) { + if (submac_node->effective) + nbl_serv_update_submac_node_effective(serv_mgt, + submac_node, 0, vsi); + list_del(&submac_node->node); + flow_mgt->submac_list_cnt--; + if (is_multicast_ether_addr(submac_node->mac)) + flow_mgt->multi_mac_cnt--; + else + flow_mgt->unicast_mac_cnt--; + nbl_serv_free_submac_node(submac_node); + break; + } +} + +static int nbl_serv_add_submac_node(struct nbl_service_mgt *serv_mgt, u8 *mac, u16 vsi, u16 promisc) +{ + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_submac_node *submac_node; + struct list_head *submac_head; + + if (is_multicast_ether_addr(mac)) + submac_head = &flow_mgt->submac_list[NBL_SUBMAC_MULTI]; + else + submac_head = &flow_mgt->submac_list[NBL_SUBMAC_UNICAST]; + + list_for_each_entry(submac_node, submac_head, node) { + if (ether_addr_equal(submac_node->mac, mac)) + return 0; + } + + submac_node = nbl_serv_alloc_submac_node(); + if (!submac_node) + return -ENOMEM; + + submac_node->effective = 0; + ether_addr_copy(submac_node->mac, mac); + if (nbl_serv_check_need_flow_rule(mac, promisc) && + (flow_mgt->trusted_en || flow_mgt->active_submac_list < NBL_NO_TRUST_MAX_MAC)) { + nbl_serv_update_submac_node_effective(serv_mgt, submac_node, 1, vsi); + } + + list_add(&submac_node->node, submac_head); + flow_mgt->submac_list_cnt++; + if (is_multicast_ether_addr(mac)) + flow_mgt->multi_mac_cnt++; + else + flow_mgt->unicast_mac_cnt++; + + return 0; +} + +static void nbl_serv_update_mcast_submac(struct nbl_service_mgt *serv_mgt, bool multi_effective, + bool unicast_effective, u16 vsi) +{ + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_submac_node *submac_node; + + list_for_each_entry(submac_node, &flow_mgt->submac_list[NBL_SUBMAC_MULTI], node) + nbl_serv_update_submac_node_effective(serv_mgt, submac_node, + multi_effective, vsi); + + list_for_each_entry(submac_node, &flow_mgt->submac_list[NBL_SUBMAC_UNICAST], node) + nbl_serv_update_submac_node_effective(serv_mgt, submac_node, + unicast_effective, vsi); +} + +static void nbl_serv_update_promisc_vlan(struct nbl_service_mgt *serv_mgt, bool effective, u16 vsi) +{ + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_vlan_node *vlan_node; + + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) + nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node, effective, vsi); +} + +static void 
nbl_serv_del_all_vlans(struct nbl_service_mgt *serv_mgt) { struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); @@ -257,28 +579,223 @@ static void nbl_serv_del_all_vlans(struct nbl_service_mgt *serv_mgt) struct nbl_serv_vlan_node *vlan_node, *vlan_node_safe; list_for_each_entry_safe(vlan_node, vlan_node_safe, &flow_mgt->vlan_list, node) { - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, NBL_COMMON_TO_VSI_ID(common)); + if (vlan_node->primary_mac_effective) + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vlan_node->vid, NBL_COMMON_TO_VSI_ID(common)); list_del(&vlan_node->node); nbl_serv_free_vlan_node(vlan_node); } } -static void nbl_serv_del_all_submacs(struct nbl_service_mgt *serv_mgt) +static void nbl_serv_del_all_submacs(struct nbl_service_mgt *serv_mgt, u16 vsi) { struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_serv_submac_node *submac_node, *submac_node_safe; + int i; + + for (i = 0; i < NBL_SUBMAC_MAX; i++) + list_for_each_entry_safe(submac_node, submac_node_safe, + &flow_mgt->submac_list[i], node) { + nbl_serv_update_submac_node_effective(serv_mgt, submac_node, 0, vsi); + list_del(&submac_node->node); + flow_mgt->submac_list_cnt--; + if (is_multicast_ether_addr(submac_node->mac)) + flow_mgt->multi_mac_cnt--; + else + flow_mgt->unicast_mac_cnt--; + nbl_serv_free_submac_node(submac_node); + } +} + +static int nbl_serv_validate_tc_config(struct tc_mqprio_qopt_offload *mqprio_qopt, + struct nbl_common_info *common, u16 num_active_queues) +{ + u64 tx_rate = 0; + int i, num_qps = 0; + + if (mqprio_qopt->qopt.num_tc > NBL_MAX_QUEUE_TC_NUM || mqprio_qopt->qopt.num_tc < 1) { + nbl_err(common, NBL_DEBUG_QUEUE, "Invalid num_tc %u", mqprio_qopt->qopt.num_tc); + return -EINVAL; + } + + for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) { + if (!mqprio_qopt->qopt.count[i] || mqprio_qopt->qopt.offset[i] != num_qps) { + nbl_err(common, NBL_DEBUG_QUEUE, "Invalid offset %u, num_qps %u for tc %d", + mqprio_qopt->qopt.offset[i], num_qps, i); + return -EINVAL; + } + + if (mqprio_qopt->min_rate[i]) { + nbl_err(common, NBL_DEBUG_QUEUE, + "Unsupported min tx rate (must be 0) for TC %d", i); + return -EINVAL; + } + + tx_rate = div_u64(mqprio_qopt->max_rate[i], NBL_TC_MBPS_DIVSIOR); + + if (mqprio_qopt->max_rate[i] && tx_rate < NBL_TC_WEIGHT_GRAVITY) { + nbl_err(common, NBL_DEBUG_QUEUE, + "Invalid max tx rate for TC %d, minimum %d Mbps", + i, NBL_TC_WEIGHT_GRAVITY); + return -EINVAL; + } + + if (tx_rate % NBL_TC_WEIGHT_GRAVITY != 0) { + nbl_err(common, NBL_DEBUG_QUEUE, + "Invalid max tx rate for TC %d, not divisible by %d", + i, NBL_TC_WEIGHT_GRAVITY); + return -EINVAL; + } + + num_qps += mqprio_qopt->qopt.count[i]; + } + + if (num_qps > num_active_queues) { + nbl_err(common, NBL_DEBUG_QUEUE, "Cannot support requested number of queues"); + return -EINVAL; + } + + return 0; +} + +void nbl_serv_cpu_affinity_init(void *priv, u16 rings_num) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + int i; + + for (i = 0; i < rings_num; i++) { + cpumask_set_cpu(cpumask_local_spread(i, NBL_COMMON_TO_DEV(common)->numa_node), + &ring_mgt->vectors[i].cpumask); + 
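/* mirror the spread cpu into XPS so tx queue selection follows the same cpu */ +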
netif_set_xps_queue(ring_mgt->vectors[i].netdev, &ring_mgt->vectors[i].cpumask, i); + } +} + +static int nbl_serv_setup_tc_mqprio(struct net_device *netdev, void *type_data) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_tc_mgt *tc_mgt = NBL_SERV_MGT_TO_TC_MGT(serv_mgt); + struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; + struct nbl_tc_qidsc_param param; + u8 num_tc = mqprio_qopt->qopt.num_tc, total_qps = 0; + struct nbl_serv_ring_vsi_info *vsi_info; + int i, ret = 0; + + memset(&param, 0, sizeof(param)); + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + param.vsi_id = vsi_info->vsi_id; + + if (!mqprio_qopt->qopt.hw) { + /* hw 1 to hw 0 */ + if (tc_mgt->state == NBL_TC_RUNNING) { + /* reset the tc configuration */ + netdev_reset_tc(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); + + param.origin_qps = tc_mgt->total_qps; + disp_ops->cfg_qdisc_mqprio(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param); + + total_qps = tc_mgt->orig_num_active_queues; + tc_mgt->num_tc = 0; + tc_mgt->state = NBL_TC_INVALID; + + goto exit; + } else { + return -EINVAL; + } + } - list_for_each_entry_safe(submac_node, submac_node_safe, &flow_mgt->submac_list, node) { - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac, - NBL_DEFAULT_VLAN_ID, NBL_COMMON_TO_VSI_ID(common)); + if (mqprio_qopt->mode != TC_MQPRIO_MODE_CHANNEL) + return -EOPNOTSUPP; - list_del(&submac_node->node); - nbl_serv_free_submac_node(submac_node); + if (tc_mgt->state != NBL_TC_INVALID) { + netdev_err(netdev, "TC configuration already exists"); + return -EINVAL; + } + + ret = nbl_serv_validate_tc_config(mqprio_qopt, common, vsi_info->ring_num); + if (ret) { + netdev_err(netdev, "TC config invalid"); + return ret; + } + + if (tc_mgt->num_tc == num_tc) + return 0; + + if (num_tc > NBL_MAX_TC_NUM) { + netdev_err(netdev, "num_tc cannot exceed %d, but %d was set\n", NBL_MAX_TC_NUM, num_tc); + return -EINVAL; + } + + for (i = 0; i < num_tc; i++) { + total_qps += mqprio_qopt->qopt.count[i]; + param.info[i].count = mqprio_qopt->qopt.count[i]; + param.info[i].offset = mqprio_qopt->qopt.offset[i]; + param.info[i].max_tx_rate = div_u64(mqprio_qopt->max_rate[i], NBL_TC_MBPS_DIVSIOR); + } + + tc_mgt->num_tc = num_tc; + tc_mgt->orig_num_active_queues = vsi_info->active_ring_num; + + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); + + param.num_tc = num_tc; + param.enable = true; + param.origin_qps = tc_mgt->orig_num_active_queues; + param.gravity = NBL_TC_WEIGHT_GRAVITY; + ret = disp_ops->cfg_qdisc_mqprio(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param); + if (ret) { + netdev_err(netdev, "Failed to cfg qdisc mqprio"); + tc_mgt->num_tc = 0; + return ret; + } + + netdev_reset_tc(netdev); + /* Report the tc mapping up the stack */ + netdev_set_num_tc(netdev, num_tc); + for (i = 0; i < num_tc; i++) + netdev_set_tc_queue(netdev, i, mqprio_qopt->qopt.count[i], + mqprio_qopt->qopt.offset[i]); + + tc_mgt->state = NBL_TC_RUNNING; +exit: + /* If the device is unregistering, we cannot set queue nums or start them, + * otherwise 
we will hold the refcnt forever and block the unregistering process. + * + * Note: ndo_stop will not help, because ndo_stop (in dev_close_many) runs + * before ndo_setup_tc (in dev_shutdown) when unregistering + */ + if (total_qps && netdev->reg_state == NETREG_REGISTERED && + !test_bit(NBL_DOWN, adapter->state)) { + nbl_serv_cpu_affinity_init(serv_mgt, total_qps); + netif_set_real_num_rx_queues(netdev, total_qps); + netif_set_real_num_tx_queues(netdev, total_qps); + + nbl_serv_vsi_open(serv_mgt, netdev, NBL_VSI_DATA, total_qps, 1); + + netif_tx_start_all_queues(netdev); + nbl_serv_set_link_state(serv_mgt, netdev); } + + tc_mgt->total_qps = total_qps; + return ret; } static int nbl_serv_ipv6_exthdr_num(struct sk_buff *skb, int start, u8 nexthdr) @@ -340,45 +857,101 @@ static void nbl_serv_set_sfp_state(void *priv, struct net_device *netdev, u8 eth static void nbl_serv_set_netdev_carrier_state(void *priv, struct net_device *netdev, u8 link_state) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; if (test_bit(NBL_DOWN, adapter->state)) return; - if (link_state) { - if (!netif_carrier_ok(netdev)) { - netif_carrier_on(netdev); - netdev_info(netdev, "Set nic link up\n"); - } - } else { - if (netif_carrier_ok(netdev)) { - netif_carrier_off(netdev); - netdev_info(netdev, "Set nic link down\n"); + switch (net_resource_mgt->link_forced) { + case IFLA_VF_LINK_STATE_AUTO: + if (link_state) { + if (!netif_carrier_ok(netdev)) { + netif_carrier_on(netdev); + netdev_info(netdev, "Set nic link up\n"); + } + } else { + if (netif_carrier_ok(netdev)) { + netif_carrier_off(netdev); + netdev_info(netdev, "Set nic link down\n"); + } } + return; + case IFLA_VF_LINK_STATE_ENABLE: + netif_carrier_on(netdev); + return; + case IFLA_VF_LINK_STATE_DISABLE: + netif_carrier_off(netdev); + return; + default: + netif_carrier_on(netdev); + return; } } static void nbl_serv_set_link_state(struct nbl_service_mgt *serv_mgt, struct net_device *netdev) { + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u16 vsi_id = NBL_COMMON_TO_VSI_ID(common); u8 eth_id = NBL_COMMON_TO_ETH_ID(common); struct nbl_eth_link_info eth_link_info = {0}; int ret = 0; - ret = disp_ops->get_link_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - eth_id, &eth_link_info); - if (ret) { - netdev_err(netdev, "Fail to get_link_state err %d\n", ret); - eth_link_info.link_status = 1; + net_resource_mgt->link_forced = + disp_ops->get_link_forced(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + + if (net_resource_mgt->link_forced == IFLA_VF_LINK_STATE_AUTO) { + ret = disp_ops->get_link_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, &eth_link_info); + if (ret) { + netdev_err(netdev, "Fail to get_link_state err %d\n", ret); + eth_link_info.link_status = 1; + } } nbl_serv_set_netdev_carrier_state(serv_mgt, netdev, eth_link_info.link_status); } +static int nbl_serv_rep_netdev_open(struct net_device *netdev) +{ + int ret = 0; + + netdev_info(netdev, "Nbl rep open\n"); + ret = netif_set_real_num_tx_queues(netdev, 1); + if (ret) + goto fail; + ret = netif_set_real_num_rx_queues(netdev, 1); + if (ret) + goto fail; + + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + netdev_info(netdev, "Nbl rep open ok!\n"); + + return 0; + +fail: + 
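/* netif_set_real_num_*_queues() failed; leave the rep netdev down */ +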
return ret; +} + +static int nbl_serv_rep_netdev_stop(struct net_device *netdev) +{ + netdev_info(netdev, "Nbl rep stop\n"); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + netdev_info(netdev, "Nbl rep stop ok!\n"); + + return 0; +} + int nbl_serv_vsi_open(void *priv, struct net_device *netdev, u16 vsi_index, u16 real_qps, bool use_napi) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); @@ -401,6 +974,11 @@ int nbl_serv_vsi_open(void *priv, struct net_device *netdev, u16 vsi_index, } nbl_serv_flush_rx_queues(serv_mgt, vsi_info->ring_offset, vsi_info->ring_num); + if (vsi_index == NBL_VSI_DATA) + disp_ops->cfg_txrx_vlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + net_resource_mgt->vlan_tci, net_resource_mgt->vlan_proto, + vsi_index); + ret = disp_ops->cfg_dsch(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id, true); if (ret) { @@ -409,7 +987,8 @@ int nbl_serv_vsi_open(void *priv, struct net_device *netdev, u16 vsi_index, } vsi_info->active_ring_num = real_qps; - ret = disp_ops->setup_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id, real_qps); + ret = disp_ops->setup_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_info->vsi_id, real_qps, false); if (ret) goto setup_cqs_fail; @@ -442,63 +1021,93 @@ int nbl_serv_vsi_stop(void *priv, u16 vsi_index) /* modify defalt action and rss configuration */ disp_ops->remove_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id); + /* clear dsch config */ + disp_ops->cfg_dsch(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id, false); + /* disable and rest tx/rx logic queue */ disp_ops->remove_all_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id); - /* clear dsch config */ - disp_ops->cfg_dsch(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id, false); /* free tx and rx bufs */ nbl_serv_stop_rings(serv_mgt, vsi_info); return 0; } -static int nbl_serv_switch_traffic_default_dest(void *priv, u16 from_vsi, u16 to_vsi) +static struct nbl_mac_filter *nbl_add_filter(struct list_head *head, + const u8 *macaddr) +{ + struct nbl_mac_filter *f; + + if (!macaddr) + return NULL; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return f; + + ether_addr_copy(f->macaddr, macaddr); + list_add_tail(&f->list, head); + + return f; +} + +static int nbl_serv_suspend_data_vsi_traffic(struct nbl_service_mgt *serv_mgt) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); struct net_device *dev = net_resource_mgt->netdev; + struct nbl_netdev_priv *net_priv = netdev_priv(dev); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); - struct nbl_serv_vlan_node *vlan_node; - int ret; - list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, from_vsi); - ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, to_vsi); - if (ret) { - netdev_err(dev, "Fail to cfg macvlan on vid %u in vsi switch", - vlan_node->vid); - goto fail; - } - } + rtnl_lock(); + 
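/* quiesce the kernel data vsi and steer the default MAC to the user vsi while user-space owns the traffic */ +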
disp_ops->cfg_multi_mcast(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + net_priv->data_vsi, 0); + disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + net_priv->data_vsi, 0); + + disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + 0, net_priv->user_vsi); + + flow_mgt->promisc &= ~BIT(NBL_PROMISC); + flow_mgt->promisc &= ~BIT(NBL_ALLMULTI); + flow_mgt->promisc |= BIT(NBL_USER_FLOW); + rtnl_unlock(); - /* trigger submac update */ - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER; - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); - /* arp/nd traffic */ - disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), from_vsi); - ret = disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), to_vsi); - if (ret) - goto add_multi_fail; + return 0; +} + +static int nbl_serv_restore_vsi_traffic(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct net_device *dev = net_resource_mgt->netdev; + struct nbl_netdev_priv *net_priv = netdev_priv(dev); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + rtnl_lock(); + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + 0, net_priv->user_vsi); + disp_ops->cfg_multi_mcast(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->user_vsi, 0); + disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->user_vsi, 0); + flow_mgt->promisc &= ~BIT(NBL_USER_FLOW); + rtnl_unlock(); + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); return 0; +} -add_multi_fail: - disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), from_vsi); -fail: - list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, to_vsi); - disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, from_vsi); - } +static int nbl_serv_switch_traffic_default_dest(void *priv, int op) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - return -EINVAL; + if (op == NBL_DEV_KERNEL_TO_USER) + nbl_serv_suspend_data_vsi_traffic(serv_mgt); + else if (op == NBL_DEV_USER_TO_KERNEL) + nbl_serv_restore_vsi_traffic(serv_mgt); + + return 0; } static int nbl_serv_abnormal_event_to_queue(int event_type) @@ -513,15 +1122,65 @@ static int nbl_serv_abnormal_event_to_queue(int event_type) } } +static int nbl_serv_stop_abnormal_sw_queue(struct nbl_service_mgt *serv_mgt, + u16 local_queue_id, int type) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->stop_abnormal_sw_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + local_queue_id, type); +} + +static int nbl_serv_chan_stop_abnormal_sw_queue_req(struct nbl_service_mgt *serv_mgt, + u16 local_queue_id, u16 func_id, int type) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_param_stop_abnormal_sw_queue param = {0}; + struct nbl_chan_send_info chan_send = {0}; + int ret = 0; + + param.local_queue_id = local_queue_id; + param.type = type; + + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_STOP_ABNORMAL_SW_QUEUE, + ¶m, sizeof(param), NULL, 0, 1); + ret = chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); + + return ret; +} + +static 
void nbl_serv_chan_stop_abnormal_sw_queue_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_param_stop_abnormal_sw_queue *param = + (struct nbl_chan_param_stop_abnormal_sw_queue *)data; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + if (param->local_queue_id < vsi_info->ring_offset || + param->local_queue_id >= vsi_info->ring_offset + vsi_info->ring_num || + !vsi_info->ring_num) { + ret = -EINVAL; + goto send_ack; + } + + ret = nbl_serv_stop_abnormal_sw_queue(serv_mgt, param->local_queue_id, param->type); + +send_ack: + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_STOP_ABNORMAL_SW_QUEUE, msg_id, + ret, NULL, 0); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); +} + static dma_addr_t nbl_serv_netdev_queue_restore(struct nbl_service_mgt *serv_mgt, u16 local_queue_id, int type) { - struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_serv_vector *vector = &ring_mgt->vectors[local_queue_id]; - - if (type == NBL_TX) - netif_stop_subqueue(vector->netdev, local_queue_id); return disp_ops->restore_abnormal_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), local_queue_id, type); @@ -530,12 +1189,7 @@ static dma_addr_t nbl_serv_netdev_queue_restore(struct nbl_service_mgt *serv_mgt static int nbl_serv_netdev_queue_restart(struct nbl_service_mgt *serv_mgt, u16 local_queue_id, int type) { - struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_serv_vector *vector = &ring_mgt->vectors[local_queue_id]; - - if (type == NBL_TX) - netif_start_subqueue(vector->netdev, local_queue_id); return disp_ops->restart_abnormal_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), local_queue_id, type); @@ -572,17 +1226,21 @@ static void nbl_serv_chan_restore_netdev_queue_resp(void *priv, u16 src_id, u16 struct nbl_serv_ring_vsi_info *vsi_info; struct nbl_chan_ack_info chan_ack; dma_addr_t dma = 0; + int ret = NBL_CHAN_RESP_OK; vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; if (param->local_queue_id < vsi_info->ring_offset || param->local_queue_id >= vsi_info->ring_offset + vsi_info->ring_num || - !vsi_info->ring_num) - return; + !vsi_info->ring_num) { + ret = -EINVAL; + goto send_ack; + } dma = nbl_serv_netdev_queue_restore(serv_mgt, param->local_queue_id, param->type); +send_ack: NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESTORE_NETDEV_QUEUE, msg_id, - NBL_CHAN_RESP_OK, &dma, sizeof(dma)); + ret, &dma, sizeof(dma)); chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); } @@ -610,20 +1268,60 @@ static void nbl_serv_chan_restart_netdev_queue_resp(void *priv, u16 src_id, u16 struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); struct nbl_serv_ring_vsi_info *vsi_info; struct nbl_chan_ack_info chan_ack; + int ret = 0; vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; if (param->local_queue_id < vsi_info->ring_offset || param->local_queue_id >= vsi_info->ring_offset + vsi_info->ring_num || - !vsi_info->ring_num) - return; + !vsi_info->ring_num) { + ret = -EINVAL; + goto send_ack; + } - nbl_serv_netdev_queue_restart(serv_mgt, 
param->local_queue_id, param->type); + ret = nbl_serv_netdev_queue_restart(serv_mgt, param->local_queue_id, param->type); +send_ack: NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESTART_NETDEV_QUEUE, msg_id, - NBL_CHAN_RESP_OK, NULL, 0); + ret, NULL, 0); chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); } +static int +nbl_serv_start_abnormal_hw_queue(struct nbl_service_mgt *serv_mgt, u16 vsi_id, u16 local_queue_id, + dma_addr_t dma, int type) +{ + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_txrx_queue_param param = {0}; + struct nbl_serv_vector *vector; + struct nbl_serv_ring *ring; + int ret = 0; + + switch (type) { + case NBL_TX: + vector = &ring_mgt->vectors[local_queue_id]; + ring = &ring_mgt->tx_rings[local_queue_id]; + ring->dma = dma; + nbl_serv_set_queue_param(ring, ring_mgt->tx_desc_num, &param, + vsi_id, vector->global_vector_id); + ret = disp_ops->setup_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param, true); + return ret; + case NBL_RX: + vector = &ring_mgt->vectors[local_queue_id]; + ring = &ring_mgt->rx_rings[local_queue_id]; + ring->dma = dma; + + nbl_serv_set_queue_param(ring, ring_mgt->rx_desc_num, &param, + vsi_id, vector->global_vector_id); + ret = disp_ops->setup_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param, false); + return ret; + default: + break; + } + + return -EINVAL; +} + static void nbl_serv_restore_queue(struct nbl_service_mgt *serv_mgt, u16 vsi_id, u16 local_queue_id, u16 type, bool dif_err) { @@ -634,16 +1332,29 @@ static void nbl_serv_restore_queue(struct nbl_service_mgt *serv_mgt, u16 vsi_id, dma_addr_t dma = 0; int ret = 0; + while (!rtnl_trylock()) + msleep(20); + + ret = nbl_serv_chan_stop_abnormal_sw_queue_req(serv_mgt, local_queue_id, func_id, type); + if (ret) + goto unlock; + + ret = disp_ops->stop_abnormal_hw_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, + local_queue_id, type); + if (ret) + goto unlock; + dma = nbl_serv_chan_restore_netdev_queue_req(serv_mgt, local_queue_id, func_id, type); if (!dma) - return; + goto unlock; - ret = disp_ops->restore_hw_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, - local_queue_id, dma, type); + ret = nbl_serv_start_abnormal_hw_queue(serv_mgt, vsi_id, local_queue_id, dma, type); if (ret) - return; + goto unlock; - nbl_serv_chan_restart_netdev_queue_req(serv_mgt, local_queue_id, func_id, type); + ret = nbl_serv_chan_restart_netdev_queue_req(serv_mgt, local_queue_id, func_id, type); + if (ret) + goto unlock; if (dif_err && type == NBL_TX) { global_queue_id = @@ -652,6 +1363,9 @@ static void nbl_serv_restore_queue(struct nbl_service_mgt *serv_mgt, u16 vsi_id, nbl_info(common, NBL_DEBUG_COMMON, "dvn int_status:0, queue_id:%d\n", global_queue_id); } + +unlock: + rtnl_unlock(); } static void nbl_serv_handle_tx_timeout(struct work_struct *work) @@ -661,2242 +1375,6050 @@ static void nbl_serv_handle_tx_timeout(struct work_struct *work) struct nbl_service_mgt *serv_mgt = serv_net_resource_mgt->serv_mgt; struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); struct nbl_serv_ring_vsi_info *vsi_info; + struct nbl_serv_vector *vector; int i = 0; vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; for (i = vsi_info->ring_offset; i < vsi_info->ring_offset + vsi_info->ring_num; i++) { if (ring_mgt->tx_rings[i].need_recovery) { + vector = &ring_mgt->vectors[i]; nbl_serv_restore_queue(serv_mgt, vsi_info->vsi_id, i, NBL_TX, false); ring_mgt->tx_rings[i].need_recovery = false; } } } -int 
nbl_serv_netdev_open(struct net_device *netdev) +static void nbl_serv_update_link_state(struct work_struct *work) { - struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); - struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); - struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); - struct nbl_serv_ring_vsi_info *vsi_info; - int num_cpus, real_qps, ret = 0; - - vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + struct nbl_serv_net_resource_mgt *serv_net_resource_mgt = + container_of(work, struct nbl_serv_net_resource_mgt, update_link_state); + struct nbl_service_mgt *serv_mgt = serv_net_resource_mgt->serv_mgt; - if (!test_bit(NBL_DOWN, adapter->state)) - return -EBUSY; + nbl_serv_set_link_state(serv_mgt, serv_net_resource_mgt->netdev); +} - netdev_info(netdev, "Nbl open\n"); - netif_carrier_off(netdev); +static int nbl_serv_chan_notify_link_forced_req(struct nbl_service_mgt *serv_mgt, u16 func_id) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_send_info chan_send = {0}; - nbl_serv_set_sfp_state(serv_mgt, netdev, NBL_COMMON_TO_ETH_ID(common), true, false); + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_NOTIFY_LINK_FORCED, NULL, 0, NULL, 0, 1); + return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); +} - if (vsi_info->active_ring_num) { - real_qps = vsi_info->active_ring_num; - } else { - num_cpus = num_online_cpus(); - real_qps = num_cpus > vsi_info->ring_num ? vsi_info->ring_num : num_cpus; - } +static void nbl_serv_chan_notify_link_forced_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_ack_info chan_ack; - ret = nbl_serv_vsi_open(serv_mgt, netdev, NBL_VSI_DATA, real_qps, 1); - if (ret) - goto vsi_open_fail; + if (!net_resource_mgt) + return; - ret = netif_set_real_num_tx_queues(netdev, real_qps); - if (ret) - goto setup_real_qps_fail; - ret = netif_set_real_num_rx_queues(netdev, real_qps); - if (ret) - goto setup_real_qps_fail; + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_NOTIFY_LINK_FORCED, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); - netif_tx_start_all_queues(netdev); - clear_bit(NBL_DOWN, adapter->state); - set_bit(NBL_RUNNING, adapter->state); - nbl_serv_set_link_state(serv_mgt, netdev); + nbl_common_queue_work(&net_resource_mgt->update_link_state, false, false); +} - netdev_info(netdev, "Nbl open ok!\n"); +static void nbl_serv_register_link_forced_notify(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); - return 0; + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; -setup_real_qps_fail: - nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); -vsi_open_fail: - return ret; + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_NOTIFY_LINK_FORCED, + nbl_serv_chan_notify_link_forced_resp, serv_mgt); } -int nbl_serv_netdev_stop(struct net_device *netdev) +static void nbl_serv_unregister_link_forced_notify(struct nbl_service_mgt *serv_mgt) { - struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); - struct nbl_service_mgt *serv_mgt = 
NBL_ADAPTER_TO_SERV_MGT(adapter); - struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); - struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); - struct nbl_serv_ring_vsi_info *vsi_info; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); - vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; - if (!test_bit(NBL_RUNNING, adapter->state)) - return -EBUSY; + chan_ops->unregister_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_NOTIFY_LINK_FORCED); +} - netdev_info(netdev, "Nbl stop\n"); - set_bit(NBL_DOWN, adapter->state); - clear_bit(NBL_RUNNING, adapter->state); +static void nbl_serv_update_vlan(struct work_struct *work) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = + container_of(work, struct nbl_serv_net_resource_mgt, update_vlan); + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct net_device *netdev = net_resource_mgt->netdev; + int was_running, err; + u16 vid; - nbl_serv_set_sfp_state(serv_mgt, netdev, NBL_COMMON_TO_ETH_ID(common), false, false); + vid = net_resource_mgt->vlan_tci & VLAN_VID_MASK; + nbl_serv_update_default_vlan(serv_mgt, vid); - netif_tx_stop_all_queues(netdev); - netif_carrier_off(netdev); - netif_tx_disable(netdev); + rtnl_lock(); + was_running = netif_running(netdev); - nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); + if (was_running) { + err = nbl_serv_netdev_stop(netdev); + if (err) { + netdev_err(netdev, "Netdev stop failed while update_vlan\n"); + goto netdev_stop_fail; + } - netdev_info(netdev, "Nbl stop ok!\n"); + err = nbl_serv_netdev_open(netdev); + if (err) { + netdev_err(netdev, "Netdev open failed after update_vlan\n"); + goto netdev_open_fail; + } + } - return 0; +netdev_stop_fail: +netdev_open_fail: + rtnl_unlock(); } -static int nbl_serv_change_mtu(struct net_device *netdev, int new_mtu) +static int nbl_serv_chan_notify_vlan_req(struct nbl_service_mgt *serv_mgt, u16 func_id, + struct nbl_serv_notify_vlan_param *param) { - netdev->mtu = new_mtu; - return 0; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_NOTIFY_VLAN, + param, sizeof(*param), NULL, 0, 1); + return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); } -static int nbl_serv_set_mac(struct net_device *dev, void *p) +static void nbl_serv_chan_notify_vlan_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { - struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); - struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); - struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); - struct nbl_serv_vlan_node *vlan_node; - struct sockaddr *addr = p; - struct nbl_netdev_priv *priv = netdev_priv(dev); - u16 vsi_id = priv->default_vsi_id; - int ret = 0; - - if (!is_valid_ether_addr(addr->sa_data)) { - netdev_err(dev, "Temp to change a invalid mac address %pM\n", addr->sa_data); - return -EADDRNOTAVAIL; - } - - if (ether_addr_equal(dev->dev_addr, addr->sa_data)) - return 0; - - list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, vsi_id); - ret = 
disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data, - vlan_node->vid, vsi_id); - if (ret) { - netdev_err(dev, "Fail to cfg macvlan on vid %u", vlan_node->vid); - goto fail; - } - } - - disp_ops->set_spoof_check_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - vsi_id, addr->sa_data); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_serv_notify_vlan_param *param = (struct nbl_serv_notify_vlan_param *)data; + struct nbl_chan_ack_info chan_ack; - ether_addr_copy(flow_mgt->mac, addr->sa_data); - eth_hw_addr_set(dev, addr->sa_data); + net_resource_mgt->vlan_tci = param->vlan_tci; + net_resource_mgt->vlan_proto = param->vlan_proto; - if (!NBL_COMMON_TO_VF_CAP(common)) - disp_ops->set_eth_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - addr->sa_data, NBL_COMMON_TO_ETH_ID(common)); + nbl_common_queue_work(&net_resource_mgt->update_vlan, false, false); - return 0; -fail: - list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data, - vlan_node->vid, vsi_id); - disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, vsi_id); - } - return -EAGAIN; + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_NOTIFY_VLAN, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); } -static int nbl_serv_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +static void nbl_serv_register_vlan_notify(struct nbl_service_mgt *serv_mgt) { - struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); - struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); - struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); - struct nbl_netdev_priv *priv = netdev_priv(dev); - struct nbl_serv_vlan_node *vlan_node; - u16 vsi_id = priv->default_vsi_id; - int ret = 0; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); - if (vid == NBL_DEFAULT_VLAN_ID) - return 0; + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; - nbl_debug(common, NBL_DEBUG_COMMON, "add mac-vlan dev for proto 0x%04x, vid %u.", - be16_to_cpu(proto), vid); + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), NBL_CHAN_MSG_NOTIFY_VLAN, + nbl_serv_chan_notify_vlan_resp, serv_mgt); +} - list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { - nbl_debug(common, NBL_DEBUG_COMMON, "add mac-vlan dev vid %u.", vlan_node->vid); - if (vlan_node->vid == vid) - return 0; - } +static void nbl_serv_unregister_vlan_notify(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); - vlan_node = nbl_serv_alloc_vlan_node(); - if (!vlan_node) - return -EAGAIN; + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; - ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - flow_mgt->mac, vid, vsi_id); - if (ret) { - nbl_serv_free_vlan_node(vlan_node); - return -EAGAIN; - } + chan_ops->unregister_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), NBL_CHAN_MSG_NOTIFY_VLAN); +} - vlan_node->vid = vid; - list_add(&vlan_node->node, &flow_mgt->vlan_list); +static 
int nbl_serv_chan_notify_trust_req(struct nbl_service_mgt *serv_mgt, + u16 func_id, bool trusted) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_send_info chan_send = {0}; - return 0; + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_NOTIFY_TRUST, &trusted, sizeof(trusted), + NULL, 0, 1); + return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); } -static int nbl_serv_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +static void nbl_serv_chan_notify_trust_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { - struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); - struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); - struct nbl_netdev_priv *priv = netdev_priv(dev); - struct nbl_serv_vlan_node *vlan_node; - u16 vsi_id = priv->default_vsi_id; - - if (vid == NBL_DEFAULT_VLAN_ID) - return 0; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + bool *trusted = (bool *)data; + struct nbl_chan_ack_info chan_ack; - nbl_debug(common, NBL_DEBUG_COMMON, "del mac-vlan dev for proto 0x%04x, vid %u.", - be16_to_cpu(proto), vid); + flow_mgt->trusted_en = *trusted; + flow_mgt->trusted_update = 1; + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); - list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { - nbl_debug(common, NBL_DEBUG_COMMON, "del mac-vlan dev vid %u.", vlan_node->vid); - if (vlan_node->vid == vid) { - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vid, vsi_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_NOTIFY_TRUST, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); +} - list_del(&vlan_node->node); - nbl_serv_free_vlan_node(vlan_node); +static void nbl_serv_register_trust_notify(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); - break; - } - } + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; - return 0; + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), NBL_CHAN_MSG_NOTIFY_TRUST, + nbl_serv_chan_notify_trust_resp, serv_mgt); } -static void nbl_serv_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +static void nbl_serv_unregister_trust_notify(struct nbl_service_mgt *serv_mgt) { - struct nbl_queue_stats queue_stats = { 0 }; - struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); - struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_serv_ring_vsi_info *vsi_info; - u16 start, end; - int i; - - vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; - start = vsi_info->ring_offset; - end = vsi_info->ring_offset + vsi_info->ring_num; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); - if (!stats) + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) return; - for (i = start; i < end; 
i++) { - disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - i, &queue_stats, true); - stats->tx_packets += queue_stats.packets; - stats->tx_bytes += queue_stats.bytes; - } - - for (i = start; i < end; i++) { - disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - i, &queue_stats, false); - stats->rx_packets += queue_stats.packets; - stats->rx_bytes += queue_stats.bytes; - } - - stats->multicast = 0; - stats->rx_errors = 0; - stats->tx_errors = 0; - stats->rx_length_errors = 0; - stats->rx_crc_errors = 0; - stats->rx_frame_errors = 0; - stats->rx_dropped = 0; - stats->tx_dropped = 0; + chan_ops->unregister_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), NBL_CHAN_MSG_NOTIFY_TRUST); } -static void nbl_modify_submacs(struct nbl_serv_net_resource_mgt *net_resource_mgt) +static void nbl_serv_update_mirror_outputport(struct work_struct *work) { - struct netdev_hw_addr *ha; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + container_of(work, struct nbl_serv_net_resource_mgt, update_mirror_outputport); struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); - struct nbl_netdev_priv *priv = netdev_priv(net_resource_mgt->netdev); - struct nbl_serv_submac_node *submac_node; - int uc_count, i, ret = 0; - u16 vsi_id = priv->default_vsi_id; - u8 *buf = NULL; - u16 len; - - spin_lock_bh(&net_resource_mgt->mac_vlan_list_lock); - uc_count = netdev_uc_count(net_resource_mgt->netdev); - - if (uc_count) { - len = uc_count * ETH_ALEN; - buf = kzalloc(len, GFP_ATOMIC); - - if (!buf) { - spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); - return; - } + bool mirror; - i = 0; - netdev_hw_addr_list_for_each(ha, &net_resource_mgt->netdev->uc) { - if (i >= len) - break; - memcpy(&buf[i], ha->addr, ETH_ALEN); - i += ETH_ALEN; - } + mirror = !!(flow_mgt->promisc & BIT(NBL_MIRROR)); + nbl_event_notify(NBL_EVENT_MIRROR_OUTPUTPORT_DEVLAYER, &mirror, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); - net_resource_mgt->rxmode_set_required &= ~NBL_FLAG_AQ_MODIFY_MAC_FILTER; - } - spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); +} - nbl_serv_del_all_submacs(serv_mgt); +static int nbl_serv_chan_notify_mirror_outputport_req(struct nbl_service_mgt *serv_mgt, u16 func_id, + bool opcode) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_send_info chan_send = {0}; - for (i = 0; i < uc_count; i++) { - submac_node = nbl_serv_alloc_submac_node(); - if (!submac_node) - break; + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_MIRROR_OUTPUTPORT_NOTIFY, + &opcode, sizeof(bool), NULL, 0, 1); + return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); +} - ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &buf[i * ETH_ALEN], - 0, vsi_id); - if (ret) { - nbl_serv_free_submac_node(submac_node); - break; - } +static void nbl_serv_chan_notify_mirror_outputport_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_channel_ops *chan_ops = 
NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + bool opcode = *(bool *)data; + struct nbl_chan_ack_info chan_ack; - ether_addr_copy(submac_node->mac, &buf[i * ETH_ALEN]); - list_add(&submac_node->node, &flow_mgt->submac_list); + if (!!(flow_mgt->promisc & BIT(NBL_MIRROR)) ^ opcode) { + if (opcode) + flow_mgt->promisc |= BIT(NBL_MIRROR); + else + flow_mgt->promisc &= ~BIT(NBL_MIRROR); + nbl_common_queue_work(&net_resource_mgt->update_mirror_outputport, false, false); } - kfree(buf); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_MIRROR_OUTPUTPORT_NOTIFY, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); } -static void nbl_modify_promisc_mode(struct nbl_serv_net_resource_mgt *net_resource_mgt) +static void nbl_serv_register_mirror_outputport_notify(struct nbl_service_mgt *serv_mgt) { - struct nbl_netdev_priv *priv = netdev_priv(net_resource_mgt->netdev); - struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - u16 mode = 0; - - spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); - if (net_resource_mgt->curr_promiscuout_mode & (IFF_PROMISC | IFF_ALLMULTI)) - mode = 1; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); - net_resource_mgt->rxmode_set_required &= ~NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; - spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; - disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - priv->default_vsi_id, mode); + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_MIRROR_OUTPUTPORT_NOTIFY, + nbl_serv_chan_notify_mirror_outputport_resp, serv_mgt); } -static struct nbl_mac_filter *nbl_find_filter(struct nbl_adapter *adapter, const u8 *macaddr) +static void nbl_serv_unregister_mirror_outputport_notify(struct nbl_service_mgt *serv_mgt) { - struct nbl_service_mgt *serv_mgt; - struct nbl_serv_net_resource_mgt *net_resource_mgt; - struct nbl_mac_filter *f; - - if (!macaddr) - return NULL; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); - serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - list_for_each_entry(f, &net_resource_mgt->mac_filter_list, list) { - if (ether_addr_equal(macaddr, f->macaddr)) - return f; - } + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; - return NULL; + chan_ops->unregister_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_MIRROR_OUTPUTPORT_NOTIFY); } -static void nbl_free_filter(struct nbl_serv_net_resource_mgt *net_resource_mgt) +static int nbl_serv_chan_get_vf_stats_req(struct nbl_service_mgt *serv_mgt, + u16 func_id, struct nbl_vf_stats *vf_stats) { - struct nbl_mac_filter *f; - struct list_head *pos, *n; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_send_info chan_send = {0}; - list_for_each_safe(pos, n, &net_resource_mgt->mac_filter_list) { - f = list_entry(pos, struct nbl_mac_filter, list); - list_del(&f->list); - kfree(f); - } + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_GET_VF_STATS, + NULL, 0, vf_stats, sizeof(*vf_stats), 1); + return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); } -static struct nbl_mac_filter *nbl_add_filter(struct nbl_adapter *adapter, const u8 *macaddr) +static void 
nbl_serv_chan_get_vf_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { - struct nbl_mac_filter *f; - struct nbl_service_mgt *serv_mgt; - struct nbl_serv_net_resource_mgt *net_resource_mgt; - - if (!macaddr) - return NULL; - - serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_vf_stats vf_stats = {0}; + struct nbl_stats stats = { 0 }; + int err = NBL_CHAN_RESP_OK; - f = nbl_find_filter(adapter, macaddr); - if (!f) { - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return f; + disp_ops->get_net_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &stats); - ether_addr_copy(f->macaddr, macaddr); - list_add_tail(&f->list, &net_resource_mgt->mac_filter_list); - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER; - } + vf_stats.rx_packets = stats.rx_packets; + vf_stats.tx_packets = stats.tx_packets; + vf_stats.rx_bytes = stats.rx_bytes; + vf_stats.tx_bytes = stats.tx_bytes; + vf_stats.multicast = stats.rx_multicast_packets; + vf_stats.rx_dropped = 0; - return f; + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_STATS, msg_id, + err, &vf_stats, sizeof(vf_stats)); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); } -static int nbl_addr_unsync(struct net_device *netdev, const u8 *addr) +static void nbl_serv_register_get_vf_stats(struct nbl_service_mgt *serv_mgt) { - struct nbl_adapter *adapter; - struct nbl_mac_filter *f; - struct nbl_service_mgt *serv_mgt; - struct nbl_serv_net_resource_mgt *net_resource_mgt; - - adapter = NBL_NETDEV_TO_ADAPTER(netdev); - serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - - if (ether_addr_equal(addr, netdev->dev_addr)) - return 0; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); - f = nbl_find_filter(adapter, addr); - if (f) { - list_del(&f->list); - kfree(f); - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER; - } + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; - return 0; + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_GET_VF_STATS, + nbl_serv_chan_get_vf_stats_resp, serv_mgt); } -static int nbl_addr_sync(struct net_device *netdev, const u8 *addr) +static void nbl_serv_unregister_get_vf_stats(struct nbl_service_mgt *serv_mgt) { - struct nbl_adapter *adapter; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); - adapter = NBL_NETDEV_TO_ADAPTER(netdev); - if (ether_addr_equal(addr, netdev->dev_addr)) + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->unregister_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), NBL_CHAN_MSG_GET_VF_STATS); +} + +int nbl_serv_netdev_open(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_tc_mgt *tc_mgt = NBL_SERV_MGT_TO_TC_MGT(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_serv_ring_vsi_info *vsi_info; + int num_cpus, real_qps, ret = 0; + bool 
netdev_open = true; + + if (!test_bit(NBL_DOWN, adapter->state)) + return -EBUSY; + + netdev_info(netdev, "Nbl open\n"); + + if (ring_mgt->xdp_prog) + nbl_event_notify(NBL_EVENT_NETDEV_STATE_CHANGE, &netdev_open, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + netif_carrier_off(netdev); + nbl_serv_set_sfp_state(serv_mgt, netdev, NBL_COMMON_TO_ETH_ID(common), true, false); + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + if (tc_mgt->num_tc) { + real_qps = tc_mgt->total_qps; + } else if (vsi_info->active_ring_num) { + real_qps = vsi_info->active_ring_num; + } else { + num_cpus = num_online_cpus(); + real_qps = num_cpus > vsi_info->ring_num ? vsi_info->ring_num : num_cpus; + } + + ret = nbl_serv_vsi_open(serv_mgt, netdev, NBL_VSI_DATA, real_qps, 1); + if (ret) + goto vsi_open_fail; + + ret = netif_set_real_num_tx_queues(netdev, real_qps); + if (ret) + goto setup_real_qps_fail; + ret = netif_set_real_num_rx_queues(netdev, real_qps); + if (ret) + goto setup_real_qps_fail; + + netif_tx_start_all_queues(netdev); + clear_bit(NBL_DOWN, adapter->state); + set_bit(NBL_RUNNING, adapter->state); + nbl_serv_set_link_state(serv_mgt, netdev); + + netdev_info(netdev, "Nbl open ok!\n"); + + return 0; + +setup_real_qps_fail: + nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); +vsi_open_fail: + netdev_open = false; + if (ring_mgt->xdp_prog) + nbl_event_notify(NBL_EVENT_NETDEV_STATE_CHANGE, &netdev_open, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + return ret; +} + +int nbl_serv_netdev_stop(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + bool netdev_open = false; + + if (!test_bit(NBL_RUNNING, adapter->state)) + return -EBUSY; + + netdev_info(netdev, "Nbl stop\n"); + set_bit(NBL_DOWN, adapter->state); + clear_bit(NBL_RUNNING, adapter->state); + + nbl_serv_set_sfp_state(serv_mgt, netdev, NBL_COMMON_TO_ETH_ID(common), false, false); + + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + synchronize_net(); + nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); + + if (ring_mgt->xdp_prog) + nbl_event_notify(NBL_EVENT_NETDEV_STATE_CHANGE, &netdev_open, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + netdev_info(netdev, "Nbl stop ok!\n"); + + return 0; +} + +static int nbl_serv_change_rep_mtu(struct net_device *netdev, int new_mtu) +{ + netdev->mtu = new_mtu; + return 0; +} + +static int nbl_serv_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int was_running = 0, err = 0; + int max_mtu; + + max_mtu = disp_ops->get_max_mtu(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (new_mtu > max_mtu) + netdev_notice(netdev, "Netdev already bind xdp prog: new_mtu(%d) > current_max_mtu(%d), try to rebuild rx buffer\n", + new_mtu, max_mtu); + + if (new_mtu) { + netdev->mtu = new_mtu; + nbl_event_notify(NBL_EVENT_CHANGE_MTU, &new_mtu, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + was_running = netif_running(netdev); + if (was_running) { + err = nbl_serv_netdev_stop(netdev); + if (err) { + 
netdev_err(netdev, "Netdev stop failed while changing mtu\n");
+				return err;
+			}
+
+			err = nbl_serv_netdev_open(netdev);
+			if (err) {
+				netdev_err(netdev, "Netdev open failed after changing mtu\n");
+				return err;
+			}
+		}
+	}
+
+	disp_ops->set_mtu(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+			  NBL_COMMON_TO_VSI_ID(common), new_mtu);
+
+	return 0;
+}
+
+static int nbl_serv_set_mac(struct net_device *dev, void *p)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev);
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
+	struct nbl_serv_vlan_node *vlan_node;
+	struct sockaddr *addr = p;
+	struct nbl_netdev_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (!is_valid_ether_addr(addr->sa_data)) {
+		netdev_err(dev, "Attempt to set an invalid mac address %pM\n", addr->sa_data);
+		return -EADDRNOTAVAIL;
+	}
+
+	if (ether_addr_equal(flow_mgt->mac, addr->sa_data))
+		return 0;
+
+	list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) {
+		if (!vlan_node->primary_mac_effective)
+			continue;
+		disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac,
+				      vlan_node->vid, priv->data_vsi);
+		ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data,
+					    vlan_node->vid, priv->data_vsi);
+		if (ret) {
+			netdev_err(dev, "Failed to configure macvlan on vid %u", vlan_node->vid);
+			goto fail;
+		}
+	}
+
+	if (flow_mgt->promisc & BIT(NBL_USER_FLOW)) {
+		disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac,
+				      0, priv->user_vsi);
+		ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data,
+					    0, priv->user_vsi);
+		if (ret) {
+			netdev_err(dev, "Failed to configure macvlan on vid %u for user", 0);
+			goto fail;
+		}
+	}
+
+	disp_ops->set_spoof_check_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				       priv->data_vsi, addr->sa_data);
+
+	ether_addr_copy(flow_mgt->mac, addr->sa_data);
+	eth_hw_addr_set(dev, addr->sa_data);
+
+	if (!NBL_COMMON_TO_VF_CAP(common))
+		disp_ops->set_eth_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					   addr->sa_data, NBL_COMMON_TO_ETH_ID(common));
+
+	return 0;
+fail:
+	list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) {
+		if (!vlan_node->primary_mac_effective)
+			continue;
+		disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data,
+				      vlan_node->vid, priv->data_vsi);
+		disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac,
+				      vlan_node->vid, priv->data_vsi);
+	}
+	return -EAGAIN;
+}
+
+static int nbl_serv_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev);
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+	struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
+	struct nbl_netdev_priv *priv = netdev_priv(dev);
+	struct nbl_serv_vlan_node *vlan_node;
+	bool effective = true;
+	int ret = 0;
+
+	if (vid == NBL_DEFAULT_VLAN_ID)
+		return 0;
+
+	if (flow_mgt->vid != 0)
+		effective = false;
+
+	if (!flow_mgt->unicast_flow_enable)
+		effective = false;
+
+	if (!flow_mgt->trusted_en && flow_mgt->vlan_list_cnt >= NBL_NO_TRUST_MAX_VLAN)
+		return -ENOSPC;
+
+	nbl_debug(common, NBL_DEBUG_COMMON, "add mac-vlan dev for proto 0x%04x, vid %u.",
+		  be16_to_cpu(proto), vid);
+
+	list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) {
+		nbl_debug(common, NBL_DEBUG_COMMON, "add mac-vlan dev vid %u.", vlan_node->vid);
+		if (vlan_node->vid == vid) {
+			vlan_node->ref_cnt++;
+			return 0;
+		}
+	}
+
+	vlan_node = nbl_serv_alloc_vlan_node();
+	if (!vlan_node)
+		return -ENOMEM;
+
+	vlan_node->vid = vid;
+	ret = nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node, effective, priv->data_vsi);
+	if (ret)
+		goto add_macvlan_failed;
+	list_add(&vlan_node->node, &flow_mgt->vlan_list);
+	flow_mgt->vlan_list_cnt++;
+
+	nbl_serv_check_flow_table_spec(serv_mgt);
+
+	return 0;
+
+add_macvlan_failed:
+	nbl_serv_free_vlan_node(vlan_node);
+	return ret;
+}
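+/* Mirrors nbl_serv_rx_add_vid: each vid holds a ref_cnt, and the filter is
+ * only torn down when the last user of that vid releases it. The default
+ * vlan id is never added to the list, so it is never killed here.
+ */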
+static int nbl_serv_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev);
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+	struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
+	struct nbl_netdev_priv *priv = netdev_priv(dev);
+	struct nbl_serv_vlan_node *vlan_node;
+
+	if (vid == NBL_DEFAULT_VLAN_ID)
 		return 0;
-	if (nbl_add_filter(adapter, addr))
+	nbl_debug(common, NBL_DEBUG_COMMON, "del mac-vlan dev for proto 0x%04x, vid %u.",
+		  be16_to_cpu(proto), vid);
+
+	list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) {
+		nbl_debug(common, NBL_DEBUG_COMMON, "del mac-vlan dev vid %u.", vlan_node->vid);
+		if (vlan_node->vid == vid) {
+			vlan_node->ref_cnt--;
+			if (!vlan_node->ref_cnt) {
+				nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node,
+								    0, priv->data_vsi);
+				list_del(&vlan_node->node);
+				flow_mgt->vlan_list_cnt--;
+				nbl_serv_free_vlan_node(vlan_node);
+			}
+			break;
+		}
+	}
+
+	nbl_serv_check_flow_table_spec(serv_mgt);
+
+	return 0;
+}
+
+static int nbl_serv_update_default_vlan(struct nbl_service_mgt *serv_mgt, u16 vid)
+{
+	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
+	struct nbl_serv_vlan_node *vlan_node = NULL;
+	struct nbl_serv_vlan_node *node, *tmp;
+	struct nbl_common_info *common;
+	int ret;
+	u16 vsi;
+	bool other_effective = false;
+
+	if (flow_mgt->vid == vid)
 		return 0;
-		return 0;
-	else
+
+	common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	vsi = NBL_COMMON_TO_VSI_ID(common);
+	rtnl_lock();
+
+	list_for_each_entry(node, &flow_mgt->vlan_list, node) {
+		if (node->vid == vid) {
+			node->ref_cnt++;
+			vlan_node = node;
+			break;
+		}
+	}
+
+	if (!vlan_node)
+		vlan_node = nbl_serv_alloc_vlan_node();
+
+	if (!vlan_node) {
+		rtnl_unlock();
 		return -ENOMEM;
+	}
+
+	vlan_node->vid = vid;
+	/* when restoring to the default vlan id 0, the other vlan interfaces
+	 * need to become effective again
+	 */
+	if (!vid)
+		other_effective = true;
+	list_for_each_entry_safe(node, tmp, &flow_mgt->vlan_list, node) {
+		if (node->vid == flow_mgt->vid && node != vlan_node) {
+			node->ref_cnt--;
+			if (!node->ref_cnt) {
+				nbl_serv_update_vlan_node_effective(serv_mgt, node, 0, vsi);
+				list_del(&node->node);
+				nbl_serv_free_vlan_node(node);
+			}
+		} else if (node->vid != vid) {
+			nbl_serv_update_vlan_node_effective(serv_mgt, node,
+							    other_effective, vsi);
+		}
+	}
+
+	ret = nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node, 1, vsi);
+	if (ret)
+		goto free_vlan_node;
+
+	if (vlan_node->ref_cnt == 1)
+		list_add(&vlan_node->node, &flow_mgt->vlan_list);
+
+	flow_mgt->vid = vid;
+	rtnl_unlock();
+
+	return 0;
+
+free_vlan_node:
+	vlan_node->ref_cnt--;
+	if (!vlan_node->ref_cnt)
+		nbl_serv_free_vlan_node(vlan_node);
+	rtnl_unlock();
+
+	return ret;
+}
+
+static void nbl_serv_get_stats64(struct net_device *netdev, struct
rtnl_link_stats64 *stats) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_stats net_stats = { 0 }; + + if (!stats) { + netdev_err(netdev, "get_link_stats64 stats is null\n"); + return; + } + + disp_ops->get_net_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &net_stats); + + stats->rx_packets = net_stats.rx_packets; + stats->tx_packets = net_stats.tx_packets; + stats->rx_bytes = net_stats.rx_bytes; + stats->tx_bytes = net_stats.tx_bytes; + stats->multicast = net_stats.rx_multicast_packets; + + stats->rx_errors = 0; + stats->tx_errors = 0; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_frame_errors = netdev->stats.rx_frame_errors; + stats->rx_dropped = 0; + stats->tx_dropped = 0; +} + +static int nbl_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + if (!nbl_add_filter(&net_resource_mgt->tmp_del_filter_list, addr)) + return -ENOMEM; + + net_resource_mgt->update_submac = 1; + return 0; +} + +static int nbl_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + if (!nbl_add_filter(&net_resource_mgt->tmp_add_filter_list, addr)) + return -ENOMEM; + + net_resource_mgt->update_submac = 1; + return 0; +} + +static void nbl_modify_submacs(struct nbl_serv_net_resource_mgt *net_resource_mgt) +{ + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct net_device *netdev = net_resource_mgt->netdev; + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_mac_filter *filter, *safe_filter; + + INIT_LIST_HEAD(&net_resource_mgt->tmp_add_filter_list); + INIT_LIST_HEAD(&net_resource_mgt->tmp_del_filter_list); + net_resource_mgt->update_submac = 0; + + netif_addr_lock_bh(netdev); + __dev_uc_sync(net_resource_mgt->netdev, nbl_addr_sync, nbl_addr_unsync); + __dev_mc_sync(net_resource_mgt->netdev, nbl_addr_sync, nbl_addr_unsync); + netif_addr_unlock_bh(netdev); + + if (!net_resource_mgt->update_submac) + return; + + rtnl_lock(); + list_for_each_entry_safe(filter, safe_filter, + &net_resource_mgt->tmp_del_filter_list, list) { + nbl_serv_del_submac_node(serv_mgt, filter->macaddr, priv->data_vsi); + list_del(&filter->list); + kfree(filter); + } + + list_for_each_entry_safe(filter, safe_filter, + &net_resource_mgt->tmp_add_filter_list, list) { + nbl_serv_add_submac_node(serv_mgt, filter->macaddr, + priv->data_vsi, flow_mgt->promisc); + list_del(&filter->list); + kfree(filter); + } + + nbl_serv_check_flow_table_spec(serv_mgt); + rtnl_unlock(); +} + +static void nbl_modify_promisc_mode(struct nbl_serv_net_resource_mgt *net_resource_mgt) +{ + struct net_device *netdev = net_resource_mgt->netdev; + 
struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + bool mode = 0, multi = 0; + bool need_flow = 1; + bool unicast_enable, multicast_enable; + + rtnl_lock(); + net_resource_mgt->curr_promiscuout_mode = netdev->flags; + + if (((netdev->flags & (IFF_PROMISC)) || flow_mgt->force_promisc) && + !NBL_COMMON_TO_VF_CAP(NBL_SERV_MGT_TO_COMMON(serv_mgt))) + mode = 1; + + if ((netdev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || flow_mgt->force_promisc) + multi = 1; + + if (flow_mgt->promisc & (BIT(NBL_USER_FLOW) | BIT(NBL_MIRROR))) { + multi = 0; + mode = 0; + need_flow = 0; + } + + if (!flow_mgt->trusted_en) + multi = 0; + + unicast_enable = !mode && need_flow; + multicast_enable = !multi && need_flow; + + if ((flow_mgt->promisc & BIT(NBL_PROMISC)) ^ (mode << NBL_PROMISC)) + if (!NBL_COMMON_TO_VF_CAP(NBL_SERV_MGT_TO_COMMON(serv_mgt))) { + disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + priv->data_vsi, mode); + if (mode) + flow_mgt->promisc |= BIT(NBL_PROMISC); + else + flow_mgt->promisc &= ~BIT(NBL_PROMISC); + } + + if ((flow_mgt->promisc & BIT(NBL_ALLMULTI)) ^ (multi << NBL_ALLMULTI)) { + disp_ops->cfg_multi_mcast(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + priv->data_vsi, multi); + if (multi) + flow_mgt->promisc |= BIT(NBL_ALLMULTI); + else + flow_mgt->promisc &= ~BIT(NBL_ALLMULTI); + } + + if (flow_mgt->multicast_flow_enable ^ multicast_enable) { + nbl_serv_update_mcast_submac(serv_mgt, multicast_enable, + unicast_enable, priv->data_vsi); + flow_mgt->multicast_flow_enable = multicast_enable; + } + + if (flow_mgt->unicast_flow_enable ^ unicast_enable) { + nbl_serv_update_promisc_vlan(serv_mgt, unicast_enable, priv->data_vsi); + flow_mgt->unicast_flow_enable = unicast_enable; + } + + if (flow_mgt->trusted_update) { + flow_mgt->trusted_update = 0; + if (flow_mgt->active_submac_list < flow_mgt->submac_list_cnt) + nbl_serv_update_mcast_submac(serv_mgt, flow_mgt->multicast_flow_enable, + flow_mgt->unicast_flow_enable, priv->data_vsi); + } + rtnl_unlock(); +} + +static void nbl_serv_set_rx_mode(struct net_device *dev) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(dev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); +} + +static void nbl_serv_change_rx_flags(struct net_device *dev, int flag) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(dev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); +} + +static netdev_features_t +nbl_serv_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) +{ + u32 l2_l3_hrd_len = 0, l4_hrd_len = 0, total_hrd_len = 0; + u8 l4_proto = 0; + __be16 protocol, frag_off; + int ret; + unsigned char *exthdr; + unsigned int offset = 0; + int nexthdr = 0; + int exthdr_num = 0; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + + 
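+	/* Overview of the constraints enforced below: CHECKSUM_PARTIAL only,
+	 * GSO MSS within [NBL_TX_TSO_MSS_MIN, NBL_TX_TSO_MSS_MAX], at most
+	 * two IPv6 extension headers (routing/destination headers are
+	 * rejected), and a bounded L2+L3+L4 header length for checksum
+	 * offload and TSO.
+	 */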
/* No point in doing any of this if neither checksum nor GSO are
+	 * being requested for this frame. We can rule out both by just
+	 * checking for CHECKSUM_PARTIAL.
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return features;
+
+	/* We cannot support GSO if the MSS is less than 256 bytes or bigger
+	 * than 16383 bytes; if it is, drop GSO support.
+	 */
+	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < NBL_TX_TSO_MSS_MIN ||
+				skb_shinfo(skb)->gso_size > NBL_TX_TSO_MSS_MAX))
+		features &= ~NETIF_F_GSO_MASK;
+
+	l2_l3_hrd_len = (u32)(skb_transport_header(skb) - skb->data);
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+	protocol = vlan_get_protocol(skb);
+
+	if (protocol == htons(ETH_P_IP)) {
+		l4_proto = ip.v4->protocol;
+	} else if (protocol == htons(ETH_P_IPV6)) {
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr) {
+			ret = ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off);
+			if (ret < 0)
+				goto out_rm_features;
+		}
+
+		/* IPv6 extension headers:
+		 * (1) do not support routing and destination extension headers
+		 * (2) support at most 2 extension headers
+		 */
+		nexthdr = ipv6_find_hdr(skb, &offset, NEXTHDR_ROUTING, NULL, NULL);
+		if (nexthdr == NEXTHDR_ROUTING) {
+			netdev_info(dev, "skb contains ipv6 routing ext header\n");
+			goto out_rm_features;
+		}
+
+		nexthdr = ipv6_find_hdr(skb, &offset, NEXTHDR_DEST, NULL, NULL);
+		if (nexthdr == NEXTHDR_DEST) {
+			netdev_info(dev, "skb contains ipv6 dest ext header\n");
+			goto out_rm_features;
+		}
+
+		exthdr_num = nbl_serv_ipv6_exthdr_num(skb, exthdr - skb->data, ip.v6->nexthdr);
+		if (exthdr_num < 0 || exthdr_num > 2) {
+			netdev_info(dev, "skb ipv6 exthdr_num:%d\n", exthdr_num);
+			goto out_rm_features;
+		}
+	} else {
+		goto out_rm_features;
+	}
+
+	switch (l4_proto) {
+	case IPPROTO_TCP:
+		l4_hrd_len = (l4.tcp->doff) * 4;
+		break;
+	case IPPROTO_UDP:
+		l4_hrd_len = sizeof(struct udphdr);
+		break;
+	case IPPROTO_SCTP:
+		l4_hrd_len = sizeof(struct sctphdr);
+		break;
+	default:
+		goto out_rm_features;
+	}
+
+	total_hrd_len = l2_l3_hrd_len + l4_hrd_len;
+
+	/* TX checksum offload supports a total header len in [0, 255] */
+	if (total_hrd_len > NBL_TX_CHECKSUM_OFFLOAD_L2L3L4_HDR_LEN_MAX)
+		goto out_rm_features;
+
+	/* TSO supports a total header len in [42, 128] */
+	if (total_hrd_len < NBL_TX_TSO_L2L3L4_HDR_LEN_MIN ||
+	    total_hrd_len > NBL_TX_TSO_L2L3L4_HDR_LEN_MAX)
+		features &= ~NETIF_F_GSO_MASK;
+
+	if (skb->encapsulation)
+		goto out_rm_features;
+
+	return features;
+
+out_rm_features:
+	return features & ~(NETIF_F_IP_CSUM |
+			    NETIF_F_IPV6_CSUM |
+			    NETIF_F_SCTP_CRC |
+			    NETIF_F_GSO_MASK);
+}
+
+static int nbl_serv_config_rxhash(void *priv, bool enable)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+	u32 rxfh_indir_size = 0;
+	u32 *indir = NULL;
+	int i = 0;
+
+	disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				      NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size);
+	indir = devm_kcalloc(dev, rxfh_indir_size, sizeof(u32), GFP_KERNEL);
+	if (!indir)
+		return -ENOMEM;
+	if (enable) {
+		if (ring_mgt->rss_indir_user) {
+			memcpy(indir, ring_mgt->rss_indir_user,
rxfh_indir_size * sizeof(u32)); + } else { + for (i = 0; i < rxfh_indir_size; i++) + indir[i] = i % vsi_info->active_ring_num; + } + } + disp_ops->set_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), + indir, rxfh_indir_size); + devm_kfree(dev, indir); + return 0; +} + +static int nbl_serv_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + netdev_features_t changed = netdev->features ^ features; + u16 vsi_id = NBL_COMMON_TO_VSI_ID(common); + bool enable = false; + + if (!common->is_vf) { + if (changed & NETIF_F_NTUPLE) { + enable = !!(features & NETIF_F_NTUPLE); + + disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_CHAN_FDIR_RULE_NORMAL, vsi_id, enable); + } + } + + if (changed & NETIF_F_RXHASH) { + enable = !!(features & NETIF_F_RXHASH); + nbl_serv_config_rxhash(serv_mgt, enable); + } + + return 0; +} + +static int nbl_serv_config_fd_flow_state(void *priv, enum nbl_chan_fdir_rule_type type, u32 state) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u16 vsi_id = NBL_COMMON_TO_VSI_ID(common); + + return disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + type, vsi_id, state); +} + +static LIST_HEAD(nbl_serv_block_cb_list); + +static int nbl_serv_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) +{ + struct nbl_netdev_priv *priv = netdev_priv(dev); + + switch (type) { + case TC_SETUP_BLOCK: { + return flow_block_cb_setup_simple((struct flow_block_offload *)type_data, + &nbl_serv_block_cb_list, + nbl_serv_setup_tc_block_cb, + priv, priv, true); + } + case TC_SETUP_QDISC_MQPRIO: + return nbl_serv_setup_tc_mqprio(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + +static int nbl_serv_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 function_id = U16_MAX; + + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + ether_addr_copy(net_resource_mgt->vf_info[vf_id].mac, mac); + + disp_ops->register_func_mac(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), mac, function_id); + + return 0; +} + +static int nbl_serv_set_vf_rate(struct net_device *dev, int vf_id, int min_rate, int max_rate) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 function_id = U16_MAX; + int ret = 0; + + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + if (vf_id < net_resource_mgt->num_vfs) + ret = disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), 
+ function_id, max_rate, 0); + + if (!ret) + net_resource_mgt->vf_info[vf_id].max_tx_rate = max_rate; + + ret = disp_ops->register_func_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + function_id, max_rate); + + return ret; +} + +static int nbl_serv_set_vf_spoofchk(struct net_device *dev, int vf_id, bool ena) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret = 0; + + if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info) + return -EINVAL; + + if (vf_id < net_resource_mgt->num_vfs) + ret = disp_ops->set_vf_spoof_check(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), vf_id, ena); + + if (!ret) + net_resource_mgt->vf_info[vf_id].spoof_check = ena; + + return ret; +} + +static int nbl_serv_set_vf_link_state(struct net_device *dev, int vf_id, int link_state) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 function_id = U16_MAX; + bool should_notify = false; + int ret = 0; + + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + ret = disp_ops->register_func_link_forced(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + function_id, link_state, &should_notify); + if (!ret && should_notify) + nbl_serv_chan_notify_link_forced_req(serv_mgt, function_id); + + if (!ret) + net_resource_mgt->vf_info[vf_id].state = link_state; + + return ret; +} + +static int nbl_serv_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 function_id = U16_MAX; + bool should_notify = false; + int ret = 0; + + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + ret = disp_ops->register_func_trust(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + function_id, trusted, &should_notify); + if (!ret && should_notify) + nbl_serv_chan_notify_trust_req(serv_mgt, function_id, trusted); + + if (!ret) + net_resource_mgt->vf_info[vf_id].trusted = trusted; + + return ret; +} + +static int __used nbl_serv_set_vf_tx_rate(struct net_device *dev, + int vf_id, int tx_rate, + int burst, bool burst_en) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 function_id = U16_MAX; + int ret = 0; + + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + if (!burst_en) + burst = net_resource_mgt->vf_info[vf_id].meter_tx_burst; + + if (vf_id < net_resource_mgt->num_vfs) + ret = disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + function_id, tx_rate, burst); + + if (!ret) { + 
net_resource_mgt->vf_info[vf_id].meter_tx_rate = tx_rate;
+		if (burst_en)
+			net_resource_mgt->vf_info[vf_id].meter_tx_burst = burst;
+	}
+
+	return ret;
+}
+
+static int __used nbl_serv_set_vf_rx_rate(struct net_device *dev,
+					  int vf_id, int rx_rate,
+					  int burst, bool burst_en)
+{
+	struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev);
+	struct nbl_serv_net_resource_mgt *net_resource_mgt =
+		NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	u16 function_id = U16_MAX;
+	int ret = 0;
+
+	function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id);
+	if (function_id == U16_MAX) {
+		netdev_info(dev, "vf id %d invalid\n", vf_id);
+		return -EINVAL;
+	}
+	if (!burst_en)
+		burst = net_resource_mgt->vf_info[vf_id].meter_rx_burst;
+
+	if (vf_id < net_resource_mgt->num_vfs)
+		ret = disp_ops->set_rx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					    function_id, rx_rate, burst);
+
+	if (!ret) {
+		net_resource_mgt->vf_info[vf_id].meter_rx_rate = rx_rate;
+		if (burst_en)
+			net_resource_mgt->vf_info[vf_id].meter_rx_burst = burst;
+	}
+
+	return ret;
+}
+
+static int nbl_serv_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan, u8 qos, __be16 proto)
+{
+	struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev);
+	struct nbl_serv_net_resource_mgt *net_resource_mgt =
+		NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_notify_vlan_param param = {0};
+	int ret = 0;
+	u16 function_id = U16_MAX;
+	bool should_notify = false;
+
+	if (vlan > 4095 || qos > 7)
+		return -EINVAL;
+
+	function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id);
+	if (function_id == U16_MAX) {
+		netdev_info(dev, "vf id %d invalid\n", vf_id);
+		return -EINVAL;
+	}
+
+	if (vlan) {
+		param.vlan_tci = (vlan & VLAN_VID_MASK) | (qos << VLAN_PRIO_SHIFT);
+		param.vlan_proto = ntohs(proto);
+	} else {
+		proto = 0;
+		qos = 0;
+	}
+
+	ret = disp_ops->register_func_vlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), function_id,
+					   param.vlan_tci, param.vlan_proto,
+					   &should_notify);
+	if (should_notify && !ret) {
+		ret = nbl_serv_chan_notify_vlan_req(serv_mgt, function_id, &param);
+		if (ret)
+			disp_ops->register_func_vlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						     function_id, 0, 0, &should_notify);
+	}
+	if (!ret) {
+		net_resource_mgt->vf_info[vf_id].vlan = vlan;
+		net_resource_mgt->vf_info[vf_id].vlan_proto = ntohs(proto);
+		net_resource_mgt->vf_info[vf_id].vlan_qos = qos;
+	}
+
+	return ret;
+}
+
+static int nbl_serv_get_vf_config(struct net_device *dev, int vf_id, struct ifla_vf_info *ivi)
+{
+	struct nbl_netdev_priv *priv = netdev_priv(dev);
+	struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv);
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+	struct nbl_serv_net_resource_mgt *net_resource_mgt =
+		NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info;
+
+	if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info)
+		return -EINVAL;
+
+	ivi->vf = vf_id;
+	ivi->spoofchk = vf_info[vf_id].spoof_check;
+	ivi->linkstate = vf_info[vf_id].state;
+	ivi->max_tx_rate = vf_info[vf_id].max_tx_rate;
+	ivi->vlan = vf_info[vf_id].vlan;
+	ivi->vlan_proto = htons(vf_info[vf_id].vlan_proto);
+	ivi->qos = vf_info[vf_id].vlan_qos;
+	ivi->trusted = vf_info[vf_id].trusted;
+	ether_addr_copy(ivi->mac, vf_info[vf_id].mac);
+
+	return 0;
+}
+
+static int nbl_serv_get_vf_stats(struct net_device *dev, int vf_id, struct ifla_vf_stats
*vf_stats) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_vf_stats stats = {0}; + u16 func_id = U16_MAX; + u8 is_vdpa = 0; + int ret = 0; + + func_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); + if (func_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + ret = disp_ops->check_vf_is_active(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id); + if (!ret) + return 0; + + ret = disp_ops->check_vf_is_vdpa(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, &is_vdpa); + if (!ret && is_vdpa) + ret = disp_ops->get_vdpa_vf_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, &stats); + else + ret = nbl_serv_chan_get_vf_stats_req(serv_mgt, func_id, &stats); + + if (ret) + return -EIO; + + vf_stats->rx_packets = stats.rx_packets; + vf_stats->tx_packets = stats.tx_packets; + vf_stats->rx_bytes = stats.rx_bytes; + vf_stats->tx_bytes = stats.tx_bytes; + vf_stats->broadcast = stats.broadcast; + vf_stats->multicast = stats.multicast; + vf_stats->rx_dropped = stats.rx_dropped; + vf_stats->tx_dropped = stats.tx_dropped; + + return 0; +} + +static u8 nbl_get_dscp_up(struct nbl_serv_net_resource_mgt *net_resource_mgt, struct sk_buff *skb) +{ + u8 dscp = 0; + + if (skb->protocol == htons(ETH_P_IP)) + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + else if (skb->protocol == htons(ETH_P_IPV6)) + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + + return net_resource_mgt->qos_info.dscp2prio_map[dscp]; +} + +static u16 +nbl_serv_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + if (net_resource_mgt->qos_info.trust_mode == NBL_TRUST_MODE_DSCP) + skb->priority = nbl_get_dscp_up(net_resource_mgt, skb); + return netdev_pick_tx(netdev, skb, sb_dev); +} + +static void nbl_serv_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + ring_mgt->tx_rings[vsi_info->ring_offset + txqueue].need_recovery = true; + ring_mgt->tx_rings[vsi_info->ring_offset + txqueue].tx_timeout_count++; + + nbl_warn(common, NBL_DEBUG_QUEUE, "TX timeout on queue %d", txqueue); + + nbl_common_queue_work(&NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->tx_timeout, false, false); +} + +static int nbl_serv_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *netdev, u32 filter_mask, int nlflags) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + u16 bmode; + + bmode = net_resource_mgt->bridge_mode; + + return ndo_dflt_bridge_getlink(skb, pid, seq, netdev, bmode, 0, 0, nlflags, + filter_mask, NULL); +} + +static int 
nbl_serv_bridge_setlink(struct net_device *netdev, struct nlmsghdr *nlh,
+				   u16 flags, struct netlink_ext_ack *extack)
+{
+	struct nbl_netdev_priv *priv = netdev_priv(netdev);
+	struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv);
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nlattr *attr, *br_spec;
+	u16 mode;
+	int ret, rem;
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (!br_spec)
+		return -EINVAL;
+
+	nla_for_each_nested(attr, br_spec, rem) {
+		if (nla_type(attr) != IFLA_BRIDGE_MODE)
+			continue;
+
+		mode = nla_get_u16(attr);
+		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
+			return -EINVAL;
+
+		if (mode == net_resource_mgt->bridge_mode)
+			continue;
+
+		ret = disp_ops->set_bridge_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), mode);
+		if (ret) {
+			netdev_info(netdev, "bridge_setlink failed 0x%x", ret);
+			return ret;
+		}
+
+		net_resource_mgt->bridge_mode = mode;
+	}
+
+	return 0;
+}
+
+static int nbl_serv_get_phys_port_name(struct net_device *dev, char *name, size_t len)
+{
+	struct nbl_common_info *common = NBL_NETDEV_TO_COMMON(dev);
+	u8 pf_id;
+
+	pf_id = common->eth_id;
+	if ((NBL_COMMON_TO_ETH_MODE(common) == NBL_TWO_ETHERNET_PORT) && common->eth_id == 2)
+		pf_id = 1;
+
+	if (snprintf(name, len, "p%u", pf_id) >= len)
+		return -EOPNOTSUPP;
+	return 0;
+}
+
+static int nbl_serv_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid)
+{
+	struct nbl_netdev_priv *priv = netdev_priv(dev);
+	struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv);
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_NETDEV_TO_COMMON(dev);
+	u8 mac[ETH_ALEN];
+
+	if (common->devlink_port && common->devlink_port->devlink)
+		return -EOPNOTSUPP;
+
+	/* return success to avoid a linkwatch_do_dev warning */
+	if (test_bit(NBL_FATAL_ERR, adapter->state))
+		return 0;
+
+	disp_ops->get_base_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), mac);
+
+	ppid->id_len = ETH_ALEN;
+	memcpy(&ppid->id, mac, ppid->id_len);
+
+	return 0;
+}
+
+static int nbl_serv_register_net(void *priv, struct nbl_register_net_param *register_param,
+				 struct nbl_register_net_result *register_result)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	int p4_type, ret = 0;
+
+	ret = disp_ops->register_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				     register_param, register_result);
+	if (ret)
+		return ret;
+
+	p4_type = disp_ops->get_p4_used(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	switch (p4_type) {
+	case NBL_P4_DEFAULT:
+		set_bit(NBL_FLAG_P4_DEFAULT, serv_mgt->flags);
+		break;
+	default:
+		nbl_warn(NBL_SERV_MGT_TO_COMMON(serv_mgt), NBL_DEBUG_CUSTOMIZED_P4,
+			 "Unknown P4 type %d", p4_type);
+	}
+
+	return 0;
+}
+
+static int nbl_serv_unregister_net(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->unregister_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
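+/* Allocate the hardware tx/rx queues for this vsi and wire up the kernel
+ * rings and vectors: each ring pair gets its local queue id, and each
+ * vector below the XDP ring offset is bound to its global MSIX vector and
+ * irq-enable registers through the dispatch layer.
+ */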
+static int nbl_serv_setup_txrx_queues(void *priv, u16 vsi_id, u16 queue_num, u16 net_vector_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_vector *vector;
+	int i, ret = 0;
+
+	/* queue_num includes both user and kernel queues */
+	ret = disp_ops->alloc_txrx_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, queue_num);
+	if (ret)
+		return -EFAULT;
+
+	/* ring_mgt->tx_ring_num covers only the kernel rings */
+	for (i = 0; i < ring_mgt->tx_ring_num; i++) {
+		ring_mgt->tx_rings[i].local_queue_id = NBL_PAIR_ID_GET_TX(i);
+		ring_mgt->rx_rings[i].local_queue_id = NBL_PAIR_ID_GET_RX(i);
+	}
+
+	for (i = 0; i < ring_mgt->xdp_ring_offset; i++) {
+		vector = &ring_mgt->vectors[i];
+		vector->local_vector_id = i + net_vector_id;
+		vector->global_vector_id =
+			disp_ops->get_global_vector(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						    vsi_id, vector->local_vector_id);
+		vector->irq_enable_base =
+			disp_ops->get_msix_irq_enable_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+							   vector->global_vector_id,
+							   &vector->irq_data);
+
+		disp_ops->set_vector_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					  vector->irq_enable_base,
+					  vector->irq_data, i,
+					  ring_mgt->net_msix_mask_en);
+	}
+
+	return 0;
+}
+
+static void nbl_serv_remove_txrx_queues(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_ring_mgt *ring_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+
+	ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->free_txrx_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+}
+
+static int nbl_serv_init_tx_rate(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	u16 func_id;
+	int ret = 0;
+
+	if (net_resource_mgt->max_tx_rate) {
+		func_id = disp_ops->get_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+		ret = disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					    func_id, net_resource_mgt->max_tx_rate, 0);
+	}
+
+	return ret;
+}
+
+static int nbl_serv_setup_q2vsi(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->setup_q2vsi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+}
+
+static void nbl_serv_remove_q2vsi(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->remove_q2vsi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+}
+
+static int nbl_serv_setup_rss(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->setup_rss(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+}
+
+static void nbl_serv_remove_rss(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->remove_rss(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+}
+
+static int nbl_serv_setup_rss_indir(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt);
+	struct
nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + u32 rxfh_indir_size = 0; + int num_cpus = 0, real_qps = 0; + u32 *indir = NULL; + int i = 0; + + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, &rxfh_indir_size); + indir = devm_kcalloc(dev, rxfh_indir_size, sizeof(u32), GFP_KERNEL); + if (!indir) + return -ENOMEM; + + num_cpus = num_online_cpus(); + real_qps = num_cpus > vsi_info->ring_num ? vsi_info->ring_num : num_cpus; + + for (i = 0; i < rxfh_indir_size; i++) + indir[i] = i % real_qps; + + disp_ops->set_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, indir, rxfh_indir_size); + devm_kfree(dev, indir); + return 0; +} + +static int nbl_serv_alloc_rings(void *priv, struct net_device *netdev, struct nbl_ring_param *param) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct device *dev; + struct nbl_serv_ring_mgt *ring_mgt; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ring_mgt->tx_ring_num = param->tx_ring_num; + ring_mgt->rx_ring_num = param->rx_ring_num; + ring_mgt->tx_desc_num = param->queue_size; + ring_mgt->rx_desc_num = param->queue_size; + ring_mgt->xdp_ring_offset = param->xdp_ring_offset; + + ret = disp_ops->alloc_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), netdev, param); + if (ret) + goto alloc_rings_fail; + + ret = nbl_serv_set_tx_rings(ring_mgt, netdev, dev); + if (ret) + goto set_tx_fail; + ret = nbl_serv_set_rx_rings(ring_mgt, netdev, dev); + if (ret) + goto set_rx_fail; + + ret = nbl_serv_set_vectors(serv_mgt, netdev, dev); + if (ret) + goto set_vectors_fail; + + ret = nbl_serv_register_xdp_rxq(serv_mgt, ring_mgt); + if (ret) + goto register_xdp_err; + + return 0; + +register_xdp_err: + nbl_serv_remove_vectors(ring_mgt, dev); +set_vectors_fail: + nbl_serv_remove_rx_ring(ring_mgt, dev); +set_rx_fail: + nbl_serv_remove_tx_ring(ring_mgt, dev); +set_tx_fail: + disp_ops->remove_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +alloc_rings_fail: + return ret; +} + +static void nbl_serv_free_rings(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct device *dev; + struct nbl_serv_ring_mgt *ring_mgt; + struct nbl_dispatch_ops *disp_ops; + + dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + nbl_serv_unregister_xdp_rxq(serv_mgt, ring_mgt); + nbl_serv_remove_vectors(ring_mgt, dev); + nbl_serv_remove_rx_ring(ring_mgt, dev); + nbl_serv_remove_tx_ring(ring_mgt, dev); + + disp_ops->remove_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_enable_napis(void *priv, u16 vsi_index) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[vsi_index]; + u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num; + int i; + + for (i = start; i < end; i++) + napi_enable(&ring_mgt->vectors[i].nbl_napi->napi); + + return 0; +} + +static void nbl_serv_disable_napis(void *priv, u16 vsi_index) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info 
= &ring_mgt->vsi_info[vsi_index];
+	u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num;
+	int i;
+
+	for (i = start; i < end; i++)
+		napi_disable(&ring_mgt->vectors[i].nbl_napi->napi);
+}
+
+static void nbl_serv_set_mask_en(void *priv, bool enable)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_ring_mgt *ring_mgt;
+
+	ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+
+	ring_mgt->net_msix_mask_en = enable;
+}
+
+static int nbl_serv_start_net_flow(void *priv, struct net_device *netdev, u16 vsi_id, u16 vid,
+				   bool trusted)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
+	struct nbl_serv_vlan_node *vlan_node;
+	u8 mac[ETH_ALEN];
+	int ret = 0;
+
+	flow_mgt->unicast_flow_enable = true;
+	flow_mgt->multicast_flow_enable = true;
+	/* Clear cfgs, in case this function exited abnormally last time */
+	disp_ops->clear_accel_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+	disp_ops->clear_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+	disp_ops->set_mtu(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+			  NBL_COMMON_TO_VSI_ID(common), netdev->mtu);
+	if (!common->is_vf)
+		disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					       NBL_CHAN_FDIR_RULE_NORMAL, vsi_id, 1);
+
+	if (!list_empty(&flow_mgt->vlan_list))
+		return -ECONNRESET;
+
+	vlan_node = nbl_serv_alloc_vlan_node();
+	if (!vlan_node)
+		goto alloc_fail;
+
+	flow_mgt->vid = vid;
+	flow_mgt->trusted_en = trusted;
+	vlan_node->vid = vid;
+	ether_addr_copy(flow_mgt->mac, netdev->dev_addr);
+	ret = nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node, 1, vsi_id);
+	if (ret)
+		goto add_macvlan_fail;
+
+	list_add(&vlan_node->node, &flow_mgt->vlan_list);
+	flow_mgt->vlan_list_cnt++;
+
+	memset(mac, 0xFF, ETH_ALEN);
+	ret = nbl_serv_add_submac_node(serv_mgt, mac, vsi_id, 0);
+	if (ret)
+		goto add_submac_failed;
+
+	return 0;
+
+add_submac_failed:
+	nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node, 0, vsi_id);
+add_macvlan_fail:
+	nbl_serv_free_vlan_node(vlan_node);
+alloc_fail:
+	return ret;
+}
+
+static void nbl_serv_stop_net_flow(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct net_device *dev = net_resource_mgt->netdev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(dev);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
+
+	nbl_serv_del_all_submacs(serv_mgt, net_priv->data_vsi);
+	nbl_serv_del_all_vlans(serv_mgt);
+
+	if (!common->is_vf)
+		disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					       NBL_CHAN_FDIR_RULE_NORMAL, vsi_id, 0);
+
+	disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+
+	disp_ops->set_vf_spoof_check(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				     vsi_id, -1, false);
+	memset(flow_mgt->mac, 0, sizeof(flow_mgt->mac));
+}
+
+static void nbl_serv_clear_flow(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->clear_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+}
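+/* The service ops below are thin pass-throughs: they translate a service
+ * level call into the corresponding dispatch op for the given vsi, without
+ * keeping any extra state of their own.
+ */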
+static int nbl_serv_set_promisc_mode(void *priv, u16 vsi_id, u16 mode)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, mode);
+}
+
+static int nbl_serv_cfg_multi_mcast(void *priv, u16 vsi_id, u16 enable)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->cfg_multi_mcast(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, enable);
+}
+
+static int nbl_serv_set_lldp_flow(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->add_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+}
+
+static void nbl_serv_remove_lldp_flow(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->del_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+}
+
+static int nbl_serv_start_mgt_flow(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->setup_multi_group(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+static void nbl_serv_stop_mgt_flow(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->remove_multi_group(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+static u32 nbl_serv_get_tx_headroom(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_tx_headroom(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+/**
+ * This op gets the flexible product capability from the ctrl device. If the
+ * device does not have the manager capability, it needs to fetch the
+ * capability from the ctrl device over the channel.
+ */
+static bool nbl_serv_get_product_flex_cap(void *priv, enum nbl_flex_cap_type cap_type)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_product_flex_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					      cap_type);
+}
+
+/**
+ * This op gets the fixed product capability from the resource layer. The
+ * capability is fixed by product_type, so it does not need to be fetched
+ * from the ctrl device.
+ */
+static bool nbl_serv_get_product_fix_cap(void *priv, enum nbl_fix_cap_type cap_type)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					     cap_type);
+}
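+/* Chip bring-up runs in a fixed order: chip module init, then queue init,
+ * then vsi init. On failure the error is returned as-is; the labels below
+ * currently perform no teardown.
+ */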
failed\n"); + goto module_init_fail; + } + + ret = disp_ops->queue_init(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) { + dev_err(dev, "queue_init failed\n"); + goto queue_init_fail; + } + + ret = disp_ops->vsi_init(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) { + dev_err(dev, "vsi_init failed\n"); + goto vsi_init_fail; + } + + return 0; + +vsi_init_fail: +queue_init_fail: +module_init_fail: + return ret; +} + +static int nbl_serv_destroy_chip(void *p) +{ + return 0; +} + +static int nbl_serv_configure_msix_map(void *priv, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->configure_msix_map(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), num_net_msix, + num_others_msix, net_msix_mask_en); + if (ret) + return -EIO; + + return 0; +} + +static int nbl_serv_destroy_msix_map(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->destroy_msix_map(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) + return -EIO; + + return 0; +} + +static int nbl_serv_enable_mailbox_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->enable_mailbox_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector_id, enable_msix); + if (ret) + return -EIO; + + return 0; +} + +static int nbl_serv_enable_abnormal_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->enable_abnormal_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector_id, enable_msix); + if (ret) + return -EIO; + + return 0; +} + +static irqreturn_t nbl_serv_clean_rings(int __always_unused irq, void *data) +{ + struct nbl_serv_vector *vector = (struct nbl_serv_vector *)data; + + napi_schedule_irqoff(&vector->nbl_napi->napi); + + return IRQ_HANDLED; +} + +static int nbl_serv_request_net_irq(void *priv, struct nbl_msix_info_param *msix_info) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_serv_ring *tx_ring, *rx_ring; + struct nbl_serv_vector *vector; + u32 irq_num; + int i, ret = 0; + + for (i = 0; i < ring_mgt->xdp_ring_offset; i++) { + tx_ring = &ring_mgt->tx_rings[i]; + rx_ring = &ring_mgt->rx_rings[i]; + vector = &ring_mgt->vectors[i]; + vector->tx_ring = tx_ring; + vector->rx_ring = rx_ring; + + irq_num = msix_info->msix_entries[i].vector; + snprintf(vector->name, sizeof(vector->name), "nbl_txrx%d@pci:%s", + i, pci_name(NBL_COMMON_TO_PDEV(common))); + ret = devm_request_irq(dev, irq_num, nbl_serv_clean_rings, 0, + vector->name, vector); + if (ret) { + nbl_err(common, NBL_DEBUG_INTR, "TxRx Queue %u requests MSIX irq failed " + "with error %d", i, ret); + goto request_irq_err; + } + if (!cpumask_empty(&vector->cpumask)) + 
irq_set_affinity_hint(irq_num, &vector->cpumask); + } + + net_resource_mgt->num_net_msix = msix_info->msix_num; + + return 0; + +request_irq_err: + while (--i + 1) { + vector = &ring_mgt->vectors[i]; + + irq_num = msix_info->msix_entries[i].vector; + irq_set_affinity_hint(irq_num, NULL); + devm_free_irq(dev, irq_num, vector); + } + return ret; +} + +static void nbl_serv_free_net_irq(void *priv, struct nbl_msix_info_param *msix_info) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_serv_vector *vector; + u32 irq_num; + int i; + + for (i = 0; i < ring_mgt->xdp_ring_offset; i++) { + vector = &ring_mgt->vectors[i]; + + irq_num = msix_info->msix_entries[i].vector; + irq_set_affinity_hint(irq_num, NULL); + devm_free_irq(dev, irq_num, vector); + } +} + +static u16 nbl_serv_get_global_vector(void *priv, u16 local_vector_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_global_vector(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), local_vector_id); +} + +static u16 nbl_serv_get_msix_entry_id(void *priv, u16 local_vector_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_msix_entry_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), local_vector_id); +} + +static u16 nbl_serv_get_vsi_id(void *priv, u16 func_id, u16 type) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, type); +} + +static void nbl_serv_get_eth_id(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_eth_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, + eth_mode, eth_id, logic_eth_id); +} + +void nbl_serv_get_rep_drop_stats(struct nbl_service_mgt *serv_mgt, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_serv_rep_drop *rep_drop; + u16 rep_data_index; + unsigned int start; + + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + rep_data_index = disp_ops->get_rep_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), rep_vsi_id); + if (rep_data_index >= net_resource_mgt->num_vfs) + return; + + rep_drop = &net_resource_mgt->rep_drop[rep_data_index]; + do { + start = u64_stats_fetch_begin(&rep_drop->rep_drop_syncp); + rep_stats->dropped = rep_drop->tx_dropped; + } while (u64_stats_fetch_retry(&rep_drop->rep_drop_syncp, start)); +} + +static void nbl_serv_rep_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct nbl_netdev_priv *rep_priv = netdev_priv(netdev); + struct nbl_rep_stats rep_stats = { 0 }; + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + 
struct nbl_dispatch_ops *disp_ops; + + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + if (!adapter) { + netdev_err(netdev, "rep get stats, adapter is null\n"); + return; + } + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + if (!stats) { + netdev_err(netdev, "rep get stats, stats is null\n"); + return; + } + + disp_ops->get_rep_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + rep_priv->rep->rep_vsi_id, &rep_stats, true); + stats->tx_packets += rep_stats.packets; + stats->tx_bytes += rep_stats.bytes; + + disp_ops->get_rep_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + rep_priv->rep->rep_vsi_id, &rep_stats, false); + stats->rx_packets += rep_stats.packets; + stats->rx_bytes += rep_stats.bytes; + + nbl_serv_get_rep_drop_stats(serv_mgt, rep_priv->rep->rep_vsi_id, &rep_stats); + stats->tx_dropped += rep_stats.dropped; + stats->rx_dropped = 0; + stats->multicast = 0; + stats->rx_errors = 0; + stats->tx_errors = 0; + stats->rx_length_errors = 0; + stats->rx_crc_errors = 0; + stats->rx_frame_errors = 0; +} + +static void nbl_serv_rep_set_rx_mode(struct net_device *dev) +{ +} + +static int nbl_serv_rep_set_mac(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) { + netdev_err(dev, "Temp to change a invalid mac address %pM\n", addr->sa_data); + return -EADDRNOTAVAIL; + } + + if (ether_addr_equal(dev->dev_addr, addr->sa_data)) + return 0; + + return -EOPNOTSUPP; +} + +static int nbl_serv_rep_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + if (vid == NBL_DEFAULT_VLAN_ID) + return 0; + + return -EAGAIN; +} + +static int nbl_serv_rep_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + if (vid == NBL_DEFAULT_VLAN_ID) + return 0; + + return -EAGAIN; +} + +static LIST_HEAD(nbl_serv_rep_block_cb_list); + +static int nbl_serv_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) +{ + struct nbl_netdev_priv *priv = netdev_priv(dev); + + switch (type) { + case TC_SETUP_BLOCK: { + return flow_block_cb_setup_simple((struct flow_block_offload *)type_data, + &nbl_serv_rep_block_cb_list, + nbl_serv_setup_tc_block_cb, + priv, priv, true); + } + default: + return -EOPNOTSUPP; + } +} + +static int nbl_serv_rep_get_phys_port_name(struct net_device *dev, char *name, size_t len) +{ + struct nbl_netdev_priv *priv = netdev_priv(dev); + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 vf_base_vsi_id; + u16 vf_id; + u8 pf_id; + + pf_id = common->eth_id; + if ((NBL_COMMON_TO_ETH_MODE(common) == NBL_TWO_ETHERNET_PORT) && common->eth_id == 2) + pf_id = 1; + + vf_base_vsi_id = disp_ops->get_vf_base_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_MGT_PF(common)); + vf_id = priv->rep->rep_vsi_id - vf_base_vsi_id; + if (snprintf(name, len, "pf%uvf%u", pf_id, vf_id) >= len) + return -EINVAL; + return 0; +} + +static int nbl_serv_rep_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid) +{ + struct nbl_netdev_priv *priv = netdev_priv(dev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u8 mac[ETH_ALEN]; + + 
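+	/* All reps report the device's permanent base MAC as their parent switch ID */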
+	disp_ops->get_base_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), mac);
+
+	ppid->id_len = ETH_ALEN;
+	memcpy(&ppid->id, mac, ppid->id_len);
+
+	return 0;
+}
+
+static struct nbl_indr_dev_priv *nbl_find_indr_dev_priv(void *priv, struct net_device *netdev,
+							int binder_type)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt;
+	struct nbl_indr_dev_priv *indr_priv;
+
+	if (!netdev)
+		return NULL;
+
+	net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	list_for_each_entry(indr_priv, &net_resource_mgt->indr_dev_priv_list, list)
+		if (indr_priv->indr_dev == netdev && indr_priv->binder_type == binder_type)
+			return indr_priv;
+
+	return NULL;
+}
+
+static void nbl_serv_indr_dev_block_unbind(void *priv)
+{
+	struct nbl_indr_dev_priv *indr_priv = priv;
+	struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(indr_priv->dev_priv);
+	struct device *dev = NBL_ADAPTER_TO_DEV(adapter);
+
+	list_del(&indr_priv->list);
+	devm_kfree(dev, indr_priv);
+}
+
+static LIST_HEAD(nbl_serv_indr_block_cb_list);
+
+static int nbl_serv_indr_dev_setup_block(struct net_device *netdev, struct Qdisc *sch,
+					 struct nbl_netdev_priv *dev_priv,
+					 struct flow_block_offload *flow_bo,
+					 flow_setup_cb_t *setup_cb, void *data,
+					 void (*cleanup)(struct flow_block_cb *block_cb))
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(dev_priv);
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+	struct device *dev = NBL_ADAPTER_TO_DEV(adapter);
+	struct nbl_indr_dev_priv *indr_priv = NULL;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NULL;
+	struct flow_block_cb *block_cb = NULL;
+
+	if (flow_bo->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+	    !(flow_bo->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+	      netif_is_ovs_master(netdev)))
+		return -EOPNOTSUPP;
+
+	flow_bo->unlocked_driver_cb = true;
+	flow_bo->driver_block_list = &nbl_serv_indr_block_cb_list;
+	net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+
+	switch (flow_bo->command) {
+	case FLOW_BLOCK_BIND:
+		indr_priv = nbl_find_indr_dev_priv(serv_mgt, netdev, flow_bo->binder_type);
+		if (indr_priv)
+			return -EEXIST;
+
+		indr_priv = devm_kzalloc(dev, sizeof(struct nbl_indr_dev_priv), GFP_KERNEL);
+		if (!indr_priv)
+			return -ENOMEM;
+
+		indr_priv->indr_dev = netdev;
+		indr_priv->dev_priv = dev_priv;
+		indr_priv->binder_type = flow_bo->binder_type;
+		list_add_tail(&indr_priv->list, &net_resource_mgt->indr_dev_priv_list);
+
+		block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+						    nbl_serv_indr_dev_block_unbind, flow_bo,
+						    netdev, sch, data, dev_priv, cleanup);
+		if (IS_ERR(block_cb)) {
+			netdev_err(netdev, "indr block cb alloc fail\n");
+			list_del(&indr_priv->list);
+			devm_kfree(dev, indr_priv);
+			return PTR_ERR(block_cb);
+		}
+		flow_block_cb_add(block_cb, flow_bo);
+		list_add_tail(&block_cb->driver_list, &nbl_serv_indr_block_cb_list);
+		break;
+	case FLOW_BLOCK_UNBIND:
+		indr_priv = nbl_find_indr_dev_priv(serv_mgt, netdev, flow_bo->binder_type);
+		if (!indr_priv)
+			return -ENOENT;
+
+		block_cb = flow_block_cb_lookup(flow_bo->block, setup_cb, indr_priv);
+		if (!block_cb)
+			return -ENOENT;
+		flow_indr_block_cb_remove(block_cb, flow_bo);
+		list_del(&block_cb->driver_list);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int nbl_serv_indr_dev_setup_tc(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
+				      enum tc_setup_type type, void *type_data, void *data,
+				      void (*cleanup)(struct flow_block_cb *block_cb))
+{
+	struct nbl_netdev_priv *priv = cb_priv;
+
+	switch (type) {
+	case TC_SETUP_BLOCK:
+		return nbl_serv_indr_dev_setup_block(dev, sch, priv, type_data,
+						     nbl_serv_indr_setup_tc_block_cb,
+						     data, cleanup);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void nbl_serv_get_rep_feature(void *priv, struct nbl_register_net_result *register_result)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->get_rep_feature(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), register_result);
+}
+
+static void nbl_serv_get_rep_queue_num(void *priv, u8 *base_queue_id, u8 *rep_queue_num)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+
+	*base_queue_id = (u8)ring_mgt->vsi_info[NBL_VSI_CTRL].ring_offset;
+	*rep_queue_num = (u8)ring_mgt->vsi_info[NBL_VSI_CTRL].ring_num;
+}
+
+static void nbl_serv_get_rep_queue_info(void *priv, u16 *queue_num, u16 *queue_size)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->get_rep_queue_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				     queue_num, queue_size);
+}
+
+static void nbl_serv_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->get_user_queue_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				      queue_num, queue_size, vsi_id);
+}
+
+static int nbl_serv_rep_enqueue(struct sk_buff *skb,
+				struct nbl_serv_rep_queue_mgt *rep_queue_mgt)
+{
+	if (rep_queue_mgt->size == 0)
+		return -EINVAL;
+
+	return ptr_ring_produce(&rep_queue_mgt->ring, skb);
+}
+
+static struct sk_buff *nbl_serv_rep_dequeue(struct nbl_serv_rep_queue_mgt *rep_queue_mgt)
+{
+	struct sk_buff *skb;
+
+	if (rep_queue_mgt->size == 0)
+		return NULL;
+
+	if (__ptr_ring_empty(&rep_queue_mgt->ring))
+		skb = NULL;
+	else
+		skb = __ptr_ring_consume(&rep_queue_mgt->ring);
+
+	if (unlikely(!skb)) {
+		/* Re-check emptiness after the barrier so an skb produced
+		 * concurrently on another CPU is not missed.
+		 */
+		smp_mb__after_atomic();
+		if (!__ptr_ring_empty(&rep_queue_mgt->ring))
+			skb = __ptr_ring_consume(&rep_queue_mgt->ring);
+	}
+
+	return skb;
+}
+
+static inline bool nbl_serv_rep_queue_mgt_start(struct nbl_serv_rep_queue_mgt *rep_queue_mgt)
+{
+	return spin_trylock(&rep_queue_mgt->seq_lock);
+}
+
+static inline void nbl_serv_rep_queue_mgt_end(struct nbl_serv_rep_queue_mgt *rep_queue_mgt)
+{
+	spin_unlock(&rep_queue_mgt->seq_lock);
+}
+
+static void nbl_serv_rep_update_drop_stats(void *priv, struct sk_buff *skb)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_net_resource_mgt *net_resource_mgt;
+	u16 rep_vsi_id;
+	u16 rep_data_index;
+
+	rep_vsi_id = *(u16 *)&skb->cb[NBL_SKB_FILL_VSI_ID_OFF];
+	net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	rep_data_index = disp_ops->get_rep_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), rep_vsi_id);
+	dev_kfree_skb_any(skb);
+	if (rep_data_index >= net_resource_mgt->num_vfs)
+		return;
+
+	u64_stats_update_begin(&net_resource_mgt->rep_drop[rep_data_index].rep_drop_syncp);
+	net_resource_mgt->rep_drop[rep_data_index].tx_dropped++;
+	u64_stats_update_end(&net_resource_mgt->rep_drop[rep_data_index].rep_drop_syncp);
+}
+
+static void nbl_serv_rep_queue_mgt_run(struct nbl_serv_rep_queue_mgt *rep_queue_mgt,
+				       struct net_device *netdev)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+	struct nbl_resource_pt_ops *pt_ops = NBL_ADAPTER_TO_RES_PT_OPS(adapter);
+	struct sk_buff *skb;
+	netdev_tx_t ret = NETDEV_TX_OK;
+	int i = 0;
+
+	skb = nbl_serv_rep_dequeue(rep_queue_mgt);
+	if (!skb)
+		return;
+	for (; skb; skb = nbl_serv_rep_dequeue(rep_queue_mgt)) {
+		ret = pt_ops->rep_xmit(skb, rep_queue_mgt->netdev);
+		if (ret == NETDEV_TX_BUSY) {
+			if (net_ratelimit())
+				netdev_dbg(netdev, "dequeued skb hit tx busy\n");
+			/* never hang in softirq for too long; if TX_BUSY is
+			 * returned, drop the skb
+			 */
+			nbl_serv_rep_update_drop_stats(serv_mgt, skb);
+		}
+		if (i++ >= NBL_DEFAULT_REP_TX_MAX_NUM)
+			return;
+	}
+}
+
+static netdev_tx_t nbl_serv_rep_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct nbl_netdev_priv *rep_priv = netdev_priv(netdev);
+	struct nbl_adapter *adapter;
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_serv_rep_queue_mgt *rep_queue_mgt;
+	int ret;
+	u8 rep_queue_idx;
+	u8 i;
+	bool has_locked_flag = false;
+
+	adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+
+	rep_queue_idx = (rep_priv->rep->rep_vsi_id - 1) % rep_priv->rep->rep_queue_num;
+	rep_queue_mgt = &serv_mgt->rep_queue_mgt[rep_queue_idx];
+	skb->queue_mapping = rep_queue_idx + rep_priv->rep->base_queue_id;
+	*(u16 *)(&skb->cb[NBL_SKB_FILL_VSI_ID_OFF]) = rep_priv->rep->rep_vsi_id;
+	skb->cb[NBL_SKB_FILL_EXT_HDR_OFF] = NBL_REP_FILL_EXT_HDR;
+	ret = nbl_serv_rep_enqueue(skb, rep_queue_mgt);
+
+	if (unlikely(ret)) {
+		if (net_ratelimit())
+			netdev_info(netdev, "rep enqueue failed, size:%d, rep_vsi_id:%d\n",
+				    rep_queue_mgt->size, rep_priv->rep->rep_vsi_id);
+	}
+	for (i = 0; i < NBL_DEFAULT_REP_TX_RETRY_NUM; i++) {
+		if (nbl_serv_rep_queue_mgt_start(rep_queue_mgt)) {
+			has_locked_flag = true;
+			nbl_serv_rep_queue_mgt_run(rep_queue_mgt, netdev);
+			nbl_serv_rep_queue_mgt_end(rep_queue_mgt);
+		}
+	}
+
+	if (has_locked_flag) {
+		if (ret)
+			ret = NET_XMIT_CN;
+		else
+			ret = NET_XMIT_SUCCESS;
+	}
+
+	if (likely(ret)) {
+		/* enqueue failed but the lock was taken: ask the stack to retry */
+		if (ret == NET_XMIT_CN) {
+			return NETDEV_TX_BUSY;
+		} else if (ret == NET_XMIT_SUCCESS) {
+			/* enqueue and lock both succeeded: the xmit is done */
+			return NETDEV_TX_OK;
+		}
+		/* both enqueue and lock failed: drop the skb, no retry needed */
+		nbl_serv_rep_update_drop_stats(serv_mgt, skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* enqueue succeeded but the lock was not taken: another context will
+	 * drain the queue, so treat the xmit as done
+	 */
+	return NETDEV_TX_OK;
+}
+
+static int nbl_serv_alloc_rep_queue_mgt(void *priv, struct net_device *netdev)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct device *dev;
+	int i, ret;
+	u8 base_queue_id;
+	u8 rep_queue_num;
+
+	if (!serv_mgt)
+		return -EINVAL;
+
+	dev = NBL_SERV_MGT_TO_DEV(serv_mgt);
+
+	dev_info(dev, "nbl serv alloc rep queue mgt start\n");
+	nbl_serv_get_rep_queue_num(serv_mgt, &base_queue_id, &rep_queue_num);
+	serv_mgt->rep_queue_mgt = devm_kcalloc(dev, rep_queue_num,
+					       sizeof(struct nbl_serv_rep_queue_mgt), GFP_KERNEL);
+	if (!serv_mgt->rep_queue_mgt)
+		return -ENOMEM;
+	for (i = 0; i < rep_queue_num; i++) {
+		ret = ptr_ring_init(&serv_mgt->rep_queue_mgt[i].ring,
+				    NBL_REP_QUEUE_MGT_DESC_NUM, GFP_KERNEL);
+		if (ret) {
+			dev_err(dev, "ptr ring init failed\n");
+			goto free_ptr_ring;
+		}
+
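+		/* seq_lock arbitrates which xmit context drains this ring */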
+		spin_lock_init(&serv_mgt->rep_queue_mgt[i].seq_lock);
+		serv_mgt->rep_queue_mgt[i].size = NBL_REP_QUEUE_MGT_DESC_NUM;
+		serv_mgt->rep_queue_mgt[i].netdev = netdev;
+		dev_info(dev, "rep_queue_mgt init success\n");
+	}
+	dev_info(dev, "nbl serv alloc rep queue mgt end\n");
+
+	return 0;
+
+free_ptr_ring:
+	/* ring i failed to init, so unwind only the rings before it */
+	while (--i >= 0)
+		ptr_ring_cleanup(&serv_mgt->rep_queue_mgt[i].ring, 0);
+
+	devm_kfree(dev, serv_mgt->rep_queue_mgt);
+	serv_mgt->rep_queue_mgt = NULL;
+	return ret;
+}
+
+static int nbl_serv_free_rep_queue_mgt(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct device *dev;
+	int i;
+	u8 base_queue_id;
+	u8 rep_queue_num;
+
+	if (!serv_mgt)
+		return -EINVAL;
+
+	dev = NBL_SERV_MGT_TO_DEV(serv_mgt);
+	if (!serv_mgt->rep_queue_mgt)
+		return -EINVAL;
+
+	nbl_serv_get_rep_queue_num(serv_mgt, &base_queue_id, &rep_queue_num);
+	for (i = 0; i < rep_queue_num; i++)
+		ptr_ring_cleanup(&serv_mgt->rep_queue_mgt[i].ring, 0);
+
+	dev_info(dev, "ptr ring cleanup\n");
+	devm_kfree(dev, serv_mgt->rep_queue_mgt);
+	serv_mgt->rep_queue_mgt = NULL;
+
+	return 0;
+}
+
+static void nbl_serv_set_eswitch_mode(void *priv, u16 eswitch_mode)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_serv_net_resource_mgt *net_resource_mgt;
+	bool vld = eswitch_mode != NBL_ESWITCH_OFFLOADS;
+
+	net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+
+	disp_ops->set_dport_fc_th_vld(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				      common->eth_id, vld);
+	if (net_resource_mgt->lag_info && net_resource_mgt->lag_info->lag_num > 1)
+		disp_ops->set_shaping_dport_vld(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						common->eth_id, vld);
+	disp_ops->set_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eswitch_mode);
+}
+
+static u16 nbl_serv_get_eswitch_mode(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+static int nbl_serv_alloc_rep_data(void *priv, int num_vfs, u16 vf_base_vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+	struct device *dev;
+
+	if (!serv_mgt)
+		return -EINVAL;
+
+	dev = NBL_SERV_MGT_TO_DEV(serv_mgt);
+	net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	net_resource_mgt->rep_drop = devm_kcalloc(dev, num_vfs,
+						  sizeof(struct nbl_serv_rep_drop),
+						  GFP_KERNEL);
+	if (!net_resource_mgt->rep_drop)
+		return -ENOMEM;
+
+	net_resource_mgt->num_vfs = num_vfs;
+	return disp_ops->alloc_rep_data(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), num_vfs,
+					vf_base_vsi_id);
+}
+
+static void nbl_serv_free_rep_data(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+	struct device *dev;
+
+	if (!serv_mgt)
+		return;
+
+	dev = NBL_SERV_MGT_TO_DEV(serv_mgt);
+	
net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + if (net_resource_mgt->rep_drop) + devm_kfree(dev, net_resource_mgt->rep_drop); + disp_ops->free_rep_data(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_set_rep_netdev_info(void *priv, void *rep_data) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->set_rep_netdev_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), rep_data); +} + +static void nbl_serv_unset_rep_netdev_info(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->unset_rep_netdev_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_disable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->disable_phy_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id); +} + +static int nbl_serv_enable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->enable_phy_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id); +} + +static void nbl_serv_init_acl(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->init_acl(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_uninit_acl(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->uninit_acl(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_set_upcall_rule(void *priv, u8 eth_id, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->add_nd_upcall_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, 0); + + return disp_ops->set_upcall_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, vsi_id); +} + +static int nbl_serv_unset_upcall_rule(void *priv, u8 eth_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->del_nd_upcall_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + + return disp_ops->unset_upcall_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id); +} + +static int nbl_serv_switchdev_init_cmdq(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + return disp_ops->switchdev_init_cmdq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_switchdev_deinit_cmdq(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + return disp_ops->switchdev_deinit_cmdq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_set_tc_flow_info(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct 
nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->set_tc_flow_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_unset_tc_flow_info(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + return disp_ops->unset_tc_flow_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_get_tc_flow_info(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_tc_flow_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_register_indr_dev_tc_offload(void *priv, struct net_device *netdev) +{ + struct nbl_netdev_priv *dev_priv = netdev_priv(netdev); + + return flow_indr_dev_register(nbl_serv_indr_dev_setup_tc, dev_priv); +} + +static void nbl_serv_unregister_indr_dev_tc_offload(void *priv, struct net_device *netdev) +{ + struct nbl_netdev_priv *dev_priv = netdev_priv(netdev); + + flow_indr_dev_unregister(nbl_serv_indr_dev_setup_tc, dev_priv, + nbl_serv_indr_dev_block_unbind); +} + +static void nbl_serv_set_lag_info(void *priv, struct net_device *bond_netdev, u8 lag_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + + net_resource_mgt->lag_info = devm_kzalloc(dev, sizeof(struct nbl_serv_lag_info), + GFP_KERNEL); + if (!net_resource_mgt->lag_info) + return; + net_resource_mgt->lag_info->bond_netdev = bond_netdev; + net_resource_mgt->lag_info->lag_id = lag_id; + + dev_info(dev, "set lag info, bond_netdev:%p, lag_id:%d\n", bond_netdev, lag_id); +} + +static void nbl_serv_unset_lag_info(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + + if (net_resource_mgt->lag_info) { + devm_kfree(dev, net_resource_mgt->lag_info); + net_resource_mgt->lag_info = NULL; + } +} + +static void nbl_serv_set_netdev_ops(void *priv, struct net_device_ops *net_device_ops, bool is_pf) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + + dev_info(dev, "set netdev ops:%p is_pf:%d\n", net_device_ops, is_pf); + if (is_pf) + net_resource_mgt->netdev_ops.pf_netdev_ops = net_device_ops; + else + net_resource_mgt->netdev_ops.rep_netdev_ops = net_device_ops; +} + +static int nbl_serv_enable_lag_protocol(void *priv, u16 eth_id, bool lag_en) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct net_device *dev = net_resource_mgt->netdev; + struct nbl_netdev_priv *net_priv = netdev_priv(dev); + int ret = 0; + + ret = disp_ops->enable_lag_protocol(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, lag_en); + if (lag_en) + ret = 
disp_ops->add_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					       net_priv->data_vsi);
+	else
+		disp_ops->del_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->data_vsi);
+
+	return ret;
+}
+
+static int nbl_serv_cfg_lag_hash_algorithm(void *priv, u16 eth_id, u16 lag_id,
+					   enum netdev_lag_hash hash_type)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->cfg_lag_hash_algorithm(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						eth_id, lag_id, hash_type);
+}
+
+static int nbl_serv_cfg_lag_member_fwd(void *priv, u16 eth_id, u16 lag_id, u8 fwd)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	if (net_resource_mgt->lag_info)
+		net_resource_mgt->lag_info->lag_id = lag_id;
+
+	return disp_ops->cfg_lag_member_fwd(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					    eth_id, lag_id, fwd);
+}
+
+static int nbl_serv_cfg_lag_member_list(void *priv, struct nbl_lag_member_list_param *param)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	int ret = 0;
+	u16 cur_eswitch_mode = NBL_ESWITCH_NONE;
+	bool shaping_vld = true;
+
+	ret = disp_ops->cfg_lag_member_list(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param);
+	if (ret)
+		return ret;
+
+	ret = disp_ops->cfg_duppkt_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param);
+	if (ret)
+		return ret;
+
+	if (net_resource_mgt->lag_info)
+		net_resource_mgt->lag_info->lag_num = param->lag_num;
+
+	cur_eswitch_mode = disp_ops->get_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (cur_eswitch_mode == NBL_ESWITCH_OFFLOADS) {
+		shaping_vld = param->lag_num <= 1;
+		disp_ops->set_shaping_dport_vld(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						common->eth_id, shaping_vld);
+	}
+
+	ret = disp_ops->cfg_eth_bond_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param);
+	if (ret)
+		return ret;
+
+	ret = disp_ops->cfg_duppkt_mcc(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param);
+
+	return ret;
+}
+
+static int nbl_serv_cfg_lag_member_up_attr(void *priv, u16 eth_id, u16 lag_id, bool enable)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->cfg_lag_member_up_attr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						eth_id, lag_id, enable);
+}
+
+static void nbl_serv_net_stats_update_task(struct work_struct *work)
+{
+	struct nbl_serv_net_resource_mgt *serv_net_resource_mgt =
+		container_of(work, struct nbl_serv_net_resource_mgt, net_stats_update);
+	struct nbl_service_mgt *serv_mgt;
+
+	serv_mgt = serv_net_resource_mgt->serv_mgt;
+
+	nbl_serv_update_stats(serv_mgt, false);
+}
+
+static void nbl_serv_rx_mode_async_task(struct work_struct *work)
+{
+	struct nbl_serv_net_resource_mgt *serv_net_resource_mgt =
+		container_of(work, struct nbl_serv_net_resource_mgt, rx_mode_async);
+
+	nbl_modify_submacs(serv_net_resource_mgt);
+	nbl_modify_promisc_mode(serv_net_resource_mgt);
+}
+
+static void nbl_serv_net_task_service_timer(struct timer_list *t)
+{
+	struct nbl_serv_net_resource_mgt *net_resource_mgt =
+		from_timer(net_resource_mgt, t, serv_timer);
+	struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt;
+	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
+
+	mod_timer(&net_resource_mgt->serv_timer,
+		  round_jiffies(net_resource_mgt->serv_timer_period + jiffies));
+	nbl_common_queue_work(&net_resource_mgt->net_stats_update, false, false);
+	if (flow_mgt->pending_async_work) {
+		nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false);
+		flow_mgt->pending_async_work = 0;
+	}
+}
+
+static void nbl_serv_setup_flow_mgt(struct nbl_serv_flow_mgt *flow_mgt)
+{
+	int i = 0;
+
+	INIT_LIST_HEAD(&flow_mgt->vlan_list);
+	for (i = 0; i < NBL_SUBMAC_MAX; i++)
+		INIT_LIST_HEAD(&flow_mgt->submac_list[i]);
+}
+
+static void nbl_serv_register_restore_netdev_queue(struct nbl_service_mgt *serv_mgt)
+{
+	struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt);
+
+	if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt),
+					 NBL_CHAN_TYPE_MAILBOX))
+		return;
+
+	chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt),
+			       NBL_CHAN_MSG_STOP_ABNORMAL_SW_QUEUE,
+			       nbl_serv_chan_stop_abnormal_sw_queue_resp, serv_mgt);
+
+	chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt),
+			       NBL_CHAN_MSG_RESTORE_NETDEV_QUEUE,
+			       nbl_serv_chan_restore_netdev_queue_resp, serv_mgt);
+
+	chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt),
+			       NBL_CHAN_MSG_RESTART_NETDEV_QUEUE,
+			       nbl_serv_chan_restart_netdev_queue_resp, serv_mgt);
+}
+
+static void nbl_serv_set_wake(struct nbl_service_mgt *serv_mgt)
+{
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	u8 eth_id = NBL_COMMON_TO_ETH_ID(common);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	if (!common->is_vf && common->is_ocp)
+		disp_ops->set_wol(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, common->wol_ena);
+}
+
+static void nbl_serv_remove_net_resource_mgt(void *priv)
+{
+	struct device *dev;
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+
+	net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	dev = NBL_COMMON_TO_DEV(common);
+
+	if (net_resource_mgt) {
+		if (common->is_vf) {
+			nbl_serv_unregister_link_forced_notify(serv_mgt);
+			nbl_serv_unregister_vlan_notify(serv_mgt);
+			nbl_serv_unregister_get_vf_stats(serv_mgt);
+			nbl_serv_unregister_trust_notify(serv_mgt);
+			nbl_serv_unregister_mirror_outputport_notify(serv_mgt);
+		}
+		nbl_serv_set_wake(serv_mgt);
+		del_timer_sync(&net_resource_mgt->serv_timer);
+		nbl_common_release_task(&net_resource_mgt->rx_mode_async);
+		nbl_common_release_task(&net_resource_mgt->net_stats_update);
+		nbl_common_release_task(&net_resource_mgt->tx_timeout);
+		if (common->is_vf) {
+			nbl_common_release_task(&net_resource_mgt->update_link_state);
+			nbl_common_release_task(&net_resource_mgt->update_vlan);
+			nbl_common_release_task(&net_resource_mgt->update_mirror_outputport);
+		}
+		devm_kfree(dev, net_resource_mgt);
+		NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) = NULL;
+	}
+}
+
+static int nbl_serv_phy_init(struct nbl_serv_net_resource_mgt *net_resource_mgt)
+{
+	struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	u8 eth_id = NBL_COMMON_TO_ETH_ID(common);
+	struct nbl_dispatch_ops *disp_ops;
+	int ret = 0;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->get_phy_caps(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+			       eth_id, &net_resource_mgt->phy_caps);
+
+	/* disable WoL during driver init */
+	if (!common->is_vf && common->is_ocp)
+		ret = disp_ops->set_wol(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, false);
+
+	return ret;
+}
+
+static void nbl_init_qos_config(struct nbl_serv_net_resource_mgt *net_resource_mgt)
+{
+	struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info;
+	struct nbl_dispatch_ops *disp_ops;
+	int i;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	if (common->is_vf)
+		return;
+
+	qos_info->rdma_bw = NBL_MAX_BW >> 1;
+	qos_info->rdma_rate = NBL_COMMON_TO_ETH_MAX_SPEED(common);
+	qos_info->net_rate = NBL_COMMON_TO_ETH_MAX_SPEED(common);
+	qos_info->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
+	for (i = 0; i < NBL_DSCP_MAX; i++)
+		qos_info->dscp2prio_map[i] = i / NBL_MAX_PFC_PRIORITIES;
+
+	for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++)
+		disp_ops->get_pfc_buffer_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					      NBL_COMMON_TO_ETH_ID(common), i,
+					      &qos_info->buffer_sizes[i][0],
+					      &qos_info->buffer_sizes[i][1]);
+
+	disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), NBL_COMMON_TO_ETH_ID(common),
+				qos_info->pfc, qos_info->trust_mode, qos_info->dscp2prio_map);
+}
+
+static int nbl_serv_init_hw_stats(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(common);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	u8 eth_id = NBL_COMMON_TO_ETH_ID(common);
+	struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+	
struct nbl_ustore_stats ustore_stats = {0}; + int ret = 0; + + net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop = + devm_kcalloc(dev, vsi_info->ring_num, sizeof(u64), GFP_KERNEL); + if (!net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop) { + ret = -ENOMEM; + goto alloc_total_uvn_stat_pkt_drop_fail; + } + + if (!common->is_vf) { + ret = disp_ops->get_ustore_total_pkt_drop_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, &ustore_stats); + if (ret) + goto get_ustore_total_pkt_drop_stats_fail; + net_resource_mgt->hw_stats.start_ustore_stats.rx_drop_packets = + ustore_stats.rx_drop_packets; + net_resource_mgt->hw_stats.start_ustore_stats.rx_trun_packets = + ustore_stats.rx_trun_packets; + } + + return 0; + +get_ustore_total_pkt_drop_stats_fail: + devm_kfree(dev, net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop); +alloc_total_uvn_stat_pkt_drop_fail: + return ret; +} + +static int nbl_serv_remove_hw_stats(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + + devm_kfree(dev, net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop); + return 0; +} + +static int nbl_serv_get_rx_dropped(void *priv, u64 *rx_dropped) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_ustore_stats ustore_stats = {0}; + u8 eth_id = NBL_COMMON_TO_ETH_ID(common); + int i = 0; + + for (i = 0; i < vsi_info->active_ring_num; i++) + *rx_dropped += net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop[i]; + + if (!common->is_vf) { + disp_ops->get_ustore_total_pkt_drop_stats + (NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, &ustore_stats); + *rx_dropped += ustore_stats.rx_drop_packets - + net_resource_mgt->hw_stats.start_ustore_stats.rx_drop_packets; + *rx_dropped += ustore_stats.rx_trun_packets - + net_resource_mgt->hw_stats.start_ustore_stats.rx_trun_packets; + } + return 0; +} + +static int nbl_serv_setup_net_resource_mgt(void *priv, struct net_device *netdev, + u16 vlan_proto, u16 vlan_tci, u32 rate) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_serv_net_resource_mgt *net_resource_mgt; + u32 delay_time; + unsigned long hw_stats_delay_time = 0; + + net_resource_mgt = devm_kzalloc(dev, sizeof(struct nbl_serv_net_resource_mgt), GFP_KERNEL); + if (!net_resource_mgt) + return -ENOMEM; + + net_resource_mgt->netdev = netdev; + net_resource_mgt->serv_mgt = serv_mgt; + net_resource_mgt->vlan_proto = vlan_proto; + net_resource_mgt->vlan_tci = vlan_tci; + net_resource_mgt->max_tx_rate = rate; + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) = net_resource_mgt; + + nbl_serv_phy_init(net_resource_mgt); + nbl_init_qos_config(net_resource_mgt); + nbl_serv_register_restore_netdev_queue(serv_mgt); + if (common->is_vf) { + nbl_serv_register_link_forced_notify(serv_mgt); + nbl_serv_register_vlan_notify(serv_mgt); + 
nbl_serv_register_get_vf_stats(serv_mgt); + nbl_serv_register_trust_notify(serv_mgt); + nbl_serv_register_mirror_outputport_notify(serv_mgt); + } + net_resource_mgt->hw_stats_period = NBL_HW_STATS_PERIOD_SECONDS * HZ; + get_random_bytes(&delay_time, sizeof(delay_time)); + hw_stats_delay_time = delay_time % net_resource_mgt->hw_stats_period; + timer_setup(&net_resource_mgt->serv_timer, nbl_serv_net_task_service_timer, 0); + + net_resource_mgt->serv_timer_period = HZ; + nbl_common_alloc_task(&net_resource_mgt->rx_mode_async, nbl_serv_rx_mode_async_task); + nbl_common_alloc_task(&net_resource_mgt->net_stats_update, nbl_serv_net_stats_update_task); + nbl_common_alloc_task(&net_resource_mgt->tx_timeout, nbl_serv_handle_tx_timeout); + if (common->is_vf) { + nbl_common_alloc_task(&net_resource_mgt->update_link_state, + nbl_serv_update_link_state); + nbl_common_alloc_task(&net_resource_mgt->update_vlan, + nbl_serv_update_vlan); + nbl_common_alloc_task(&net_resource_mgt->update_mirror_outputport, + nbl_serv_update_mirror_outputport); + } + + INIT_LIST_HEAD(&net_resource_mgt->tmp_add_filter_list); + INIT_LIST_HEAD(&net_resource_mgt->tmp_del_filter_list); + INIT_LIST_HEAD(&net_resource_mgt->indr_dev_priv_list); + net_resource_mgt->get_stats_jiffies = jiffies; + + mod_timer(&net_resource_mgt->serv_timer, + jiffies + net_resource_mgt->serv_timer_period + + hw_stats_delay_time); + + return 0; +} + +static int nbl_serv_enable_adminq_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->enable_adminq_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector_id, enable_msix); + if (ret) + return -EIO; + + return 0; +} + +static u16 nbl_serv_get_rdma_cap_num(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_rdma_cap_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_setup_rdma_id(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->setup_rdma_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_remove_rdma_id(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->remove_rdma_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_register_rdma(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->register_rdma(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, param); +} + +static void nbl_serv_unregister_rdma(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->unregister_rdma(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static void nbl_serv_register_rdma_bond(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + 
disp_ops->register_rdma_bond(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				     list_param, register_param);
+}
+
+static void nbl_serv_unregister_rdma_bond(void *priv, u16 lag_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->unregister_rdma_bond(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), lag_id);
+}
+
+static u8 __iomem *nbl_serv_get_hw_addr(void *priv, size_t *size)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_hw_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), size);
+}
+
+static u64 nbl_serv_get_real_hw_addr(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_real_hw_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+}
+
+static u16 nbl_serv_get_function_id(void *priv, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+}
+
+static void nbl_serv_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_real_bdf(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id,
+				      bus, dev, function);
+}
+
+static int nbl_serv_get_devlink_info(struct devlink *devlink, struct devlink_info_req *req,
+				     struct netlink_ext_ack *extack)
+{
+	struct nbl_devlink_priv *priv = devlink_priv(devlink);
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv->priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	char firmware_version[NBL_DEVLINK_INFO_FRIMWARE_VERSION_LEN] = {0};
+	int ret = 0;
+
+	ret = disp_ops->get_firmware_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					     firmware_version, sizeof(firmware_version));
+	if (ret)
+		return ret;
+
+	return devlink_info_version_fixed_put(req, "FW Version:", firmware_version);
+}
+
+/* Why do we need this?
+ * Because the original kernel helper cannot handle subvendor and subdevice
+ * set to 0xFFFF, so implement a correct match here.
+ */
+static bool
+nbl_serv_pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+	struct pci_dev *pdev = to_pci_dev(context->dev);
+	struct nbl_serv_pldm_pci_record_id id = {
+		.vendor = PCI_ANY_ID,
+		.device = PCI_ANY_ID,
+		.subsystem_vendor = PCI_ANY_ID,
+		.subsystem_device = PCI_ANY_ID,
+	};
+	struct pldmfw_desc_tlv *desc;
+
+	list_for_each_entry(desc, &record->descs, entry) {
+		u16 value;
+		u16 *ptr;
+
+		switch (desc->type) {
+		case PLDM_DESC_ID_PCI_VENDOR_ID:
+			ptr = &id.vendor;
+			break;
+		case PLDM_DESC_ID_PCI_DEVICE_ID:
+			ptr = &id.device;
+			break;
+		case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+			ptr = &id.subsystem_vendor;
+			break;
+		case PLDM_DESC_ID_PCI_SUBDEV_ID:
+			ptr = &id.subsystem_device;
+			break;
+		default:
+			/* Skip unrelated TLVs */
+			continue;
+		}
+
+		value = get_unaligned_le16(desc->data);
+		/* A value of zero for one of the descriptors is sometimes
+		 * used when the record should ignore this field when matching
+		 * device. For example if the record applies to any subsystem
+		 * device or vendor.
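+		 * Map zero to PCI_ANY_ID so such a record matches anything.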
+		 */
+		if (value)
+			*ptr = value;
+		else
+			*ptr = PCI_ANY_ID;
+	}
+
+	return (id.vendor == (u16)PCI_ANY_ID || id.vendor == pdev->vendor) &&
+	       (id.device == (u16)PCI_ANY_ID || id.device == pdev->device) &&
+	       (id.subsystem_vendor == (u16)PCI_ANY_ID ||
+		id.subsystem_vendor == pdev->subsystem_vendor) &&
+	       (id.subsystem_device == (u16)PCI_ANY_ID ||
+		id.subsystem_device == pdev->subsystem_device);
+}
+
+static int nbl_serv_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
+{
+	struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv,
+							    context);
+	struct nbl_service_mgt *serv_mgt = priv->serv_mgt;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	int ret = 0;
+
+	nbl_info(common, NBL_DEBUG_DEVLINK, "Send package data");
+
+	ret = disp_ops->flash_lock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		return ret;
+
+	ret = disp_ops->flash_prepare(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+
+	return ret;
+}
+
+static int nbl_serv_send_component_table(struct pldmfw *context, struct pldmfw_component *component,
+					 u8 transfer_flags)
+{
+	struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv,
+							    context);
+	struct nbl_service_mgt *serv_mgt = priv->serv_mgt;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+
+	nbl_info(common, NBL_DEBUG_DEVLINK, "Send component table, id %d", component->identifier);
+
+	return 0;
+}
+
+static int nbl_serv_flash_component(struct pldmfw *context, struct pldmfw_component *component)
+{
+	struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv,
+							    context);
+	struct nbl_service_mgt *serv_mgt = priv->serv_mgt;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	u32 component_crc, calculated_crc;
+	size_t data_len = component->component_size - NBL_DEVLINK_FLASH_COMPONENT_CRC_SIZE;
+	int ret = 0;
+
+	nbl_info(common, NBL_DEBUG_DEVLINK, "Flash component table, id %d", component->identifier);
+
+	component_crc = *(u32 *)((u8 *)component->component_data + data_len);
+	calculated_crc = crc32_le(~0, component->component_data, data_len) ^ ~0;
+	if (component_crc != calculated_crc) {
+		nbl_err(common, NBL_DEBUG_DEVLINK, "Flash component crc error");
+		disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+		return -EFAULT;
+	}
+
+	ret = disp_ops->flash_image(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), component->identifier,
+				    component->component_data, data_len);
+	if (ret)
+		disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+
+	return ret;
+}
+
+static int nbl_serv_finalize_update(struct pldmfw *context)
+{
+	struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv,
+							    context);
+	struct nbl_service_mgt *serv_mgt = priv->serv_mgt;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	int ret = 0;
+
+	nbl_info(common, NBL_DEBUG_DEVLINK, "Flash activate");
+
+	ret = disp_ops->flash_activate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+
+	disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	return ret;
+}
+
+static const struct pldmfw_ops nbl_update_fw_ops = {
+	.match_record = 
nbl_serv_pldmfw_op_pci_match_record, + .send_package_data = nbl_serv_send_package_data, + .send_component_table = nbl_serv_send_component_table, + .flash_component = nbl_serv_flash_component, + .finalize_update = nbl_serv_finalize_update, +}; + +int nbl_serv_update_firmware(struct nbl_service_mgt *serv_mgt, const struct firmware *fw, + struct netlink_ext_ack *extack) +{ + struct nbl_serv_update_fw_priv priv = {{0}}; + int ret = 0; + + priv.context.ops = &nbl_update_fw_ops; + priv.context.dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + priv.extack = extack; + priv.serv_mgt = serv_mgt; + + ret = pldmfw_flash_image(&priv.context, fw); + + return ret; +} + +static int nbl_serv_update_devlink_flash(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct nbl_devlink_priv *priv = devlink_priv(devlink); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv->priv; + int ret = 0; + + devlink_flash_update_status_notify(devlink, "Flash start", NULL, 0, 0); + + ret = nbl_serv_update_firmware(serv_mgt, params->fw, extack); + + if (ret) + devlink_flash_update_status_notify(devlink, "Flash failed", NULL, 0, 0); + else + devlink_flash_update_status_notify(devlink, + "Flash finished, please reboot to take effect", + NULL, 0, 0); + return ret; +} + +static u32 nbl_serv_get_adminq_tx_buf_size(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_adminq_tx_buf_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_emp_console_write(void *priv, char *buf, size_t count) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->emp_console_write(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), buf, count); +} + +static bool nbl_serv_check_fw_heartbeat(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->check_fw_heartbeat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static bool nbl_serv_check_fw_reset(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->check_fw_reset(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_get_common_irq_num(void *priv, struct nbl_common_irq_num *irq_num) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + irq_num->mbx_irq_num = disp_ops->get_mbx_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_get_ctrl_irq_num(void *priv, struct nbl_ctrl_irq_num *irq_num) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + irq_num->adminq_irq_num = disp_ops->get_adminq_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + irq_num->abnormal_irq_num = + disp_ops->get_abnormal_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_check_offload_status(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + bool is_down = false; + int ret; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = 
disp_ops->check_offload_status(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &is_down);
+
+	/* OVS is down, need to delete the related PMD flow rules */
+	if (is_down)
+		disp_ops->del_nd_upcall_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+
+	return ret;
+}
+
+static u32 nbl_serv_get_chip_temperature(void *priv, enum nbl_hwmon_type type, u32 sensor_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_chip_temperature(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), type, sensor_id);
+}
+
+static int nbl_serv_get_module_temperature(void *priv, u8 eth_id, enum nbl_hwmon_type type)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_module_temperature(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, type);
+}
+
+static int nbl_serv_get_port_attributes(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops;
+	int ret = 0;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	ret = disp_ops->get_port_attributes(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		return -EIO;
+
+	return 0;
+}
+
+static int nbl_serv_update_template_config(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	int ret = 0;
+
+	ret = disp_ops->update_ring_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		return ret;
+
+	ret = disp_ops->update_rdma_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		return ret;
+
+	ret = disp_ops->update_rdma_mem_type(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int nbl_serv_enable_port(void *priv, bool enable)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops;
+	int ret = 0;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	ret = disp_ops->enable_port(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable);
+	if (ret)
+		return -EIO;
+
+	return 0;
+}
+
+static void nbl_serv_init_port(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->init_port(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
 }

-static bool nbl_serv_promisc_mode_changed(struct net_device *dev)
+static void nbl_serv_configure_rdma_msix_off(void *priv, u16 vector)
 {
-	struct nbl_adapter *adapter;
-	struct nbl_service_mgt *serv_mgt;
-	struct nbl_serv_net_resource_mgt *net_resource_mgt;
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);

-	adapter = NBL_NETDEV_TO_ADAPTER(dev);
-	serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
-	net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	disp_ops->configure_rdma_msix_off(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vector);

-	return (net_resource_mgt->curr_promiscuout_mode ^ dev->flags)
-		& (IFF_PROMISC | IFF_ALLMULTI);
 }

-static void nbl_serv_set_rx_mode(struct net_device *dev)
+static int nbl_serv_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id)
 {
-	struct nbl_adapter *adapter;
-	struct nbl_service_mgt *serv_mgt;
-	struct nbl_serv_net_resource_mgt *net_resource_mgt;
-
-	adapter = NBL_NETDEV_TO_ADAPTER(dev);
-	serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
-	net_resource_mgt =
NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - spin_lock_bh(&net_resource_mgt->mac_vlan_list_lock); - __dev_uc_sync(dev, nbl_addr_sync, nbl_addr_unsync); - spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); + if (NBL_COMMON_TO_VF_CAP(common)) + return 0; + else + return disp_ops->set_eth_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + mac, eth_id); +} - if (!NBL_COMMON_TO_VF_CAP(NBL_SERV_MGT_TO_COMMON(serv_mgt))) { /* only pf support */ - spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); - if (nbl_serv_promisc_mode_changed(dev)) { - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; - net_resource_mgt->curr_promiscuout_mode = dev->flags; - } - spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); - } +static void nbl_serv_adapt_desc_gother(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); + if (test_bit(NBL_FLAG_HIGH_THROUGHPUT, serv_mgt->flags)) + disp_ops->set_desc_high_throughput(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + else + disp_ops->adapt_desc_gother(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); } -static void nbl_serv_change_rx_flags(struct net_device *dev, int flag) +static void nbl_serv_process_flr(void *priv, u16 vfid) { - struct nbl_adapter *adapter; - struct nbl_service_mgt *serv_mgt; - struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - adapter = NBL_NETDEV_TO_ADAPTER(dev); - serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + disp_ops->flr_clear_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_accel_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_flows(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_accel(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_interrupt(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_rdma(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); +} - spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); - if (nbl_serv_promisc_mode_changed(dev)) { - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; - net_resource_mgt->curr_promiscuout_mode = dev->flags; - } - spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); +static u16 nbl_serv_covert_vfid_to_vsi_id(void *priv, u16 vfid) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); + return disp_ops->covert_vfid_to_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); } -static netdev_features_t -nbl_serv_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) +static void nbl_serv_recovery_abnormal(void *priv) { - u32 l2_l3_hrd_len = 0, l4_hrd_len = 0, total_hrd_len = 0; - u8 l4_proto = 0; - __be16 protocol, frag_off; - int ret; - unsigned char *exthdr; - unsigned int 
offset = 0; - int nexthdr = 0; - int exthdr_num = 0; - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } ip; - union { - struct tcphdr *tcp; - struct udphdr *udp; - unsigned char *hdr; - } l4; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - /* No point in doing any of this if neither checksum nor GSO are - * being requested for this frame. We can rule out both by just - * checking for CHECKSUM_PARTIAL. - */ - if (skb->ip_summed != CHECKSUM_PARTIAL) - return features; + disp_ops->unmask_all_interrupts(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} - /* We cannot support GSO if the MSS is going to be less than - * 256 bytes or bigger than 16383 bytes. If it is then we need to drop support for GSO. - */ - if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < NBL_TX_TSO_MSS_MIN || - skb_shinfo(skb)->gso_size > NBL_TX_TSO_MSS_MAX)) - features &= ~NETIF_F_GSO_MASK; +static void nbl_serv_keep_alive(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - l2_l3_hrd_len = (u32)(skb_transport_header(skb) - skb->data); + disp_ops->keep_alive(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - protocol = vlan_get_protocol(skb); +static int nbl_serv_register_vsi_info(void *priv, struct nbl_vsi_param *vsi_param) +{ + u16 vsi_index = vsi_param->index; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u32 num_cpus; - if (protocol == htons(ETH_P_IP)) { - l4_proto = ip.v4->protocol; - } else if (protocol == htons(ETH_P_IPV6)) { - exthdr = ip.hdr + sizeof(*ip.v6); - l4_proto = ip.v6->nexthdr; - if (l4.hdr != exthdr) { - ret = ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); - if (ret < 0) - goto out_rm_features; - } + ring_mgt->vsi_info[vsi_index].vsi_index = vsi_index; + ring_mgt->vsi_info[vsi_index].vsi_id = vsi_param->vsi_id; + ring_mgt->vsi_info[vsi_index].ring_offset = vsi_param->queue_offset; + ring_mgt->vsi_info[vsi_index].ring_num = vsi_param->queue_num; - /* IPV6 extension headers - * (1) donot support routing and destination extension headers - * (2) support 2 extension headers mostly - */ - nexthdr = ipv6_find_hdr(skb, &offset, NEXTHDR_ROUTING, NULL, NULL); - if (nexthdr == NEXTHDR_ROUTING) { - netdev_info(dev, "skb contain ipv6 routing ext header\n"); - goto out_rm_features; - } + /* init active ring number before first open, guarantee fd direct config check success. */ + num_cpus = num_online_cpus(); + ring_mgt->vsi_info[vsi_index].active_ring_num = (u16)num_cpus > vsi_param->queue_num ? 
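+			/* equivalent to min_t(u16, num_cpus, queue_num): one
+			 * active ring per online CPU, capped by the number of
+			 * queues this vsi owns
+			 */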
+		vsi_param->queue_num : (u16)num_cpus;

-	nexthdr = ipv6_find_hdr(skb, &offset, NEXTHDR_DEST, NULL, NULL);
-	if (nexthdr == NEXTHDR_DEST) {
-		netdev_info(dev, "skb contain ipv6 routing dest header\n");
-		goto out_rm_features;
-	}
+	if (disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					  NBL_ITR_DYNAMIC))
+		ring_mgt->vsi_info[vsi_index].itr_dynamic = true;

-	exthdr_num = nbl_serv_ipv6_exthdr_num(skb, exthdr - skb->data, ip.v6->nexthdr);
-	if (exthdr_num < 0 || exthdr_num > 2) {
-		netdev_info(dev, "skb ipv6 exthdr_num:%d\n", exthdr_num);
-		goto out_rm_features;
-	}
-	} else {
-		goto out_rm_features;
-	}
+	/* Clear cfgs, in case this function exited abnormally last time.
+	 * Only for the data vsi: a VF in a VM only supports the data vsi,
+	 * and a DPDK user vsi must not leak resources.
+	 */
+	if (vsi_index == NBL_VSI_DATA)
+		disp_ops->clear_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_param->vsi_id);
+	disp_ops->register_vsi_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_index,
+				    vsi_param->queue_offset, vsi_param->queue_num);

-	switch (l4_proto) {
-	case IPPROTO_TCP:
-		l4_hrd_len = (l4.tcp->doff) * 4;
-		break;
-	case IPPROTO_UDP:
-		l4_hrd_len = sizeof(struct udphdr);
-		break;
-	case IPPROTO_SCTP:
-		l4_hrd_len = sizeof(struct sctphdr);
-		break;
-	default:
-		goto out_rm_features;
-	}
+	return disp_ops->register_vsi2q(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_index,
+					vsi_param->vsi_id, vsi_param->queue_offset,
+					vsi_param->queue_num);
+}

-	total_hrd_len = l2_l3_hrd_len + l4_hrd_len;
+static int nbl_serv_st_open(struct inode *inode, struct file *filep)
+{
+	struct nbl_serv_st_mgt *p = container_of(inode->i_cdev, struct nbl_serv_st_mgt, cdev);

-	// TX checksum offload support total header len is [0, 255]
-	if (total_hrd_len > NBL_TX_CHECKSUM_OFFLOAD_L2L3L4_HDR_LEN_MAX)
-		goto out_rm_features;
+	filep->private_data = p;

-	// TSO support total header len is [42, 128]
-	if (total_hrd_len < NBL_TX_TSO_L2L3L4_HDR_LEN_MIN ||
-	    total_hrd_len > NBL_TX_TSO_L2L3L4_HDR_LEN_MAX)
-		features &= ~NETIF_F_GSO_MASK;
+	return 0;
+}

-	if (skb->encapsulation)
-		goto out_rm_features;
+static ssize_t nbl_serv_st_write(struct file *file, const char __user *ubuf,
+				 size_t size, loff_t *ppos)
+{
+	return 0;
+}

-	return features;
+static ssize_t nbl_serv_st_read(struct file *file, char __user *ubuf, size_t size, loff_t *ppos)
+{
+	return 0;
+}

-out_rm_features:
-	return features & ~(NETIF_F_IP_CSUM |
-			    NETIF_F_IPV6_CSUM |
-			    NETIF_F_SCTP_CRC |
-			    NETIF_F_GSO_MASK);
+static int nbl_serv_st_release(struct inode *inode, struct file *filp)
+{
+	return 0;
 }

-static void nbl_serv_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static int nbl_serv_process_passthrough(struct nbl_service_mgt *serv_mgt,
+					unsigned int cmd, unsigned long arg)
 {
-	struct nbl_netdev_priv *priv = netdev_priv(netdev);
-	struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv);
-	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
 	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
-	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
-	struct nbl_serv_ring_vsi_info *vsi_info;
+	struct nbl_passthrough_fw_cmd_param *param = NULL, *result = NULL;
+	int ret = 0;

-	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+	param = kzalloc(sizeof(*param), GFP_KERNEL);
+	if (!param) {
+		ret = -ENOMEM;
+		goto alloc_param_fail;
+	}

-	ring_mgt->tx_rings[vsi_info->ring_offset + txqueue].need_recovery = true;
-	ring_mgt->tx_rings[vsi_info->ring_offset + txqueue].tx_timeout_count++;
+	result = kzalloc(sizeof(*result), GFP_KERNEL);
+	if (!result) {
+		ret = -ENOMEM;
+		goto alloc_result_fail;
+	}

-	nbl_warn(common, NBL_DEBUG_QUEUE, "TX timeout on queue %d", txqueue);
+	if (copy_from_user(param, (void __user *)arg, _IOC_SIZE(cmd))) {
+		nbl_err(common, NBL_DEBUG_ST, "Bad access.\n");
+		ret = -EFAULT;
+		goto passthrough_fail;
+	}

-	nbl_common_queue_work(&NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->tx_timeout, false, false);
-}
+	nbl_debug(common, NBL_DEBUG_ST, "Passthrough opcode: %d\n", param->opcode);

-static int nbl_serv_get_phys_port_name(struct net_device *dev, char *name, size_t len)
-{
-	struct nbl_common_info *common = NBL_NETDEV_TO_COMMON(dev);
-	u8 pf_id;
+	ret = disp_ops->passthrough_fw_cmd(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param, result);
+	if (ret)
+		goto passthrough_fail;

-	pf_id = common->eth_id;
-	if ((NBL_COMMON_TO_ETH_MODE(common) == NBL_TWO_ETHERNET_PORT) && common->eth_id == 2)
-		pf_id = 1;
+	if (copy_to_user((void __user *)arg, result, _IOC_SIZE(cmd)))
+		ret = -EFAULT;

-	if (snprintf(name, len, "p%u", pf_id) >= len)
-		return -EINVAL;
-	return 0;
+passthrough_fail:
+	kfree(result);
+alloc_result_fail:
+	kfree(param);
+alloc_param_fail:
+	return ret;
 }

-static int nbl_serv_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid)
+static int nbl_serv_process_st_info(struct nbl_service_mgt *serv_mgt,
+				    unsigned int cmd, unsigned long arg)
 {
-	struct nbl_netdev_priv *priv = netdev_priv(dev);
-	struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv);
-	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
 	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
-	u8 mac[ETH_ALEN];
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_st_info_param *param = NULL;
+	int ret = 0;
+
+	nbl_debug(common, NBL_DEBUG_ST, "Get st info\n");
+
+	param = kzalloc(sizeof(*param), GFP_KERNEL);
+	if (!param)
+		return -ENOMEM;
+
+	strscpy(param->driver_name, NBL_DRIVER_NAME, sizeof(param->driver_name));
+	if (net_resource_mgt->netdev)
+		strscpy(param->netdev_name[0], net_resource_mgt->netdev->name,
+			sizeof(param->netdev_name[0]));
+
+	disp_ops->get_driver_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param->driver_ver,
+				     sizeof(param->driver_ver));

-	disp_ops->get_base_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), mac);
+	param->bus = common->bus;
+	param->devid = common->devid;
+	param->function = common->function;
+	param->domain = pci_domain_nr(NBL_COMMON_TO_PDEV(common)->bus);

-	ppid->id_len = ETH_ALEN;
-	memcpy(&ppid->id, mac, ppid->id_len);
+	param->version = IOCTL_ST_INFO_VERSION;

-	return 0;
+	if (copy_to_user((void __user *)arg, param, _IOC_SIZE(cmd)))
+		ret = -EFAULT;
+
+	kfree(param);
+	return ret;
 }

-static int nbl_serv_register_net(void *priv, struct nbl_register_net_param *register_param,
-				 struct nbl_register_net_result *register_result)
+static long nbl_serv_st_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
-	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
-	int p4_type, ret = 0;
+	struct nbl_serv_st_mgt *st_mgt = file->private_data;
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)st_mgt->serv_mgt;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	int ret = 0;

-	ret = disp_ops->register_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
-				     register_param, register_result);
-	if (ret)
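+	/* Only ioctls carrying our magic number are accepted; anything else
+	 * is answered with -ENOTTY, per the usual ioctl convention.
+	 */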
+	if (_IOC_TYPE(cmd) != IOCTL_TYPE) {
+		nbl_err(common, NBL_DEBUG_ST, "cmd %u, bad magic 0x%x/0x%x.\n",
+			cmd, _IOC_TYPE(cmd), IOCTL_TYPE);
+		return -ENOTTY;
+	}
+
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		ret = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+	else if (_IOC_DIR(cmd) & _IOC_WRITE)
+		ret = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+	if (ret) {
+		nbl_err(common, NBL_DEBUG_ST, "Bad access.\n");
+		return -EFAULT;
+	}

-	p4_type = disp_ops->get_p4_used(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
-	switch (p4_type) {
-	case NBL_P4_DEFAULT:
-		set_bit(NBL_FLAG_P4_DEFAULT, serv_mgt->flags);
+	switch (cmd) {
+	case IOCTL_PASSTHROUGH:
+		ret = nbl_serv_process_passthrough(serv_mgt, cmd, arg);
+		break;
+	case IOCTL_ST_INFO:
+		ret = nbl_serv_process_st_info(serv_mgt, cmd, arg);
 		break;
 	default:
-		nbl_warn(NBL_SERV_MGT_TO_COMMON(serv_mgt), NBL_DEBUG_CUSTOMIZED_P4,
-			 "Unknown P4 type %d", p4_type);
+		nbl_err(common, NBL_DEBUG_ST, "Unknown cmd %d.\n", cmd);
+		return -ENOTTY;
 	}

-	return 0;
+	return ret;
 }

-static int nbl_serv_unregister_net(void *priv)
+static const struct file_operations st_ops = {
+	.owner = THIS_MODULE,
+	.open = nbl_serv_st_open,
+	.write = nbl_serv_st_write,
+	.read = nbl_serv_st_read,
+	.unlocked_ioctl = nbl_serv_st_unlock_ioctl,
+	.release = nbl_serv_st_release,
+};
+
+static int nbl_serv_alloc_subdev_id(struct nbl_software_tool_table *st_table)
 {
-	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
-	struct nbl_dispatch_ops *disp_ops;
+	int subdev_id;

-	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	subdev_id = find_first_zero_bit(st_table->devid, NBL_ST_MAX_DEVICE_NUM);
+	if (subdev_id == NBL_ST_MAX_DEVICE_NUM)
+		return -ENOSPC;
+	set_bit(subdev_id, st_table->devid);

-	return disp_ops->unregister_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	return subdev_id;
 }

-static int nbl_serv_setup_txrx_queues(void *priv, u16 vsi_id, u16 queue_num, u16 net_vector_id)
+static void nbl_serv_free_subdev_id(struct nbl_software_tool_table *st_table, int id)
+{
+	clear_bit(id, st_table->devid);
+}
+
+static int nbl_serv_setup_st(void *priv, void *st_table_param)
 {
 	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
-	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
-	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
-	struct nbl_serv_vector *vector;
-	int i, ret = 0;
+	struct nbl_software_tool_table *st_table = (struct nbl_software_tool_table *)st_table_param;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_serv_st_mgt *st_mgt = NBL_SERV_MGT_TO_ST_MGT(serv_mgt);
+	struct device *test_device;
+	char name[NBL_RESTOOL_NAME_LEN] = {0};
+	dev_t devid;
+	int id, subdev_id, ret = 0;

-	/* Clear cfgs, in case this function exited abnormally last time */
-	disp_ops->clear_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+	id = NBL_COMMON_TO_BOARD_ID(common);

-	/* queue_num include user&kernel queue */
-	ret = disp_ops->alloc_txrx_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, queue_num);
-	if (ret)
-		return -EFAULT;
+	subdev_id = nbl_serv_alloc_subdev_id(st_table);
+	if (subdev_id < 0) {
+		ret = subdev_id;
+		goto alloc_subdev_id_fail;
+	}

-	/* ring_mgt->tx_ring_number only for kernel use */
-	for (i = 0; i < ring_mgt->tx_ring_num; i++) {
-		ring_mgt->tx_rings[i].local_queue_id = NBL_PAIR_ID_GET_TX(i);
-		ring_mgt->rx_rings[i].local_queue_id = NBL_PAIR_ID_GET_RX(i);
+	devid = MKDEV(st_table->major, subdev_id);

-		vector = &ring_mgt->vectors[i];
-		vector->local_vector_id = i + net_vector_id;
-		vector->global_vector_id =
-			disp_ops->get_global_vector(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
-						    vsi_id, vector->local_vector_id);
-		vector->irq_enable_base =
-			disp_ops->get_msix_irq_enable_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
-							   vector->global_vector_id,
-							   &vector->irq_data);
+	if (!NBL_COMMON_TO_PCI_FUNC_ID(common))
+		snprintf(name, sizeof(name), "nblst%04x_conf%d",
+			 NBL_COMMON_TO_PDEV(common)->device, id);
+	else
+		snprintf(name, sizeof(name), "nblst%04x_conf%d.%d",
+			 NBL_COMMON_TO_PDEV(common)->device, id, NBL_COMMON_TO_PCI_FUNC_ID(common));

-		disp_ops->set_vector_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
-					  vector->irq_enable_base,
-					  vector->irq_data, i,
-					  ring_mgt->net_msix_mask_en);
+	st_mgt = devm_kzalloc(NBL_COMMON_TO_DEV(common), sizeof(*st_mgt), GFP_KERNEL);
+	if (!st_mgt) {
+		ret = -ENOMEM;
+		goto malloc_fail;
+	}
+
+	st_mgt->serv_mgt = serv_mgt;
+
+	st_mgt->major = MAJOR(devid);
+	st_mgt->minor = MINOR(devid);
+	st_mgt->devno = devid;
+	st_mgt->subdev_id = subdev_id;
+
+	cdev_init(&st_mgt->cdev, &st_ops);
+	ret = cdev_add(&st_mgt->cdev, devid, 1);
+	if (ret)
+		goto cdev_add_fail;
+
+	test_device = device_create(st_table->cls, NULL, st_mgt->devno, NULL, name);
+	if (IS_ERR(test_device)) {
+		ret = -EBUSY;
+		goto device_create_fail;
 	}

+	NBL_SERV_MGT_TO_ST_MGT(serv_mgt) = st_mgt;
 	return 0;
+
+device_create_fail:
+	cdev_del(&st_mgt->cdev);
+cdev_add_fail:
+	devm_kfree(NBL_COMMON_TO_DEV(common), st_mgt);
+malloc_fail:
+	nbl_serv_free_subdev_id(st_table, subdev_id);
+alloc_subdev_id_fail:
+	return ret;
 }

-static void nbl_serv_remove_txrx_queues(void *priv, u16 vsi_id)
+static void nbl_serv_remove_st(void *priv, void *st_table_param)
 {
 	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
-	struct nbl_serv_ring_mgt *ring_mgt;
-	struct nbl_dispatch_ops *disp_ops;
+	struct nbl_software_tool_table *st_table = (struct nbl_software_tool_table *)st_table_param;
+	struct nbl_serv_st_mgt *st_mgt = NBL_SERV_MGT_TO_ST_MGT(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);

-	ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
-	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	if (!st_mgt)
+		return;

-	disp_ops->free_txrx_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
-}
+	device_destroy(st_table->cls, st_mgt->devno);
+	cdev_del(&st_mgt->cdev);

-static int nbl_serv_setup_q2vsi(void *priv, u16 vsi_id)
-{
-	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
-	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	nbl_serv_free_subdev_id(st_table, st_mgt->subdev_id);

-	return disp_ops->setup_q2vsi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+	NBL_SERV_MGT_TO_ST_MGT(serv_mgt) = NULL;
+	devm_kfree(NBL_COMMON_TO_DEV(common), st_mgt);
 }

-static void nbl_serv_remove_q2vsi(void *priv, u16 vsi_id)
+static void nbl_serv_form_p4_name(struct nbl_common_info *common, int type, char *name,
+				  u16 len, u32 version)
 {
-	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
-	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	char eth_num[NBL_P4_NAME_LEN] = {0};
+	char ver[NBL_P4_NAME_LEN] = {0};

-	disp_ops->remove_q2vsi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+	switch (NBL_COMMON_TO_ETH_MODE(common)) {
+	case 1:
+		snprintf(eth_num, sizeof(eth_num), "single");
+		break;
+	case 2:
+		snprintf(eth_num, sizeof(eth_num), "dual");
+		break;
+	case 4:
+		snprintf(eth_num, sizeof(eth_num), "quad");
+		break;
+	default:
+		nbl_err(common, NBL_DEBUG_CUSTOMIZED_P4, "Unknown eth mode %d",
+			NBL_COMMON_TO_ETH_MODE(common));
+		return;
+	}
+
+	switch (version) {
+	case 0:
+		snprintf(ver,
sizeof(ver), "lg"); + break; + case 1: + snprintf(ver, sizeof(ver), "hg"); + break; + } + + switch (type) { + case NBL_P4_DEFAULT: + /* No need to load default p4 file */ + snprintf(name, len, "nbl/snic_v3r1/m181xx_%s_port_p4_%s", eth_num, ver); + break; + default: + nbl_err(common, NBL_DEBUG_CUSTOMIZED_P4, "Unknown P4 type %d", type); + } } -static int nbl_serv_setup_rss(void *priv, u16 vsi_id) +static int nbl_serv_calculate_md5sum(struct nbl_common_info *common, const u8 *data, + u32 data_len, char *md5_string) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct shash_desc *shash; + struct crypto_shash *tfm; + u8 md5_result[NBL_MD5SUM_LEN]; + int i; + int ret; - return disp_ops->setup_rss(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + tfm = crypto_alloc_shash("md5", 0, 0); + if (IS_ERR(tfm)) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Failed to allocate MD5 transform\n"); + return PTR_ERR(tfm); + } + + shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!shash) { + crypto_free_shash(tfm); + return -ENOMEM; + } + + shash->tfm = tfm; + + ret = crypto_shash_init(shash); + if (ret) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Failed to initialize MD5\n"); + kfree(shash); + crypto_free_shash(tfm); + return ret; + } + + ret = crypto_shash_update(shash, data, data_len); + if (ret) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Failed to update MD5\n"); + kfree(shash); + crypto_free_shash(tfm); + return ret; + } + + ret = crypto_shash_final(shash, md5_result); + if (ret) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Failed to finalize MD5\n"); + kfree(shash); + crypto_free_shash(tfm); + return ret; + } + + for (i = 0; i < NBL_MD5SUM_LEN; i++) + sprintf(md5_string + i * 2, "%02x", md5_result[i]); + + md5_string[32] = '\0'; + + kfree(shash); + crypto_free_shash(tfm); + + return 0; } -static void nbl_serv_remove_rss(void *priv, u16 vsi_id) +static char *nbl_serv_get_md5_verify(int type, u16 version, u8 eth_num) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + if (version == 1) { + switch (eth_num) { + case 1: return NBL_SINGLE_PORT_HG_P4_MD5; + case 2: return NBL_DUAL_PORT_HG_P4_MD5; + case 4: return NBL_QUAD_PORT_HG_P4_MD5; + default: return NULL; + } + } else if (version == 0) { + switch (eth_num) { + case 1: return NBL_SINGLE_PORT_LG_P4_MD5; + case 2: return NBL_DUAL_PORT_LG_P4_MD5; + case 4: return NBL_QUAD_PORT_LG_P4_MD5; + default: return NULL; + } + } - disp_ops->remove_rss(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + return NULL; } -static int nbl_serv_alloc_rings(void *priv, struct net_device *netdev, - u16 tx_num, u16 rx_num, u16 desc_num) +static int nbl_serv_load_p4(struct nbl_service_mgt *serv_mgt, + const struct firmware *fw, char *verify_code, int type, u16 version) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct device *dev; - struct nbl_serv_ring_mgt *ring_mgt; - struct nbl_dispatch_ops *disp_ops; - int ret = 0; - - dev = NBL_SERV_MGT_TO_DEV(serv_mgt); - ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + const struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct elf32_shdr *shdr; + struct nbl_load_p4_param param; + 
u8 *strtab, *name, *product_code = NULL;
+	int i;
+	char md5_result[33];
+	char *md5_verify;
+	u32 p4_size = 0;

-	ring_mgt->tx_ring_num = tx_num;
-	ring_mgt->rx_ring_num = rx_num;
-	ring_mgt->tx_desc_num = desc_num;
-	ring_mgt->rx_desc_num = desc_num;
+	if (memcmp(elf_hdr->e_ident, NBL_P4_ELF_IDENT, NBL_P4_ELF_IDENT_LEN)) {
+		nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4,
+			 "Invalid ELF file, load default p4 configuration");
+		return 0;
+	}

-	ret = disp_ops->alloc_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), netdev,
-				    tx_num, rx_num, ring_mgt->tx_desc_num,
-				    ring_mgt->rx_desc_num);
-	if (ret)
-		goto alloc_rings_fail;
+	md5_verify = nbl_serv_get_md5_verify(type, version, NBL_COMMON_TO_ETH_MODE(common));

-	ret = nbl_serv_set_tx_rings(ring_mgt, netdev, dev);
-	if (ret)
-		goto set_tx_fail;
-	ret = nbl_serv_set_rx_rings(ring_mgt, netdev, dev);
-	if (ret)
-		goto set_rx_fail;
+	if (nbl_serv_calculate_md5sum(common, fw->data, fw->size, md5_result)) {
+		nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4,
+			 "ELF md5sum calculation failed, load default p4 configuration");
+		return 0;
+	}

-	ret = nbl_serv_set_vectors(serv_mgt, netdev, dev);
-	if (ret)
-		goto set_vectors_fail;
+	nbl_info(common, NBL_DEBUG_CUSTOMIZED_P4, "load p4 md5sum: %s\n", md5_result);

-	return 0;
+	if (!md5_verify || strncmp(md5_verify, md5_result, 33))
+		nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4,
+			 "ELF file does not match driver version, function may be abnormal\n");

-set_vectors_fail:
-	nbl_serv_remove_rx_ring(ring_mgt, dev);
-set_rx_fail:
-	nbl_serv_remove_tx_ring(ring_mgt, dev);
-set_tx_fail:
-	disp_ops->remove_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
-alloc_rings_fail:
-	return ret;
-}
+	memset(&param, 0, sizeof(param));

-static void nbl_serv_free_rings(void *priv)
-{
-	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
-	struct device *dev;
-	struct nbl_serv_ring_mgt *ring_mgt;
-	struct nbl_dispatch_ops *disp_ops;
+	shdr = (Elf32_Shdr *)((u8 *)elf_hdr + elf_hdr->e_shoff);
+	strtab = (u8 *)elf_hdr + shdr[elf_hdr->e_shstrndx].sh_offset;

-	dev = NBL_SERV_MGT_TO_DEV(serv_mgt);
-	ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
-	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	for (i = 0; i < elf_hdr->e_shnum; i++)
+		if (shdr[i].sh_type == SHT_NOTE) {
+			name = strtab + shdr[i].sh_name;
+			product_code = (u8 *)elf_hdr + shdr[i].sh_offset;
+		}

-	nbl_serv_remove_vectors(ring_mgt, dev);
-	nbl_serv_remove_rx_ring(ring_mgt, dev);
-	nbl_serv_remove_tx_ring(ring_mgt, dev);
+	if (!product_code) {
+		nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4,
+			 "Product code does not exist, function may be abnormal");
+		return 0;
+	}

-	disp_ops->remove_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
-}
+	if (strncmp(product_code, verify_code, NBL_P4_VERIFY_CODE_LEN)) {
+		nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4,
+			 "Invalid product code %.32s, function may be abnormal", product_code);
+		return 0;
+	}

-static int nbl_serv_enable_napis(void *priv, u16 vsi_index)
-{
-	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
-	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
-	struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[vsi_index];
-	u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num;
-	int i;
+	param.start = 1;
+	disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param);

-	for (i = start; i < end; i++)
-		napi_enable(ring_mgt->vectors[i].napi);
+	for (i = 0; i < elf_hdr->e_shnum; i++)
+		if (shdr[i].sh_type == SHT_PROGBITS && !(shdr[i].sh_flags & SHF_EXECINSTR)) {
+			memset(&param, 0, sizeof(param));
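+			/* describe this section to the loader: target
+			 * address, size and a pointer into the ELF image;
+			 * the section goes down in one piece, so
+			 * section_offset stays 0
+			 */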
+			/* name is used to distinguish configurations, not used for now */
+			strscpy(param.name, strtab + shdr[i].sh_name, sizeof(param.name));
+			param.addr = shdr[i].sh_addr;
+			param.size = shdr[i].sh_size;
+			param.section_index = i;
+			param.section_offset = 0;
+			param.data = (u8 *)elf_hdr + shdr[i].sh_offset;
+			p4_size += param.size;

-	return 0;
-}
+			disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param);
+		}

-static void nbl_serv_disable_napis(void *priv, u16 vsi_index)
-{
-	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
-	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
-	struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[vsi_index];
-	u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num;
-	int i;
+	memset(&param, 0, sizeof(param));
+	param.end = 1;
+	disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param);

-	for (i = start; i < end; i++)
-		napi_disable(ring_mgt->vectors[i].napi);
+	return 0;
 }

-static void nbl_serv_set_mask_en(void *priv, bool enable)
+static __maybe_unused void nbl_serv_load_default_p4(struct nbl_service_mgt *serv_mgt)
 {
-	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
-	struct nbl_serv_ring_mgt *ring_mgt;
-
-	ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);

-	ring_mgt->net_msix_mask_en = enable;
+	disp_ops->load_p4_default(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
 }

-static int nbl_serv_start_net_flow(void *priv, struct net_device *netdev, u16 vsi_id)
+static int nbl_serv_init_p4(void *priv)
 {
 	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
 	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
-	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
-	struct nbl_serv_vlan_node *vlan_node;
-	int ret = 0;
-
-	/* Clear cfgs, in case this function exited abnormally last time */
-	disp_ops->clear_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	const struct firmware *fw;
+	char name[NBL_P4_NAME_LEN] = {0};
+	char verify_code[NBL_P4_NAME_LEN] = {0};
+	int type, ret = 0;
+	u32 version;

-	if (!list_empty(&flow_mgt->vlan_list))
-		return -ECONNRESET;
+	version = disp_ops->get_p4_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	type = disp_ops->get_p4_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), verify_code);
+	if (type < 0 || type > NBL_P4_TYPE_MAX) {
+		nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4,
+			 "p4 type is invalid, load default p4 configuration\n");
+		return 0;
+	}

-	ret = disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+	nbl_serv_form_p4_name(common, type, name, sizeof(name), version);
+	ret = firmware_request_nowarn(&fw, name, NBL_SERV_MGT_TO_DEV(serv_mgt));
 	if (ret)
-		goto add_multi_fail;
+		goto out;

-	vlan_node = nbl_serv_alloc_vlan_node();
-	if (!vlan_node)
-		goto alloc_fail;
+	ret = nbl_serv_load_p4(serv_mgt, fw, verify_code, type, version);

-	ether_addr_copy(flow_mgt->mac, netdev->dev_addr);
-	ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac,
-				    NBL_DEFAULT_VLAN_ID, vsi_id);
+	release_firmware(fw);
+
+out:
 	if (ret)
-		goto add_macvlan_fail;
+		type = NBL_FLAG_P4_DEFAULT;

-	vlan_node->vid = 0;
+	nbl_info(common, NBL_DEBUG_CUSTOMIZED_P4, "Load P4 %d", type);
+	disp_ops->set_p4_used(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), type);

-	list_add(&vlan_node->node, &flow_mgt->vlan_list);
+	/* We always return OK, because at the very least we would use default P4 */
 	return 0;
-
-add_macvlan_fail:
-	nbl_serv_free_vlan_node(vlan_node);
-alloc_fail:
-	disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
-add_multi_fail:
-	return ret;
 }

-static void nbl_serv_stop_net_flow(void *priv, u16 vsi_id)
+static int nbl_serv_set_spoof_check_addr(void *priv, u8 *mac)
 {
 	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
 	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
-	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
-
-	nbl_serv_del_all_vlans(serv_mgt);
-	nbl_serv_del_all_submacs(serv_mgt);
-
-	disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);

-	disp_ops->set_vf_spoof_check(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
-				     vsi_id, -1, false);
-	memset(flow_mgt->mac, 0, sizeof(flow_mgt->mac));
+	return disp_ops->set_spoof_check_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					      NBL_COMMON_TO_VSI_ID(common), mac);
 }

-static int nbl_serv_set_lldp_flow(void *priv, u16 vsi_id)
+static u16 nbl_serv_get_vf_base_vsi_id(void *priv, u16 func_id)
 {
 	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
 	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);

-	return disp_ops->add_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+	return disp_ops->get_vf_base_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id);
 }

-static void nbl_serv_remove_lldp_flow(void *priv, u16 vsi_id)
+static int nbl_serv_get_board_id(void *priv)
 {
 	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
 	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);

-	disp_ops->del_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+	return disp_ops->get_board_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
 }

-static int nbl_serv_start_mgt_flow(void *priv)
+static int nbl_serv_process_abnormal_event(void *priv)
 {
 	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
 	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_abnormal_event_info abnormal_info;
+	struct nbl_abnormal_details *detail;
+	u16 local_queue_id;
+	int type, i, ret = 0;

-	return disp_ops->setup_multi_group(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	memset(&abnormal_info, 0, sizeof(abnormal_info));
+
+	ret = disp_ops->process_abnormal_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &abnormal_info);
+	if (!ret)
+		return ret;
+
+	for (i = 0; i < NBL_ABNORMAL_EVENT_MAX; i++) {
+		detail = &abnormal_info.details[i];
+
+		if (!detail->abnormal)
+			continue;
+
+		type = nbl_serv_abnormal_event_to_queue(i);
+		local_queue_id = disp_ops->get_local_queue_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+							      detail->vsi_id, detail->qid);
+		if (local_queue_id == U16_MAX)
+			return 0;
+
+		nbl_serv_restore_queue(serv_mgt, detail->vsi_id, local_queue_id, type, true);
+	}
+
+	return 0;
 }

-static void nbl_serv_stop_mgt_flow(void *priv)
+static int nbl_serv_cfg_bond_shaping(void *priv, u8 eth_id, bool enable)
 {
 	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
 	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);

-	return disp_ops->remove_multi_group(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	return disp_ops->cfg_bond_shaping(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, enable);
 }

-static u32 nbl_serv_get_tx_headroom(void *priv)
+static void nbl_serv_cfg_bgid_back_pressure(void *priv, u8 main_eth_id,
+					    u8 other_eth_id, bool enable)
 {
 	struct nbl_service_mgt *serv_mgt = (struct
nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - return disp_ops->get_tx_headroom(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + disp_ops->cfg_bgid_back_pressure(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), main_eth_id, + other_eth_id, enable); } -/** - * This ops get flexible product capability from ctrl device, if the device has not manager cap, it - * need get capability from ctr device by channel - */ -static bool nbl_serv_get_product_flex_cap(void *priv, enum nbl_flex_cap_type cap_type) +static void nbl_serv_cfg_eth_bond_event(void *priv, bool enable) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - return disp_ops->get_product_flex_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - cap_type); + disp_ops->cfg_eth_bond_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); } -/** - * This ops get fix product capability from resource layer, this capability fix by product_type, no - * need get from ctrl device - */ -static bool nbl_serv_get_product_fix_cap(void *priv, enum nbl_fix_cap_type cap_type) +static ssize_t nbl_serv_vf_mac_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - - return disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - cap_type); + return sprintf(buf, "usage: write MAC ADDR to set mac address\n"); } -static int nbl_serv_init_chip(void *priv) +static ssize_t nbl_serv_vf_mac_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops; - struct nbl_common_info *common; - struct device *dev; + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + u8 mac[ETH_ALEN]; int ret = 0; - common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - dev = NBL_COMMON_TO_DEV(common); - - ret = disp_ops->init_chip_module(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - if (ret) { - dev_err(dev, "init_chip_module failed\n"); - goto module_init_fail; - } - - ret = disp_ops->queue_init(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - if (ret) { - dev_err(dev, "queue_init failed\n"); - goto queue_init_fail; - } - - ret = disp_ops->vsi_init(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - if (ret) { - dev_err(dev, "vsi_init failed\n"); - goto vsi_init_fail; - } - - return 0; + ret = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]); + if (ret != ETH_ALEN) + return -EINVAL; -vsi_init_fail: -queue_init_fail: -module_init_fail: - return ret; + ret = nbl_serv_set_vf_mac(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, mac); + return ret ? 
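+	/* sysfs store convention: negative errno on failure, number of
+	 * consumed bytes on success
+	 */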
ret : count; } -static int nbl_serv_destroy_chip(void *p) +static ssize_t nbl_serv_vf_trust_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return 0; + return sprintf(buf, "usage: write to set vf trust\n"); } -static int nbl_serv_configure_msix_map(void *priv, u16 num_net_msix, u16 num_others_msix, - bool net_msix_mask_en) +static ssize_t nbl_serv_vf_trust_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops; + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + bool trusted = false; int ret = 0; - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + if (sysfs_streq(buf, "ON")) + trusted = true; + else if (sysfs_streq(buf, "OFF")) + trusted = false; + else + return -EINVAL; - ret = disp_ops->configure_msix_map(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), num_net_msix, - num_others_msix, net_msix_mask_en); - if (ret) - return -EIO; + ret = nbl_serv_set_vf_trust(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, trusted); + return ret ? ret : count; +} - return 0; +static ssize_t nbl_serv_vf_vlan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "usage: write to set VF Vlan," + " Qos, and optionally Vlan Protocol (default 802.1Q)\n"); } -static int nbl_serv_destroy_msix_map(void *priv) +static ssize_t nbl_serv_vf_vlan_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops; + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + char vproto_ext[5] = {'\0'}; + __be16 vlan_proto; + u16 vlan_id; + u8 qos; int ret = 0; - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - - ret = disp_ops->destroy_msix_map(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - if (ret) - return -EIO; + ret = sscanf(buf, "%hu:%hhu:802.%4s", &vlan_id, &qos, vproto_ext); + if (ret == 3) { + if ((strcmp(vproto_ext, "1AD") == 0) || + (strcmp(vproto_ext, "1ad") == 0)) + vlan_proto = htons(ETH_P_8021AD); + else if ((strcmp(vproto_ext, "1Q") == 0) || + (strcmp(vproto_ext, "1q") == 0)) + vlan_proto = htons(ETH_P_8021Q); + else + return -EINVAL; + } else { + ret = sscanf(buf, "%hu:%hhu", &vlan_id, &qos); + if (ret != 2) + return -EINVAL; + vlan_proto = htons(ETH_P_8021Q); + } - return 0; + ret = nbl_serv_set_vf_vlan(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, vlan_id, qos, vlan_proto); + return ret ? 
ret : count; } -static int nbl_serv_enable_mailbox_irq(void *priv, u16 vector_id, bool enable_msix) +static ssize_t nbl_serv_vf_max_tx_rate_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops; - int ret = 0; + return sprintf(buf, "usage: write RATE to set max_tx_rate(Mbps)\n"); +} - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); +static ssize_t nbl_serv_vf_max_tx_rate_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + int max_tx_rate = 0, ret = 0; - ret = disp_ops->enable_mailbox_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - vector_id, enable_msix); + ret = kstrtos32(buf, 0, &max_tx_rate); if (ret) - return -EIO; + return -EINVAL; - return 0; + ret = nbl_serv_set_vf_rate(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, 0, max_tx_rate); + return ret ? ret : count; } -static int nbl_serv_enable_abnormal_irq(void *priv, u16 vector_id, bool enable_msix) +static ssize_t nbl_serv_vf_spoofchk_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops; + return sprintf(buf, "usage: write to set vf spoof check\n"); +} + +static ssize_t nbl_serv_vf_spoofchk_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + bool enable = false; int ret = 0; - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + if (sysfs_streq(buf, "ON")) + enable = true; + else if (sysfs_streq(buf, "OFF")) + enable = false; + else + return -EINVAL; - ret = disp_ops->enable_abnormal_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - vector_id, enable_msix); - if (ret) - return -EIO; + ret = nbl_serv_set_vf_spoofchk(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, enable); + return ret ? ret : count; +} - return 0; +static ssize_t nbl_serv_vf_link_state_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "usage: write to set vf link state\n"); } -static irqreturn_t nbl_serv_clean_rings(int __always_unused irq, void *data) +static ssize_t nbl_serv_vf_link_state_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) { - struct nbl_serv_vector *vector = (struct nbl_serv_vector *)data; + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + int state = 0, ret = 0; - napi_schedule_irqoff(vector->napi); + if (sysfs_streq(buf, "AUTO")) + state = IFLA_VF_LINK_STATE_AUTO; + else if (sysfs_streq(buf, "ENABLE")) + state = IFLA_VF_LINK_STATE_ENABLE; + else if (sysfs_streq(buf, "DISABLE")) + state = IFLA_VF_LINK_STATE_DISABLE; + else + return -EINVAL; - return IRQ_HANDLED; + ret = nbl_serv_set_vf_link_state(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, state); + return ret ? 
ret : count; } -static int nbl_serv_request_net_irq(void *priv, struct nbl_msix_info_param *msix_info) +static ssize_t nbl_serv_vf_stats_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); - struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - struct device *dev = NBL_COMMON_TO_DEV(common); - struct nbl_serv_ring *tx_ring, *rx_ring; - struct nbl_serv_vector *vector; - u32 irq_num; - int i, ret = 0; - - for (i = 0; i < ring_mgt->tx_ring_num; i++) { - tx_ring = &ring_mgt->tx_rings[i]; - rx_ring = &ring_mgt->rx_rings[i]; - vector = &ring_mgt->vectors[i]; - vector->tx_ring = tx_ring; - vector->rx_ring = rx_ring; + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct net_device *netdev = serv_mgt->net_resource_mgt->netdev; + struct ifla_vf_stats stats = { 0 }; + int ret = 0; - irq_num = msix_info->msix_entries[i].vector; - snprintf(vector->name, sizeof(vector->name) - 1, "%s%03d-%s-%02u", "NBL", - NBL_COMMON_TO_VSI_ID(common), "TxRx", i); - ret = devm_request_irq(dev, irq_num, nbl_serv_clean_rings, 0, - vector->name, vector); - if (ret) { - nbl_err(common, NBL_DEBUG_INTR, - "TxRx Queue %u requests MSIX irq failed %d", i, ret); - goto request_irq_err; - } + ret = nbl_serv_get_vf_stats(netdev, vf_info->vf_id, &stats); + if (ret) { + netdev_info(netdev, "get_vf %d stats failed %d\n", vf_info->vf_id, ret); + return ret; } - net_resource_mgt->num_net_msix = msix_info->msix_num; - - return 0; - -request_irq_err: - while (--i + 1) { - vector = &ring_mgt->vectors[i]; + return scnprintf(buf, PAGE_SIZE, + "tx_packets : %llu\n" + "tx_bytes : %llu\n" + "tx_dropped : %llu\n" + "rx_packets : %llu\n" + "rx_bytes : %llu\n" + "rx_dropped : %llu\n" + "rx_broadcast : %llu\n" + "rx_multicast : %llu\n", + stats.tx_packets, stats.tx_bytes, stats.tx_dropped, + stats.rx_packets, stats.rx_bytes, stats.rx_dropped, + stats.broadcast, stats.multicast + ); +} + +static ssize_t nbl_serv_vf_tx_rate_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, tx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int rate = net_resource_mgt->vf_info[vf_info->vf_id].meter_tx_rate; - irq_num = msix_info->msix_entries[i].vector; - devm_free_irq(dev, irq_num, vector); - } - return ret; + return sprintf(buf, "max tx rate(Mbps): %d\n", rate); } -static void nbl_serv_free_net_irq(void *priv, struct nbl_msix_info_param *msix_info) +static ssize_t nbl_serv_vf_tx_rate_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); - struct device *dev = NBL_COMMON_TO_DEV(common); - struct nbl_serv_vector *vector; - u32 irq_num; - int i; + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, tx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt 
*)vf_info->priv; + int tx_rate = 0, ret = 0; - for (i = 0; i < ring_mgt->tx_ring_num; i++) { - vector = &ring_mgt->vectors[i]; + ret = kstrtos32(buf, 0, &tx_rate); + if (ret) + return -EINVAL; - irq_num = msix_info->msix_entries[i].vector; - devm_free_irq(dev, irq_num, vector); - } + ret = nbl_serv_set_vf_tx_rate(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, tx_rate, 0, false); + return ret ? ret : count; } -static u16 nbl_serv_get_global_vector(void *priv, u16 local_vector_id) +static ssize_t nbl_serv_vf_tx_burst_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, tx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int burst = net_resource_mgt->vf_info[vf_info->vf_id].meter_tx_burst; - return disp_ops->get_global_vector(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), local_vector_id); + return sprintf(buf, "max burst depth %d\n", burst); } -static u16 nbl_serv_get_msix_entry_id(void *priv, u16 local_vector_id) +static ssize_t nbl_serv_vf_tx_burst_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, tx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int burst = 0, ret = 0; + int rate = net_resource_mgt->vf_info[vf_info->vf_id].meter_tx_rate; - return disp_ops->get_msix_entry_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), local_vector_id); + ret = kstrtos32(buf, 0, &burst); + if (ret) + return -EINVAL; + if (burst >= NBL_MAX_BURST) + return -EINVAL; + + if (rate || !burst) + ret = nbl_serv_set_vf_tx_rate(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, rate, burst, true); + else + return -EINVAL; + + return ret ? 
ret : count; } -static u16 nbl_serv_get_vsi_id(void *priv, u16 func_id, u16 type) +static ssize_t nbl_serv_vf_rx_rate_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, rx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int rate = net_resource_mgt->vf_info[vf_info->vf_id].meter_rx_rate; - return disp_ops->get_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, type); + return sprintf(buf, "max rx rate(Mbps): %d\n", rate); } -static void nbl_serv_get_eth_id(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id) +static ssize_t nbl_serv_vf_rx_rate_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, rx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + int rx_rate = 0, ret = 0; + + ret = kstrtos32(buf, 0, &rx_rate); + if (ret) + return -EINVAL; - return disp_ops->get_eth_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, eth_mode, eth_id); + ret = nbl_serv_set_vf_rx_rate(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, rx_rate, 0, false); + return ret ? ret : count; } -static void nbl_serv_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +static ssize_t nbl_serv_vf_rx_burst_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, rx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int burst = net_resource_mgt->vf_info[vf_info->vf_id].meter_rx_burst; - disp_ops->get_user_queue_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - queue_num, queue_size, vsi_id); + return sprintf(buf, "max burst depth %d\n", burst); } -static int nbl_serv_enable_lag_protocol(void *priv, u16 vsi_id, bool lag_en) +static ssize_t nbl_serv_vf_rx_burst_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - int ret = 0; + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, rx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int burst = 0, ret = 0; + int rate = net_resource_mgt->vf_info[vf_info->vf_id].meter_rx_rate; - if (lag_en) - ret = disp_ops->add_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + ret = kstrtos32(buf, 0, &burst); + if (ret) + return -EINVAL; + if (burst > NBL_MAX_BURST) + return -EINVAL; + + if (rate || !burst) + ret = 
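+		      /* burst-only update: reapply the currently configured
+		       * rate together with the new burst value
+		       */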
nbl_serv_set_vf_rx_rate(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, rate, burst, true); else - disp_ops->del_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + return -EINVAL; - return ret; + return ret ? ret : count; }
-static void nbl_serv_net_stats_update_task(struct work_struct *work) +static ssize_t nbl_serv_vf_config_show(struct kobject *kobj, struct attribute *attr, char *buf) { - struct nbl_serv_net_resource_mgt *serv_net_resource_mgt = - container_of(work, struct nbl_serv_net_resource_mgt, net_stats_update); - struct nbl_service_mgt *serv_mgt; + struct kobj_attribute *kattr = container_of(attr, struct kobj_attribute, attr); - serv_mgt = serv_net_resource_mgt->serv_mgt; + if (kattr->show) + return kattr->show(kobj, kattr, buf); - nbl_serv_update_stats(serv_mgt, false); + return -EIO; }
-static void nbl_serv_rx_mode_async_task(struct work_struct *work) +static ssize_t nbl_serv_vf_config_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) { - struct nbl_serv_net_resource_mgt *serv_net_resource_mgt = - container_of(work, struct nbl_serv_net_resource_mgt, rx_mode_async); + struct kobj_attribute *kattr = container_of(attr, struct kobj_attribute, attr); - if (serv_net_resource_mgt->rxmode_set_required & NBL_FLAG_AQ_MODIFY_MAC_FILTER) - nbl_modify_submacs(serv_net_resource_mgt); + if (kattr->store) + return kattr->store(kobj, kattr, buf, count); - if (serv_net_resource_mgt->rxmode_set_required & NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE) - nbl_modify_promisc_mode(serv_net_resource_mgt); + return -EIO; }
-static void nbl_serv_net_task_service_timer(struct timer_list *t) +static void dir_release(struct kobject *kobj) { - struct nbl_serv_net_resource_mgt *net_resource_mgt = - from_timer(net_resource_mgt, t, serv_timer); - - mod_timer(&net_resource_mgt->serv_timer, - round_jiffies(net_resource_mgt->serv_timer_period + jiffies)); - nbl_common_queue_work(&net_resource_mgt->net_stats_update, false, false); + /* Nothing to free: the kobject is embedded in struct nbl_serv_vf_info */ }
-static void nbl_serv_setup_flow_mgt(struct nbl_serv_flow_mgt *flow_mgt) -{ - INIT_LIST_HEAD(&flow_mgt->vlan_list); - INIT_LIST_HEAD(&flow_mgt->submac_list); -} +static struct kobj_attribute nbl_attr_vf_mac = { + .attr = {.name = "mac", + .mode = 0644}, + .show = nbl_serv_vf_mac_show, + .store = nbl_serv_vf_mac_store, +};
-static void nbl_serv_register_restore_netdev_queue(struct nbl_service_mgt *serv_mgt) -{ - struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); +static struct kobj_attribute nbl_attr_vf_vlan = { + .attr = {.name = "vlan", + .mode = 0644}, + .show = nbl_serv_vf_vlan_show, + .store = nbl_serv_vf_vlan_store, +}; - if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), - NBL_CHAN_TYPE_MAILBOX)) - return; +static struct kobj_attribute nbl_attr_vf_trust = { + .attr = {.name = "trust", + .mode = 0644}, + .show = nbl_serv_vf_trust_show, + .store = nbl_serv_vf_trust_store, +}; - chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), - NBL_CHAN_MSG_RESTORE_NETDEV_QUEUE, - nbl_serv_chan_restore_netdev_queue_resp, serv_mgt); +static struct kobj_attribute nbl_attr_vf_max_tx_rate = { + .attr = {.name = "max_tx_rate", + .mode = 0644}, + .show = nbl_serv_vf_max_tx_rate_show, + .store = nbl_serv_vf_max_tx_rate_store, +}; - chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), - NBL_CHAN_MSG_RESTART_NETDEV_QUEUE, - nbl_serv_chan_restart_netdev_queue_resp, serv_mgt); -} +static struct kobj_attribute nbl_attr_vf_spoofcheck = { + .attr = {.name = "spoofcheck", + .mode = 0644}, + .show = 
nbl_serv_vf_spoofchk_show, + .store = nbl_serv_vf_spoofchk_store, +}; -static void nbl_serv_remove_net_resource_mgt(void *priv) -{ - struct device *dev; - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_serv_net_resource_mgt *net_resource_mgt; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); +static struct kobj_attribute nbl_attr_vf_tx_rate = { + .attr = {.name = "rate", + .mode = 0644}, + .show = nbl_serv_vf_tx_rate_show, + .store = nbl_serv_vf_tx_rate_store, +}; - net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - dev = NBL_COMMON_TO_DEV(common); +static struct kobj_attribute nbl_attr_vf_tx_burst = { + .attr = {.name = "burst", + .mode = 0644}, + .show = nbl_serv_vf_tx_burst_show, + .store = nbl_serv_vf_tx_burst_store, +}; - if (net_resource_mgt) { - del_timer_sync(&net_resource_mgt->serv_timer); - nbl_common_release_task(&net_resource_mgt->rx_mode_async); - nbl_common_release_task(&net_resource_mgt->net_stats_update); - nbl_common_release_task(&net_resource_mgt->tx_timeout); - nbl_free_filter(net_resource_mgt); - devm_kfree(dev, net_resource_mgt); - NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) = NULL; - } -} +static struct kobj_attribute nbl_attr_vf_rx_rate = { + .attr = {.name = "rate", + .mode = 0644}, + .show = nbl_serv_vf_rx_rate_show, + .store = nbl_serv_vf_rx_rate_store, +}; -static int nbl_serv_phy_init(struct nbl_serv_net_resource_mgt *net_resource_mgt) -{ - struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - u8 eth_id = NBL_COMMON_TO_ETH_ID(common); - struct nbl_dispatch_ops *disp_ops; - int ret = 0; +static struct kobj_attribute nbl_attr_vf_rx_burst = { + .attr = {.name = "burst", + .mode = 0644}, + .show = nbl_serv_vf_rx_burst_show, + .store = nbl_serv_vf_rx_burst_store, +}; - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); +static struct kobj_attribute nbl_attr_vf_link_state = { + .attr = {.name = "link_state", + .mode = 0644}, + .show = nbl_serv_vf_link_state_show, + .store = nbl_serv_vf_link_state_store, +}; - disp_ops->get_phy_caps(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - eth_id, &net_resource_mgt->phy_caps); +static struct kobj_attribute nbl_attr_vf_stats = { + .attr = {.name = "stats", + .mode = 0444}, + .show = nbl_serv_vf_stats_show, +}; - disp_ops->get_phy_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - eth_id, &net_resource_mgt->phy_state); +static struct attribute *nbl_vf_config_attrs[] = { + &nbl_attr_vf_mac.attr, + &nbl_attr_vf_vlan.attr, + &nbl_attr_vf_trust.attr, + &nbl_attr_vf_max_tx_rate.attr, + &nbl_attr_vf_spoofcheck.attr, + &nbl_attr_vf_link_state.attr, + &nbl_attr_vf_stats.attr, + NULL, +}; - return ret; -} +ATTRIBUTE_GROUPS(nbl_vf_config); -static int nbl_serv_setup_net_resource_mgt(void *priv, struct net_device *netdev) -{ - struct device *dev; - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_serv_net_resource_mgt *net_resource_mgt; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); +static struct attribute *nbl_vf_tx_config_attrs[] = { + &nbl_attr_vf_tx_rate.attr, + &nbl_attr_vf_tx_burst.attr, + NULL, +}; - dev = NBL_COMMON_TO_DEV(common); - net_resource_mgt = devm_kzalloc(dev, sizeof(struct nbl_serv_net_resource_mgt), GFP_KERNEL); - if (!net_resource_mgt) - return -ENOMEM; +ATTRIBUTE_GROUPS(nbl_vf_tx_config); - net_resource_mgt->netdev = netdev; - net_resource_mgt->serv_mgt = serv_mgt; - NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) = net_resource_mgt; +static struct attribute 
*nbl_vf_rx_config_attrs[] = { + &nbl_attr_vf_rx_rate.attr, + &nbl_attr_vf_rx_burst.attr, + NULL, +}; - nbl_serv_phy_init(net_resource_mgt); - nbl_serv_register_restore_netdev_queue(serv_mgt); - timer_setup(&net_resource_mgt->serv_timer, nbl_serv_net_task_service_timer, 0); +ATTRIBUTE_GROUPS(nbl_vf_rx_config); - net_resource_mgt->serv_timer_period = HZ; - nbl_common_alloc_task(&net_resource_mgt->rx_mode_async, nbl_serv_rx_mode_async_task); - nbl_common_alloc_task(&net_resource_mgt->net_stats_update, nbl_serv_net_stats_update_task); - nbl_common_alloc_task(&net_resource_mgt->tx_timeout, nbl_serv_handle_tx_timeout); +static const struct sysfs_ops nbl_sysfs_ops_vf = { + .show = nbl_serv_vf_config_show, + .store = nbl_serv_vf_config_store, +}; - INIT_LIST_HEAD(&net_resource_mgt->mac_filter_list); - INIT_LIST_HEAD(&net_resource_mgt->indr_dev_priv_list); - spin_lock_init(&net_resource_mgt->mac_vlan_list_lock); - spin_lock_init(&net_resource_mgt->current_netdev_promisc_flags_lock); - net_resource_mgt->get_stats_jiffies = jiffies; +static const struct kobj_type nbl_kobj_vf_type = { + .sysfs_ops = &nbl_sysfs_ops_vf, + .default_groups = nbl_vf_config_groups, +}; - mod_timer(&net_resource_mgt->serv_timer, - round_jiffies(jiffies + net_resource_mgt->serv_timer_period)); +static const struct kobj_type nbl_kobj_dir = { + .release = dir_release, +}; - return 0; -} +static const struct kobj_type nbl_kobj_vf_tx_type = { + .sysfs_ops = &nbl_sysfs_ops_vf, + .default_groups = nbl_vf_tx_config_groups, +}; -static int nbl_serv_enable_adminq_irq(void *priv, u16 vector_id, bool enable_msix) +static const struct kobj_type nbl_kobj_vf_rx_type = { + .sysfs_ops = &nbl_sysfs_ops_vf, + .default_groups = nbl_vf_rx_config_groups, +}; + +static int nbl_serv_setup_vf_sysfs(struct nbl_service_mgt *serv_mgt) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops; - int ret = 0; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; + int i = 0, ret = 0; + int index = 0; - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + for (i = 0; i < net_resource_mgt->num_vfs; i++) { + index = i; + vf_info[i].priv = serv_mgt; + vf_info[i].vf_id = (u16)i; - ret = disp_ops->enable_adminq_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - vector_id, enable_msix); - if (ret) - return -EIO; + ret = kobject_init_and_add(&vf_info[i].kobj, &nbl_kobj_vf_type, + net_resource_mgt->sriov_kobj, "%d", i); + if (ret) + goto err; + + ret = kobject_init_and_add(&vf_info[i].meters_kobj, &nbl_kobj_dir, + &vf_info[i].kobj, "meters"); + if (ret) + goto err; + ret = kobject_init_and_add(&vf_info[i].rx_kobj, &nbl_kobj_dir, + &vf_info[i].meters_kobj, "rx"); + if (ret) + goto err; + ret = kobject_init_and_add(&vf_info[i].tx_kobj, &nbl_kobj_dir, + &vf_info[i].meters_kobj, "tx"); + if (ret) + goto err; + ret = kobject_init_and_add(&vf_info[i].rx_bps_kobj, &nbl_kobj_vf_rx_type, + &vf_info[i].rx_kobj, "bps"); + if (ret) + goto err; + ret = kobject_init_and_add(&vf_info[i].tx_bps_kobj, &nbl_kobj_vf_tx_type, + &vf_info[i].tx_kobj, "bps"); + if (ret) + goto err; + } return 0; -} -static u8 __iomem *nbl_serv_get_hw_addr(void *priv, size_t *size) -{ - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); +err: + for (i = 0; i <= index; i++) { + if (vf_info[i].tx_bps_kobj.state_initialized) + kobject_put(&vf_info[i].tx_bps_kobj); + if 
(vf_info[i].rx_bps_kobj.state_initialized) + kobject_put(&vf_info[i].rx_bps_kobj); + if (vf_info[i].tx_kobj.state_initialized) + kobject_put(&vf_info[i].tx_kobj); + if (vf_info[i].rx_kobj.state_initialized) + kobject_put(&vf_info[i].rx_kobj); + if (vf_info[i].meters_kobj.state_initialized) + kobject_put(&vf_info[i].meters_kobj); + if (vf_info[i].kobj.state_initialized) + kobject_put(&vf_info[i].kobj); + } - return disp_ops->get_hw_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), size); + return ret; }
-static u64 nbl_serv_get_real_hw_addr(void *priv, u16 vsi_id) +static void nbl_serv_remove_vf_sysfs(struct nbl_service_mgt *serv_mgt) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; + int i = 0; - return disp_ops->get_real_hw_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + for (i = 0; i < net_resource_mgt->num_vfs; i++) { + kobject_put(&vf_info[i].tx_bps_kobj); + kobject_put(&vf_info[i].rx_bps_kobj); + kobject_put(&vf_info[i].tx_kobj); + kobject_put(&vf_info[i].rx_kobj); + kobject_put(&vf_info[i].meters_kobj); + kobject_put(&vf_info[i].kobj); + } }
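For orientation, a sketch of the per-VF layout that nbl_serv_setup_vf_sysfs() above builds and nbl_serv_remove_vf_sysfs() releases (comment only; the "sriov" root itself is created under the PCI device node in nbl_serv_setup_vf_resource() further down):

/*
 * sriov/<vf_id>/                 mac, vlan, trust, max_tx_rate,
 *                                spoofcheck, link_state, stats
 * sriov/<vf_id>/meters/tx/bps/   rate, burst
 * sriov/<vf_id>/meters/rx/bps/   rate, burst
 */

-static u16 nbl_serv_get_function_id(void *priv, u16 vsi_id) -{ - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - - return disp_ops->get_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); -} -static void nbl_serv_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +static int nbl_serv_setup_vf_config(void *priv, int num_vfs, bool is_flush) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u16 func_id = U16_MAX; + u16 vlan_tci; + bool should_notify; + int i, ret = 0; - return disp_ops->get_real_bdf(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, - bus, dev, function); -} - -static int nbl_serv_get_devlink_info(struct devlink *devlink, struct devlink_info_req *req, - struct netlink_ext_ack *extack) -{ - struct nbl_devlink_priv *priv = devlink_priv(devlink); - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv->priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - char firmware_version[NBL_DEVLINK_INFO_FRIMWARE_VERSION_LEN] = {0}; - int ret = 0; + net_resource_mgt->num_vfs = num_vfs; - disp_ops->get_firmware_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - firmware_version, sizeof(firmware_version)); - if (ret) - return ret; + for (i = 0; i < net_resource_mgt->num_vfs; i++) { + func_id = nbl_serv_get_vf_function_id(serv_mgt, i); + if (func_id == U16_MAX) { + nbl_err(common, NBL_DEBUG_MAIN, "vf id %d invalid\n", i); + return -EINVAL; + } - ret = devlink_info_version_fixed_put(req, "FW Version:", firmware_version); - if (ret) - return ret; + disp_ops->register_func_mac(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vf_info[i].mac, func_id); - return ret; -} + vlan_tci = vf_info[i].vlan | (u16)(vf_info[i].vlan_qos << VLAN_PRIO_SHIFT); + ret = disp_ops->register_func_vlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, + vlan_tci, vf_info[i].vlan_proto, 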
+ &should_notify); + if (ret) + break; -/* Why do we need this? - * Because the original function in kernel cannot handle when we set subvendor and subdevice - * to be 0xFFFF, so write a correct one. - */ -bool nbl_serv_pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record) -{ - struct pci_dev *pdev = to_pci_dev(context->dev); - struct nbl_serv_pldm_pci_record_id id = { - .vendor = PCI_ANY_ID, - .device = PCI_ANY_ID, - .subsystem_vendor = PCI_ANY_ID, - .subsystem_device = PCI_ANY_ID, - }; - struct pldmfw_desc_tlv *desc; - bool ret; + ret = disp_ops->register_func_trust(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, vf_info[i].trusted, + &should_notify); - list_for_each_entry(desc, &record->descs, entry) { - u16 value; - u16 *ptr; + if (ret) + break; - switch (desc->type) { - case PLDM_DESC_ID_PCI_VENDOR_ID: - ptr = &id.vendor; + ret = disp_ops->register_func_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, + vf_info[i].max_tx_rate); + if (ret) break; - case PLDM_DESC_ID_PCI_DEVICE_ID: - ptr = &id.device; + + ret = disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, vf_info[i].max_tx_rate, 0); + if (ret) break; - case PLDM_DESC_ID_PCI_SUBVENDOR_ID: - ptr = &id.subsystem_vendor; + + ret = disp_ops->set_rx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, vf_info[i].meter_rx_rate, 0); + if (ret) break; - case PLDM_DESC_ID_PCI_SUBDEV_ID: - ptr = &id.subsystem_device; + + ret = disp_ops->set_vf_spoof_check(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), i, + vf_info[i].spoof_check); + if (ret) break; - default: - /* Skip unrelated TLVs */ - continue; - } - value = get_unaligned_le16(desc->data); - /* A value of zero for one of the descriptors is sometimes - * used when the record should ignore this field when matching - * device. For example if the record applies to any subsystem - * device or vendor. + /* No need to notify the VF: it will pick up the forced link state at probe. + * Here we only flush the config. 
*/ - if (value) - *ptr = (int)value; - else - *ptr = PCI_ANY_ID; + ret = disp_ops->register_func_link_forced(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, vf_info[i].state, + &should_notify); + if (ret) + break; } - if ((id.vendor == (u16)PCI_ANY_ID || id.vendor == pdev->vendor) && - (id.device == (u16)PCI_ANY_ID || id.device == pdev->device) && - (id.subsystem_vendor == (u16)PCI_ANY_ID || - id.subsystem_vendor == pdev->subsystem_vendor) && - (id.subsystem_device == (u16)PCI_ANY_ID || - id.subsystem_device == pdev->subsystem_device)) - ret = true; - else - ret = false; + if (!ret && net_resource_mgt->sriov_kobj && !is_flush) + ret = nbl_serv_setup_vf_sysfs(serv_mgt); + + if (ret) + net_resource_mgt->num_vfs = 0; return ret; } -static int nbl_serv_send_package_data(struct pldmfw *context, const u8 *data, u16 length) +static void nbl_serv_remove_vf_config(void *priv) { - struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv, - context); - struct nbl_service_mgt *serv_mgt = priv->serv_mgt; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - int ret = 0; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; + int i; - nbl_info(common, NBL_DEBUG_DEVLINK, "Send package data"); + nbl_serv_remove_vf_sysfs(serv_mgt); - ret = disp_ops->flash_lock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - if (ret) - return ret; + for (i = 0; i < net_resource_mgt->num_vfs; i++) + memset(&vf_info[i], 0, sizeof(vf_info[i])); - ret = disp_ops->flash_prepare(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - if (ret) - disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + nbl_serv_setup_vf_config(priv, net_resource_mgt->num_vfs, true); - return 0; + net_resource_mgt->num_vfs = 0; } -static int nbl_serv_send_component_table(struct pldmfw *context, struct pldmfw_component *component, - u8 transfer_flags) +static void nbl_serv_register_dev_name(void *priv, u16 vsi_id, char *name) { - struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv, - context); - struct nbl_service_mgt *serv_mgt = priv->serv_mgt; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - - nbl_info(common, NBL_DEBUG_DEVLINK, "Send component table, id %d", component->identifier); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - return 0; + disp_ops->register_dev_name(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, name); } -static int nbl_serv_flash_component(struct pldmfw *context, struct pldmfw_component *component) +static void nbl_serv_get_dev_name(void *priv, u16 vsi_id, char *name) { - struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv, - context); - struct nbl_service_mgt *serv_mgt = priv->serv_mgt; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - u32 component_crc, calculated_crc; - size_t data_len = component->component_size - NBL_DEVLINK_FLASH_COMPONENT_CRC_SIZE; - int ret = 0; - - nbl_info(common, NBL_DEBUG_DEVLINK, "Flash component table, id %d", component->identifier); - - component_crc = *(u32 *)((u8 
*)component->component_data + data_len); - calculated_crc = crc32_le(~0, component->component_data, data_len) ^ ~0; - if (component_crc != calculated_crc) { - nbl_err(common, NBL_DEBUG_DEVLINK, "Flash component crc error"); - disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - return -EFAULT; - } - - ret = disp_ops->flash_image(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), component->identifier, - component->component_data, data_len); - if (ret) - disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - return ret; + disp_ops->get_dev_name(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, name); }
-static int nbl_serv_finalize_update(struct pldmfw *context) +static int nbl_serv_setup_vf_resource(void *priv, int num_vfs) { - struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv, - context); - struct nbl_service_mgt *serv_mgt = priv->serv_mgt; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - int ret = 0; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_vf_info *vf_info; + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + int i; - nbl_info(common, NBL_DEBUG_DEVLINK, "Flash activate"); + net_resource_mgt->total_vfs = num_vfs; - ret = disp_ops->flash_activate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + net_resource_mgt->vf_info = devm_kcalloc(dev, net_resource_mgt->total_vfs, + sizeof(struct nbl_serv_vf_info), GFP_KERNEL); + if (!net_resource_mgt->vf_info) + return -ENOMEM; - disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - return ret; -} + vf_info = net_resource_mgt->vf_info; + for (i = 0; i < net_resource_mgt->total_vfs; i++) { + vf_info[i].state = IFLA_VF_LINK_STATE_AUTO; + vf_info[i].spoof_check = false; + } -static const struct pldmfw_ops nbl_update_fw_ops = { - .match_record = nbl_serv_pldmfw_op_pci_match_record, - .send_package_data = nbl_serv_send_package_data, - .send_component_table = nbl_serv_send_component_table, - .flash_component = nbl_serv_flash_component, - .finalize_update = nbl_serv_finalize_update, -}; + net_resource_mgt->sriov_kobj = kobject_create_and_add("sriov", &dev->kobj); + if (!net_resource_mgt->sriov_kobj) + nbl_warn(NBL_SERV_MGT_TO_COMMON(serv_mgt), NBL_DEBUG_MAIN, + "Failed to create sriov sysfs"); + + return 0; +}
-static int nbl_serv_update_firmware(struct nbl_service_mgt *serv_mgt, const struct firmware *fw, - struct netlink_ext_ack *extack) +static void nbl_serv_remove_vf_resource(void *priv) { - struct nbl_serv_update_fw_priv priv = {0}; - int ret = 0; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); - priv.context.ops = &nbl_update_fw_ops; - priv.context.dev = NBL_SERV_MGT_TO_DEV(serv_mgt); - priv.extack = extack; - priv.serv_mgt = serv_mgt; + nbl_serv_remove_vf_config(priv); - ret = pldmfw_flash_image(&priv.context, fw); + kobject_put(net_resource_mgt->sriov_kobj); - return ret; + if (net_resource_mgt->vf_info) { + devm_kfree(dev, net_resource_mgt->vf_info); + net_resource_mgt->vf_info = NULL; + } }
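A minimal sketch of how a PCI sriov_configure callback would be expected to pair the two entry points above (names and call sites here are assumptions for illustration; the real wiring lives in the dev layer of this driver):

static int nbl_sriov_configure_sketch(struct pci_dev *pdev, int num_vfs)
{
	void *serv_mgt = pci_get_drvdata(pdev);	/* hypothetical lookup */
	int ret;

	if (!num_vfs) {
		pci_disable_sriov(pdev);
		nbl_serv_remove_vf_resource(serv_mgt);
		return 0;
	}

	ret = nbl_serv_setup_vf_resource(serv_mgt, num_vfs);
	if (ret)
		return ret;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret) {
		nbl_serv_remove_vf_resource(serv_mgt);
		return ret;
	}

	return num_vfs;
}

-static int nbl_serv_update_devlink_flash(struct devlink *devlink, - struct devlink_flash_update_params *params, - struct netlink_ext_ack *extack) +static void nbl_serv_cfg_fd_update_event(void *priv, bool enable) { - 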
struct nbl_devlink_priv *priv = devlink_priv(devlink); - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv->priv; - int ret = 0; - - devlink_flash_update_status_notify(devlink, "Flash start", NULL, 0, 0); - - ret = nbl_serv_update_firmware(serv_mgt, params->fw, extack); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - if (ret) - devlink_flash_update_status_notify(devlink, "Flash failed", NULL, 0, 0); - else - devlink_flash_update_status_notify(devlink, - "Flash finished, please reboot to take effect", - NULL, 0, 0); - return ret; + disp_ops->cfg_fd_update_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); }
-static u32 nbl_serv_get_adminq_tx_buf_size(void *priv) +static void nbl_serv_get_xdp_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - return disp_ops->get_adminq_tx_buf_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + disp_ops->get_xdp_queue_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), queue_num, queue_size, + vsi_id); }
-static bool nbl_serv_check_fw_heartbeat(void *priv) +static void nbl_serv_assign_xdp_prog(struct net_device *netdev, struct bpf_prog *prog) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct bpf_prog *old_prog; - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + old_prog = xchg(&ring_mgt->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); - return disp_ops->check_fw_heartbeat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + disp_ops->set_rings_xdp_prog(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), (void *)prog); }
-static bool nbl_serv_check_fw_reset(void *priv) +static int nbl_serv_setup_xdp_prog(struct net_device *netdev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + int was_running; + int err; - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + if (prog && test_bit(NBL_USER, adapter->state)) + return -EIO; + + if (!ring_mgt->vsi_info[NBL_VSI_XDP].ring_num) + return -ENOSPC; + + was_running = netif_running(netdev); + if (was_running) { + err = nbl_serv_netdev_stop(netdev); + if (err) { + netdev_err(netdev, "Netdev stop failed while setting up prog\n"); + return err; + } + } + + nbl_serv_assign_xdp_prog(netdev, prog); + + if (was_running) { + err = nbl_serv_netdev_open(netdev); + if (err) { + netdev_err(netdev, "Netdev open failed after setting up prog\n"); + return err; + } + } + + if (prog) + set_bit(NBL_XDP, adapter->state); + else + clear_bit(NBL_XDP, adapter->state); - return disp_ops->check_fw_reset(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + return 0; }
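A short orientation note on how these handlers are reached (comment only; the net_device_ops wiring is outside this hunk):

/*
 * Attach path sketch: userspace attaches a program (e.g. "ip link set
 * dev eth0 xdpdrv obj prog.o sec xdp"), the netdev core calls the
 * driver's .ndo_bpf hook with XDP_SETUP_PROG, and nbl_serv_set_xdp()
 * below forwards to nbl_serv_setup_xdp_prog() above.
 */

-static void nbl_serv_get_common_irq_num(void *priv, struct nbl_common_irq_num *irq_num) +static int nbl_serv_set_xdp(struct net_device *netdev, struct netdev_bpf *xdp) { - struct nbl_service_mgt *serv_mgt = 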
(struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - - irq_num->mbx_irq_num = disp_ops->get_mbx_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + switch (xdp->command) { + case XDP_SETUP_PROG: + return nbl_serv_setup_xdp_prog(netdev, xdp->prog, xdp->extack); + default: + return -EINVAL; + } } -static void nbl_serv_get_ctrl_irq_num(void *priv, struct nbl_ctrl_irq_num *irq_num) +static void nbl_serv_set_hw_status(void *priv, enum nbl_hw_status hw_status) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - irq_num->adminq_irq_num = disp_ops->get_adminq_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - irq_num->abnormal_irq_num = - disp_ops->get_abnormal_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + disp_ops->set_hw_status(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), hw_status); } -static u32 nbl_serv_get_chip_temperature(void *priv) +static void nbl_serv_get_active_func_bitmaps(void *priv, unsigned long *bitmap, int max_func) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - return disp_ops->get_chip_temperature(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + disp_ops->get_active_func_bitmaps(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), bitmap, max_func); } -static u32 nbl_serv_get_chip_temperature_max(void *priv) +static void nbl_serv_get_rdma_rate(void *priv, int *rdma_rate) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - return disp_ops->get_chip_temperature_max(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + *rdma_rate = qos_info->rdma_rate; } -static u32 nbl_serv_get_chip_temperature_crit(void *priv) +static void nbl_serv_get_net_rate(void *priv, int *net_rate) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - return disp_ops->get_chip_temperature_crit(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + *net_rate = qos_info->net_rate; } -static int nbl_serv_get_module_temperature(void *priv, u8 eth_id, enum nbl_module_temp_type type) +static void nbl_serv_get_rdma_bw(void *priv, int *rdma_bw) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - return disp_ops->get_module_temperature(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, type); + *rdma_bw = qos_info->rdma_bw; } -static int nbl_serv_get_port_attributes(void *priv) +static int nbl_serv_configure_rdma_bw(void *priv, u8 eth_id, int rdma_bw) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops; - int ret = 0; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = 
NBL_SERV_MGT_TO_COMMON(serv_mgt); + int ret; - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + ret = disp_ops->configure_rdma_bw(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, rdma_bw); + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure rdma bw failed ret %d\n", ret); + return ret; + } - ret = disp_ops->get_port_attributes(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - if (ret) - return -EIO; + qos_info->rdma_bw = rdma_bw; return 0; }
-static int nbl_serv_update_ring_num(void *priv) +static ssize_t nbl_serv_pfc_show(void *priv, u8 eth_id, char *buf) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - return disp_ops->update_ring_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + return scnprintf(buf, PAGE_SIZE, "%d,%d,%d,%d,%d,%d,%d,%d\n", + qos_info->pfc[0], qos_info->pfc[1], + qos_info->pfc[2], qos_info->pfc[3], + qos_info->pfc[4], qos_info->pfc[5], + qos_info->pfc[6], qos_info->pfc[7]); }
-static int nbl_serv_enable_port(void *priv, bool enable) +static int nbl_serv_configure_pfc(void *priv, u8 eth_id, u8 *pfc) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops; - int ret = 0; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + bool changed = false; + int ret; + int i; - disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) { + if (pfc[i] != qos_info->pfc[i]) { + changed = true; + break; + } + } - ret = disp_ops->enable_port(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); - if (ret) - return -EIO; + if (!changed) + return 0; - return 0; -} + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, pfc, net_resource_mgt->qos_info.trust_mode, + net_resource_mgt->qos_info.dscp2prio_map); -static int nbl_serv_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) -{ - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure pfc failed ret %d\n", ret); + return ret; + } - if (NBL_COMMON_TO_VF_CAP(common)) - return 0; - else - return disp_ops->set_eth_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - mac, eth_id); + memcpy(net_resource_mgt->qos_info.pfc, pfc, NBL_MAX_PFC_PRIORITIES); + + return ret; }
-static void nbl_serv_adapt_desc_gother(void *priv) +static ssize_t nbl_serv_trust_mode_show(void *priv, u8 eth_id, char *buf) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - disp_ops->adapt_desc_gother(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + return scnprintf(buf, PAGE_SIZE, "%s\n", + qos_info->trust_mode == NBL_TRUST_MODE_DSCP ? "dscp" : "802.1p"); }
-static void nbl_serv_process_flr(void *priv, u16 vfid) +static int nbl_serv_configure_trust(void *priv, u8 eth_id, u8 trust_mode) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret; - disp_ops->flr_clear_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); - disp_ops->flr_clear_flows(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); - disp_ops->flr_clear_interrupt(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); - disp_ops->flr_clear_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + if (net_resource_mgt->qos_info.trust_mode == trust_mode) + return 0; + + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, net_resource_mgt->qos_info.pfc, trust_mode, + net_resource_mgt->qos_info.dscp2prio_map); + + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure trust_mode failed ret %d\n", ret); + return ret; + } + + net_resource_mgt->qos_info.trust_mode = trust_mode; + + return ret; }
-static void nbl_serv_recovery_abnormal(void *priv) +static ssize_t nbl_serv_dscp2prio_show(void *priv, u8 eth_id, char *buf) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + int len = 0; + int i; - disp_ops->unmask_all_interrupts(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + len += scnprintf(buf + len, PAGE_SIZE - len, "dscp2prio mapping:\n"); + for (i = 0; i < NBL_DSCP_MAX; i++) + len += scnprintf(buf + len, PAGE_SIZE - len, "\tprio:%d dscp:%d,\n", + qos_info->dscp2prio_map[i], i); + + return len; }
-static void nbl_serv_keep_alive(void *priv) +static int nbl_serv_configure_dscp2prio(void *priv, u8 eth_id, const char *buf, size_t count) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + char cmd[8]; + int dscp, prio, ret; + int i; - disp_ops->keep_alive(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + ret = sscanf(buf, "%7[^,], %d , %d", cmd, &dscp, &prio); + if (ret < 1) + return -EINVAL; + + if (strncmp(cmd, "set", 3) == 0) { + if (ret != 3 || dscp < 0 || dscp >= NBL_DSCP_MAX || prio < 0 || prio > 7) + return -EINVAL; + qos_info->dscp2prio_map[dscp] = prio; + } else if (strncmp(cmd, "del", 3) == 0) { + if (ret != 3 || dscp < 0 || dscp >= NBL_DSCP_MAX) + return -EINVAL; + if (qos_info->dscp2prio_map[dscp] == 0) + return -EINVAL; + qos_info->dscp2prio_map[dscp] = 0; + } else if (strncmp(cmd, "flush", 5) == 0) { + for (i = 0; i < NBL_DSCP_MAX; i++) + qos_info->dscp2prio_map[i] = i / NBL_MAX_PFC_PRIORITIES; + } else { + return -EINVAL; + } + + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, qos_info->pfc, + qos_info->trust_mode, qos_info->dscp2prio_map); + + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure dscp2prio failed ret %d\n", ret); + return ret; + } + + return count; }
nbl_serv_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + int ret; - ring_mgt->vsi_info[vsi_index].vsi_index = vsi_index; - ring_mgt->vsi_info[vsi_index].vsi_id = vsi_id; - ring_mgt->vsi_info[vsi_index].ring_offset = queue_offset; - ring_mgt->vsi_info[vsi_index].ring_num = queue_num; - if (disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_ITR_DYNAMIC)) - ring_mgt->vsi_info[vsi_index].itr_dynamic = true; + ret = disp_ops->set_pfc_buffer_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, prio, xoff, xon); + + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure pfc buffer size failed ret %d\n", ret); + return ret; + } - disp_ops->register_vsi_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - vsi_index, queue_offset, queue_num); + qos_info->buffer_sizes[prio][0] = xoff; + qos_info->buffer_sizes[prio][1] = xon; - return disp_ops->register_vsi2q(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - vsi_index, vsi_id, queue_offset, queue_num); + return ret; }
-static int nbl_serv_st_open(struct inode *inode, struct file *filep) +static ssize_t nbl_serv_pfc_buffer_size_show(void *priv, u8 eth_id, char *buf) { - struct nbl_serv_st_mgt *p = container_of(inode->i_cdev, struct nbl_serv_st_mgt, cdev); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + int prio; + ssize_t count = 0; - filep->private_data = p; + for (prio = 0; prio < NBL_MAX_PFC_PRIORITIES; prio++) + count += scnprintf(buf + count, PAGE_SIZE - count, "prio %d, xoff %d, xon %d\n", + prio, qos_info->buffer_sizes[prio][0], + qos_info->buffer_sizes[prio][1]); - return 0; + return count; }
-static ssize_t nbl_serv_st_write(struct file *file, const char __user *ubuf, - size_t size, loff_t *ppos) +static u8 nbl_serv_dcb_get_num_tc(struct net_device *netdev, struct ieee_ets *ets) { - return 0; -} + bool tc_unused = false; + u8 num_tc = 0; + u8 ret = 0; + int i; -static ssize_t nbl_serv_st_read(struct file *file, char __user *ubuf, size_t size, loff_t *ppos) -{ - return 0; -} + /* Scan the ETS Config Priority Table to find traffic classes + * enabled and create a bitmask of enabled TCs + */ + for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) + num_tc |= BIT(ets->prio_tc[i]); + + /* Scan bitmask for contiguous TCs starting with TC0 */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (num_tc & BIT(i)) { + if (!tc_unused) { + ret++; + } else { + netdev_err(netdev, "Non-contiguous TCs - Disabling DCB\n"); + return 1; + } + } else { + tc_unused = true; + } + } -static int nbl_serv_st_release(struct inode *inode, struct file *filp) -{ - return 0; + /* There is always at least 1 TC */ + if (!ret) + ret = 1; + + return ret; }
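A worked example of the contiguity rule implemented above (comment only):

/*
 * prio_tc = {0,0,1,1,2,2,0,0} sets bits 0-2; TC0..TC2 are contiguous,
 * so num_tc = 3. prio_tc = {0,0,3,0,0,0,0,0} sets bits 0 and 3; the
 * hole at TC1 trips the tc_unused check and the function falls back
 * to a single TC (returns 1).
 */

-static int nbl_serv_process_passthrough(struct nbl_service_mgt *serv_mgt, - unsigned int cmd, unsigned long arg) +static int nbl_serv_bwchk(struct net_device *netdev, struct ieee_ets *ets) { - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = 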
NBL_SERV_MGT_TO_COMMON(serv_mgt); - struct nbl_passthrough_fw_cmd_param *param = NULL, *result = NULL; - int ret = 0; - - param = kzalloc(sizeof(*param), GFP_KERNEL); - if (!param) - goto alloc_param_fail; + u8 num_tc, total_bw = 0; + int i; - result = kzalloc(sizeof(*result), GFP_KERNEL); - if (!result) - goto alloc_result_fail; + num_tc = nbl_serv_dcb_get_num_tc(netdev, ets); - ret = copy_from_user(param, (void *)arg, _IOC_SIZE(cmd)); - if (ret) { - nbl_err(common, NBL_DEBUG_ST, "Bad access %d.\n", ret); - return ret; + /* no bandwidth checks required if there's only one TC, so assign + * all bandwidth to TC0 and return + */ + if (num_tc == 1) { + ets->tc_reco_bw[0] = NBL_TC_MAX_BW; + return 0; } - nbl_debug(common, NBL_DEBUG_ST, "Passthough opcode: %d\n", param->opcode); - - ret = disp_ops->passthrough_fw_cmd(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param, result); - if (ret) - goto passthrough_fail; + for (i = 0; i < num_tc; i++) + total_bw += ets->tc_reco_bw[i]; - ret = copy_to_user((void *)arg, result, _IOC_SIZE(cmd)); + if (!total_bw) { + ets->tc_reco_bw[0] = NBL_TC_MAX_BW; + } else if (total_bw != NBL_TC_MAX_BW) { + netdev_err(netdev, "Invalid config, total bandwidth must equal 100\n"); + return -EINVAL; + } -passthrough_fail: - kfree(result); -alloc_result_fail: - kfree(param); -alloc_param_fail: - return ret; + return 0; } -static long nbl_serv_st_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static int nbl_serv_ieee_setets(struct net_device *netdev, struct ieee_ets *ets) { - struct nbl_serv_st_mgt *st_mgt = file->private_data; - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)st_mgt->serv_mgt; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - int ret = 0; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct ieee_ets ets_tmp = {0}; + int bwcfg = 0, bwrec = 0; + int ret; + int i; - if (_IOC_TYPE(cmd) != IOCTL_TYPE) { - nbl_err(common, NBL_DEBUG_ST, "cmd %u, bad magic 0x%x/0x%x.\n", - cmd, _IOC_TYPE(cmd), IOCTL_TYPE); - return -ENOTTY; + memcpy(&ets_tmp, ets, sizeof(ets_tmp)); + + if (nbl_serv_bwchk(netdev, &ets_tmp)) + return -EINVAL; + + for (i = 0; i < NBL_MAX_TC_NUM; i++) { + bwcfg += ets->tc_tx_bw[i]; + bwrec += ets->tc_reco_bw[i]; } - if (_IOC_DIR(cmd) & _IOC_READ) - ret = !access_ok((void __user *)arg, _IOC_SIZE(cmd)); - else if (_IOC_DIR(cmd) & _IOC_WRITE) - ret = !access_ok((void __user *)arg, _IOC_SIZE(cmd)); + if (!bwcfg) + ets_tmp.tc_tx_bw[0] = NBL_TC_MAX_BW; + + if (!bwrec) + ets_tmp.tc_reco_bw[0] = NBL_TC_MAX_BW; + + ret = disp_ops->set_tc_wgt(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), + ets_tmp.tc_tx_bw, NBL_MAX_TC_NUM); if (ret) { - nbl_err(common, NBL_DEBUG_ST, "Bad access.\n"); + nbl_err(common, NBL_DEBUG_MAIN, "set_tc_wgt failed ret %d\n", ret); return ret; } - switch (cmd) { - case IOCTL_PASSTHROUGH: - ret = nbl_serv_process_passthrough(serv_mgt, cmd, arg); - break; - default: - nbl_err(common, NBL_DEBUG_ST, "Unknown cmd %d.\n", cmd); - return -EFAULT; - } - - return ret; + memcpy(&qos_info->ets, &ets_tmp, sizeof(struct ieee_ets)); + return 0; } -static const struct file_operations st_ops = { - .owner = 
THIS_MODULE, - .open = nbl_serv_st_open, - .write = nbl_serv_st_write, - .read = nbl_serv_st_read, - .unlocked_ioctl = nbl_serv_st_unlock_ioctl, - .release = nbl_serv_st_release, -}; - -static int nbl_serv_alloc_subdev_id(struct nbl_software_tool_table *st_table) +static int nbl_serv_ieee_getets(struct net_device *netdev, struct ieee_ets *ets) { - int subdev_id; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - subdev_id = find_first_zero_bit(st_table->devid, NBL_ST_MAX_DEVICE_NUM); - if (subdev_id == NBL_ST_MAX_DEVICE_NUM) - return -ENOSPC; - set_bit(subdev_id, st_table->devid); + memcpy(ets, &qos_info->ets, sizeof(struct ieee_ets)); + ets->ets_cap = NBL_MAX_TC_NUM; + return 0; +} - return subdev_id; +static int nbl_serv_ieee_setpfc(struct net_device *netdev, struct ieee_pfc *pfc) +{ + return 0; } -static void nbl_serv_free_subdev_id(struct nbl_software_tool_table *st_table, int id) +static int nbl_serv_ieee_getpfc(struct net_device *netdev, struct ieee_pfc *pfc) { - clear_bit(id, st_table->devid); + return 0; } -static int nbl_serv_setup_st(void *priv, void *st_table_param) +static int nbl_serv_ieee_delapp(struct net_device *netdev, struct dcb_app *app) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_software_tool_table *st_table = (struct nbl_software_tool_table *)st_table_param; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - struct nbl_serv_st_mgt *st_mgt = NBL_SERV_MGT_TO_ST_MGT(serv_mgt); - struct device *test_device; - char name[NBL_RESTOOL_NAME_LEN] = {0}; - dev_t devid; - int id, subdev_id, ret = 0; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + int ret; - id = NBL_COMMON_TO_BOARD_ID(common); + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + app->protocol >= NBL_DSCP_MAX) + return -EINVAL; - subdev_id = nbl_serv_alloc_subdev_id(st_table); - if (subdev_id < 0) - goto alloc_subdev_id_fail; + if (qos_info->dscp2prio_map[app->protocol] != app->priority) + return -ENOENT; + + ret = dcb_ieee_delapp(netdev, app); + if (ret) + return ret; - devid = MKDEV(st_table->major, subdev_id); + qos_info->dscp2prio_map[app->protocol] = 0; + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(common), qos_info->pfc, + qos_info->trust_mode, qos_info->dscp2prio_map); - if (!NBL_COMMON_TO_PCI_FUNC_ID(common)) - snprintf(name, sizeof(name), "/nblst/nblst%04x_conf%d", - NBL_COMMON_TO_PDEV(common)->device, id); - else - snprintf(name, sizeof(name), "/nblst/nblst%04x_conf%d.%d", - NBL_COMMON_TO_PDEV(common)->device, id, NBL_COMMON_TO_PCI_FUNC_ID(common)); + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "delapp configure dscp2prio failed ret %d\n", ret); + return ret; + } - st_mgt = devm_kzalloc(NBL_COMMON_TO_DEV(common), sizeof(*st_mgt), GFP_KERNEL); - if (!st_mgt) - goto malloc_fail; + return 0; +} - st_mgt->serv_mgt = serv_mgt; +static int nbl_serv_ieee_setapp(struct net_device *netdev, struct 
dcb_app *app) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + int ret; - st_mgt->major = MAJOR(devid); - st_mgt->minor = MINOR(devid); - st_mgt->devno = devid; - st_mgt->subdev_id = subdev_id; + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + app->protocol >= NBL_DSCP_MAX) + return -EINVAL; - cdev_init(&st_mgt->cdev, &st_ops); - ret = cdev_add(&st_mgt->cdev, devid, 1); + if (qos_info->dscp2prio_map[app->protocol] == app->priority) + return 0; + + ret = dcb_ieee_setapp(netdev, app); if (ret) - goto cdev_add_fail; + return ret; - test_device = device_create(st_table->cls, NULL, st_mgt->devno, NULL, name); - if (IS_ERR(test_device)) { - ret = -EBUSY; - goto device_create_fail; + qos_info->trust_mode = NBL_TRUST_MODE_DSCP; + qos_info->dscp2prio_map[app->protocol] = app->priority; + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(common), qos_info->pfc, + qos_info->trust_mode, qos_info->dscp2prio_map); + + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "setapp configure dscp2prio failed ret %d\n", ret); + return ret; } - NBL_SERV_MGT_TO_ST_MGT(serv_mgt) = st_mgt; return 0; +} -device_create_fail: - cdev_del(&st_mgt->cdev); -cdev_add_fail: - devm_kfree(NBL_COMMON_TO_DEV(common), st_mgt); -malloc_fail: - nbl_serv_free_subdev_id(st_table, subdev_id); -alloc_subdev_id_fail: - return ret; +static void nbl_serv_dcbnl_getpfccfg(struct net_device *netdev, int prio, u8 *setting) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + + if (prio >= NBL_MAX_PFC_PRIORITIES) + return; + + *setting = qos_info->pfc[prio]; } -static void nbl_serv_remove_st(void *priv, void *st_table_param) +static int nbl_serv_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_software_tool_table *st_table = (struct nbl_software_tool_table *)st_table_param; - struct nbl_serv_st_mgt *st_mgt = NBL_SERV_MGT_TO_ST_MGT(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + *num = NBL_MAX_TC_NUM; - if (!st_mgt) + return 0; +} + +static void nbl_serv_dcbnl_setpfccfg(struct net_device *netdev, int prio, u8 set) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + u8 pfc[NBL_MAX_PFC_PRIORITIES] = {0}; + int ret; + + if (prio >= NBL_MAX_PFC_PRIORITIES) return; - device_destroy(st_table->cls, st_mgt->devno); - cdev_del(&st_mgt->cdev); + if (qos_info->pfc[prio] == set) + return; - nbl_serv_free_subdev_id(st_table, st_mgt->subdev_id); + memcpy(pfc, qos_info->pfc, NBL_MAX_PFC_PRIORITIES); + pfc[prio] = 
set; + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(common), pfc, + net_resource_mgt->qos_info.trust_mode, + net_resource_mgt->qos_info.dscp2prio_map); + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure pfc failed ret %d\n", ret); + return; + } - NBL_SERV_MGT_TO_ST_MGT(serv_mgt) = NULL; - devm_kfree(NBL_COMMON_TO_DEV(common), st_mgt); + memcpy(qos_info->pfc, pfc, NBL_MAX_PFC_PRIORITIES); } -static void nbl_serv_form_p4_name(struct nbl_common_info *common, int type, char *name, u16 len) +static u8 nbl_serv_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) { - char eth_num[NBL_P4_NAME_LEN] = {0}; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - switch (NBL_COMMON_TO_ETH_MODE(common)) { - case 1: - snprintf(eth_num, sizeof(eth_num), "single"); + *cap = true; + + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; break; - case 2: - snprintf(eth_num, sizeof(eth_num), "dual"); + case DCB_CAP_ATTR_PFC: + *cap = true; break; - case 4: - snprintf(eth_num, sizeof(eth_num), "quad"); + case DCB_CAP_ATTR_UP2TC: + *cap = false; break; - default: - nbl_err(common, NBL_DEBUG_CUSTOMIZED_P4, "Unknown P4 type %d", type); - return; - } - - switch (type) { - case NBL_P4_DEFAULT: - /* No need to load default p4 file */ + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *cap = false; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = qos_info->dcbx_mode; break; default: - nbl_err(common, NBL_DEBUG_CUSTOMIZED_P4, "Unknown P4 type %d", type); + *cap = false; + break; } + return 0; } -static int nbl_serv_load_p4(struct nbl_service_mgt *serv_mgt, - const struct firmware *fw, char *verify_code) +static u8 nbl_serv_ieee_getdcbx(struct net_device *netdev) { - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - const struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - struct elf32_shdr *shdr; - struct nbl_load_p4_param param; - u8 *strtab, *name, *product_code = NULL; - int i, ret = 0; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - if (memcmp(elf_hdr->e_ident, NBL_P4_ELF_IDENT, NBL_P4_ELF_IDENT_LEN)) { - nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Invalid ELF file"); - return -EINVAL; - } + return qos_info->dcbx_mode; +} - memset(¶m, 0, sizeof(param)); +static u8 nbl_serv_ieee_setdcbx(struct net_device *netdev, u8 mode) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - shdr = (struct elf32_shdr *)((u8 *)elf_hdr + elf_hdr->e_shoff); - strtab = (u8 *)elf_hdr + shdr[elf_hdr->e_shstrndx].sh_offset; + qos_info->dcbx_mode = mode; - for (i = 0; i < elf_hdr->e_shnum; i++) - if (shdr[i].sh_type == SHT_NOTE) { - name = strtab + 
shdr[i].sh_name; - if (!strncmp(name, NBL_P4_PRODUCT_INFO_SECTION_NAME, - sizeof(NBL_P4_PRODUCT_INFO_SECTION_NAME))) - product_code = (u8 *)elf_hdr + shdr[i].sh_offset; - } + return 0; +} - if (!product_code) { - nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Product code not exist"); - return -EINVAL; - } +static u8 nbl_serv_dcnbl_setstate(struct net_device *netdev, u8 state) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - if (strncmp(product_code, verify_code, NBL_P4_VERIFY_CODE_LEN)) { - nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Invalid product code %32s", - product_code); - return -EINVAL; - } + if (qos_info->dcbx_state == state) + return NBL_DCB_NO_HW_CHG; - param.start = 1; - ret = disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), ¶m); - if (ret) - return ret; + qos_info->dcbx_state = state; + return NBL_DCB_HW_CHG; +} - for (i = 0; i < elf_hdr->e_shnum; i++) - if (shdr[i].sh_type == SHT_PROGBITS && !(shdr[i].sh_flags & SHF_EXECINSTR)) { - if (shdr[i].sh_size > NBL_P4_SECTION_LEN_MAX) { - nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Section oversize %d", - shdr[i].sh_size); - return -EINVAL; - } +static u8 nbl_serv_dcnbl_getstate(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - memset(¶m, 0, sizeof(param)); - /* name is used for distinguish configuration, not used for now */ - strscpy(param.name, strtab + shdr[i].sh_name, sizeof(param.name)); - param.addr = shdr[i].sh_addr; - param.size = shdr[i].sh_size; - param.section_index = i; - param.section_offset = 0; - param.data = (u8 *)elf_hdr + shdr[i].sh_offset; + return qos_info->dcbx_state; +} - ret = disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), ¶m); - if (ret) - return ret; - } +static u8 nbl_serv_dcnbl_getpfcstate(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + int i; - memset(¶m, 0, sizeof(param)); - param.end = 1; - ret = disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), ¶m); - if (ret) - return ret; + for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) + if (qos_info->pfc[i]) + return 1; return 0; } -static __maybe_unused void nbl_serv_load_default_p4(struct nbl_service_mgt *serv_mgt) +static void nbl_serv_get_board_info(void *priv, struct nbl_board_port_info *board_info) { + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - disp_ops->load_p4_default(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + disp_ops->get_board_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), board_info); } -static int nbl_serv_init_p4(void *priv) +static int nbl_serv_set_rate_limit(void *priv, enum nbl_traffic_type type, u32 rate) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt 
*net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - const struct firmware *fw; - char name[NBL_P4_NAME_LEN] = {0}; - char verify_code[NBL_P4_NAME_LEN] = {0}; - int type, ret = 0; - - type = disp_ops->get_p4_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), verify_code); - if (type < 0 || type > NBL_P4_TYPE_MAX) - return -ENOENT; - - if (type == NBL_P4_DEFAULT) - goto out; - - nbl_serv_form_p4_name(common, type, name, sizeof(name)); - ret = firmware_request_nowarn(&fw, name, NBL_SERV_MGT_TO_DEV(serv_mgt)); - if (ret) - goto out; - - ret = nbl_serv_load_p4(serv_mgt, fw, verify_code); - - release_firmware(fw); + int ret = 0; -out: - if (type == NBL_P4_DEFAULT || ret) { - nbl_info(common, NBL_DEBUG_CUSTOMIZED_P4, "Load P4 default"); - nbl_serv_load_default_p4(serv_mgt); - disp_ops->set_p4_used(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), NBL_P4_DEFAULT); - } else { - nbl_info(common, NBL_DEBUG_CUSTOMIZED_P4, "Load P4 %d", type); - disp_ops->set_p4_used(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), type); + ret = disp_ops->set_rate_limit(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), type, rate); + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "set_rate type %d failed ret %d\n", type, ret); + return ret; } - /* We always return OK, because at the very least we would use default P4 */ - return 0; + if (type == NBL_TRAFFIC_RDMA_TYPE) + qos_info->rdma_rate = rate; + else + qos_info->net_rate = rate; + + return ret; } -static int nbl_serv_set_spoof_check_addr(void *priv, u8 *mac) +static void nbl_serv_get_mirror_table_id(void *priv, u16 vsi_id, int dir, bool mirror_en, + u8 *mt_id) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - return disp_ops->set_spoof_check_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), mac); + disp_ops->get_mirror_table_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, dir, mirror_en, mt_id); } -u16 nbl_serv_get_vf_base_vsi_id(void *priv, u16 func_id) +static int nbl_serv_configure_mirror(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret; - return disp_ops->get_vf_base_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id); + nbl_event_notify(NBL_EVENT_MIRROR_SELECTPORT, &mirror_en, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + ret = disp_ops->configure_mirror(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, mirror_en, dir, mt_id); + return ret; } -static int nbl_serv_get_board_id(void *priv) +static int nbl_serv_configure_mirror_table(void *priv, bool mirror_en, + u16 func_id, u8 mt_id) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret; - return disp_ops->get_board_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + ret = disp_ops->check_vf_is_active(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id); + if (!ret) + return -EIO; + + ret = disp_ops->configure_mirror_table(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + mirror_en, func_id, mt_id); + nbl_serv_chan_notify_mirror_outputport_req(serv_mgt, func_id, mirror_en); + return ret; } -static int 
nbl_serv_process_abnormal_event(void *priv) +static int nbl_serv_clear_mirror_cfg(void *priv, u16 func_id) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_abnormal_event_info abnomal_info; - struct nbl_abnormal_details *detail; - u16 local_queue_id; - int type, i, ret = 0; + int ret; - memset(&abnomal_info, 0, sizeof(abnomal_info)); + ret = disp_ops->clear_mirror_cfg(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id); - ret = disp_ops->process_abnormal_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &abnomal_info); - if (!ret) - return ret; + return ret; +} - for (i = 0; i < NBL_ABNORMAL_EVENT_MAX; i++) { - detail = &abnomal_info.details[i]; +u16 nbl_serv_get_vf_function_id(void *priv, int vf_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - if (!detail->abnormal) - continue; + if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info) + return U16_MAX; - type = nbl_serv_abnormal_event_to_queue(i); - local_queue_id = disp_ops->get_local_queue_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - detail->vsi_id, detail->qid); - if (local_queue_id == U16_MAX) - return 0; + return disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), vf_id); +} - nbl_serv_restore_queue(serv_mgt, detail->vsi_id, local_queue_id, type, true); - } +static void nbl_serv_cfg_mirror_outputport_event(void *priv, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - return 0; + disp_ops->cfg_mirror_outputport_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); } - static struct nbl_service_ops serv_ops = { .init_chip = nbl_serv_init_chip, .destroy_chip = nbl_serv_destroy_chip, @@ -2914,31 +7436,37 @@ static struct nbl_service_ops serv_ops = { .get_common_irq_num = nbl_serv_get_common_irq_num, .get_ctrl_irq_num = nbl_serv_get_ctrl_irq_num, .get_chip_temperature = nbl_serv_get_chip_temperature, - .get_chip_temperature_max = nbl_serv_get_chip_temperature_max, - .get_chip_temperature_crit = nbl_serv_get_chip_temperature_crit, .get_module_temperature = nbl_serv_get_module_temperature, .get_port_attributes = nbl_serv_get_port_attributes, - .update_ring_num = nbl_serv_update_ring_num, + .update_template_config = nbl_serv_update_template_config, .enable_port = nbl_serv_enable_port, + .init_port = nbl_serv_init_port, .set_sfp_state = nbl_serv_set_sfp_state, .register_net = nbl_serv_register_net, .unregister_net = nbl_serv_unregister_net, .setup_txrx_queues = nbl_serv_setup_txrx_queues, .remove_txrx_queues = nbl_serv_remove_txrx_queues, + .check_offload_status = nbl_serv_check_offload_status, + .init_tx_rate = nbl_serv_init_tx_rate, .setup_q2vsi = nbl_serv_setup_q2vsi, .remove_q2vsi = nbl_serv_remove_q2vsi, .setup_rss = nbl_serv_setup_rss, .remove_rss = nbl_serv_remove_rss, + .setup_rss_indir = nbl_serv_setup_rss_indir, .register_vsi_info = nbl_serv_register_vsi_info, .alloc_rings = nbl_serv_alloc_rings, + .cpu_affinity_init = nbl_serv_cpu_affinity_init, .free_rings = nbl_serv_free_rings, .enable_napis = nbl_serv_enable_napis, .disable_napis = nbl_serv_disable_napis, .set_mask_en = nbl_serv_set_mask_en, .start_net_flow = 
nbl_serv_start_net_flow, .stop_net_flow = nbl_serv_stop_net_flow, + .clear_flow = nbl_serv_clear_flow, + .set_promisc_mode = nbl_serv_set_promisc_mode, + .cfg_multi_mcast = nbl_serv_cfg_multi_mcast, .set_lldp_flow = nbl_serv_set_lldp_flow, .remove_lldp_flow = nbl_serv_remove_lldp_flow, .start_mgt_flow = nbl_serv_start_mgt_flow, @@ -2951,28 +7479,101 @@ static struct nbl_service_ops serv_ops = { .vsi_open = nbl_serv_vsi_open, .vsi_stop = nbl_serv_vsi_stop, .switch_traffic_default_dest = nbl_serv_switch_traffic_default_dest, - .get_user_queue_info = nbl_serv_get_user_queue_info, + .config_fd_flow_state = nbl_serv_config_fd_flow_state, /* For netdev ops */ .netdev_open = nbl_serv_netdev_open, .netdev_stop = nbl_serv_netdev_stop, .change_mtu = nbl_serv_change_mtu, + .change_rep_mtu = nbl_serv_change_rep_mtu, .set_mac = nbl_serv_set_mac, .rx_add_vid = nbl_serv_rx_add_vid, .rx_kill_vid = nbl_serv_rx_kill_vid, .get_stats64 = nbl_serv_get_stats64, .set_rx_mode = nbl_serv_set_rx_mode, .change_rx_flags = nbl_serv_change_rx_flags, + .set_features = nbl_serv_set_features, .features_check = nbl_serv_features_check, + .setup_tc = nbl_serv_setup_tc, .get_phys_port_name = nbl_serv_get_phys_port_name, .get_port_parent_id = nbl_serv_get_port_parent_id, .tx_timeout = nbl_serv_tx_timeout, + .bridge_setlink = nbl_serv_bridge_setlink, + .bridge_getlink = nbl_serv_bridge_getlink, + .set_vf_spoofchk = nbl_serv_set_vf_spoofchk, + .set_vf_link_state = nbl_serv_set_vf_link_state, + .set_vf_mac = nbl_serv_set_vf_mac, + .set_vf_rate = nbl_serv_set_vf_rate, + .set_vf_vlan = nbl_serv_set_vf_vlan, + .get_vf_config = nbl_serv_get_vf_config, + .get_vf_stats = nbl_serv_get_vf_stats, + .select_queue = nbl_serv_select_queue, + .set_vf_trust = nbl_serv_set_vf_trust, + + /* For rep associated */ + .rep_netdev_open = nbl_serv_rep_netdev_open, + .rep_netdev_stop = nbl_serv_rep_netdev_stop, + .rep_start_xmit = nbl_serv_rep_start_xmit, + .rep_get_stats64 = nbl_serv_rep_get_stats64, + .rep_set_rx_mode = nbl_serv_rep_set_rx_mode, + .rep_set_mac = nbl_serv_rep_set_mac, + .rep_rx_add_vid = nbl_serv_rep_rx_add_vid, + .rep_rx_kill_vid = nbl_serv_rep_rx_kill_vid, + .rep_setup_tc = nbl_serv_rep_setup_tc, + .rep_get_phys_port_name = nbl_serv_rep_get_phys_port_name, + .rep_get_port_parent_id = nbl_serv_rep_get_port_parent_id, + .get_rep_feature = nbl_serv_get_rep_feature, + .get_rep_queue_num = nbl_serv_get_rep_queue_num, + .get_rep_queue_info = nbl_serv_get_rep_queue_info, + .get_user_queue_info = nbl_serv_get_user_queue_info, + .alloc_rep_queue_mgt = nbl_serv_alloc_rep_queue_mgt, + .free_rep_queue_mgt = nbl_serv_free_rep_queue_mgt, + .set_eswitch_mode = nbl_serv_set_eswitch_mode, + .get_eswitch_mode = nbl_serv_get_eswitch_mode, + .alloc_rep_data = nbl_serv_alloc_rep_data, + .free_rep_data = nbl_serv_free_rep_data, + .set_rep_netdev_info = nbl_serv_set_rep_netdev_info, + .unset_rep_netdev_info = nbl_serv_unset_rep_netdev_info, + .disable_phy_flow = nbl_serv_disable_phy_flow, + .enable_phy_flow = nbl_serv_enable_phy_flow, + .init_acl = nbl_serv_init_acl, + .uninit_acl = nbl_serv_uninit_acl, + .set_upcall_rule = nbl_serv_set_upcall_rule, + .unset_upcall_rule = nbl_serv_unset_upcall_rule, + .switchdev_init_cmdq = nbl_serv_switchdev_init_cmdq, + .switchdev_deinit_cmdq = nbl_serv_switchdev_deinit_cmdq, + .set_tc_flow_info = nbl_serv_set_tc_flow_info, + .unset_tc_flow_info = nbl_serv_unset_tc_flow_info, + .get_tc_flow_info = nbl_serv_get_tc_flow_info, + .register_indr_dev_tc_offload = nbl_serv_register_indr_dev_tc_offload, + 
.unregister_indr_dev_tc_offload = nbl_serv_unregister_indr_dev_tc_offload, + .set_lag_info = nbl_serv_set_lag_info, + .unset_lag_info = nbl_serv_unset_lag_info, + .set_netdev_ops = nbl_serv_set_netdev_ops, .get_vsi_id = nbl_serv_get_vsi_id, .get_eth_id = nbl_serv_get_eth_id, .setup_net_resource_mgt = nbl_serv_setup_net_resource_mgt, .remove_net_resource_mgt = nbl_serv_remove_net_resource_mgt, + .init_hw_stats = nbl_serv_init_hw_stats, + .remove_hw_stats = nbl_serv_remove_hw_stats, + .get_rx_dropped = nbl_serv_get_rx_dropped, .enable_lag_protocol = nbl_serv_enable_lag_protocol, + .cfg_lag_hash_algorithm = nbl_serv_cfg_lag_hash_algorithm, + .cfg_lag_member_fwd = nbl_serv_cfg_lag_member_fwd, + .cfg_lag_member_list = nbl_serv_cfg_lag_member_list, + .cfg_lag_member_up_attr = nbl_serv_cfg_lag_member_up_attr, + .cfg_bond_shaping = nbl_serv_cfg_bond_shaping, + .cfg_bgid_back_pressure = nbl_serv_cfg_bgid_back_pressure, + .get_board_info = nbl_serv_get_board_info, + + .get_rdma_cap_num = nbl_serv_get_rdma_cap_num, + .setup_rdma_id = nbl_serv_setup_rdma_id, + .remove_rdma_id = nbl_serv_remove_rdma_id, + .register_rdma = nbl_serv_register_rdma, + .unregister_rdma = nbl_serv_unregister_rdma, + .register_rdma_bond = nbl_serv_register_rdma_bond, + .unregister_rdma_bond = nbl_serv_unregister_rdma_bond, .get_hw_addr = nbl_serv_get_hw_addr, .get_real_hw_addr = nbl_serv_get_real_hw_addr, .get_function_id = nbl_serv_get_function_id, @@ -2982,20 +7583,72 @@ static struct nbl_service_ops serv_ops = { .adapt_desc_gother = nbl_serv_adapt_desc_gother, .process_flr = nbl_serv_process_flr, .get_board_id = nbl_serv_get_board_id, + .covert_vfid_to_vsi_id = nbl_serv_covert_vfid_to_vsi_id, .recovery_abnormal = nbl_serv_recovery_abnormal, .keep_alive = nbl_serv_keep_alive, + .get_mirror_table_id = nbl_serv_get_mirror_table_id, + .configure_mirror = nbl_serv_configure_mirror, + .configure_mirror_table = nbl_serv_configure_mirror_table, + .clear_mirror_cfg = nbl_serv_clear_mirror_cfg, .get_devlink_info = nbl_serv_get_devlink_info, .update_devlink_flash = nbl_serv_update_devlink_flash, .get_adminq_tx_buf_size = nbl_serv_get_adminq_tx_buf_size, + .emp_console_write = nbl_serv_emp_console_write, .check_fw_heartbeat = nbl_serv_check_fw_heartbeat, .check_fw_reset = nbl_serv_check_fw_reset, .set_netdev_carrier_state = nbl_serv_set_netdev_carrier_state, + .cfg_eth_bond_event = nbl_serv_cfg_eth_bond_event, + .cfg_fd_update_event = nbl_serv_cfg_fd_update_event, + .configure_rdma_msix_off = nbl_serv_configure_rdma_msix_off, .setup_st = nbl_serv_setup_st, .remove_st = nbl_serv_remove_st, .get_vf_base_vsi_id = nbl_serv_get_vf_base_vsi_id, + + .setup_vf_config = nbl_serv_setup_vf_config, + .remove_vf_config = nbl_serv_remove_vf_config, + .register_dev_name = nbl_serv_register_dev_name, + .get_dev_name = nbl_serv_get_dev_name, + .setup_vf_resource = nbl_serv_setup_vf_resource, + .remove_vf_resource = nbl_serv_remove_vf_resource, + + .get_xdp_queue_info = nbl_serv_get_xdp_queue_info, + .set_xdp = nbl_serv_set_xdp, + .set_hw_status = nbl_serv_set_hw_status, + .get_active_func_bitmaps = nbl_serv_get_active_func_bitmaps, + .get_net_rate = nbl_serv_get_net_rate, + .get_rdma_rate = nbl_serv_get_rdma_rate, + .get_rdma_bw = nbl_serv_get_rdma_bw, + .configure_rdma_bw = nbl_serv_configure_rdma_bw, + .configure_pfc = nbl_serv_configure_pfc, + .configure_trust = nbl_serv_configure_trust, + .configure_dscp2prio = nbl_serv_configure_dscp2prio, + .trust_mode_show = nbl_serv_trust_mode_show, + .dscp2prio_show = nbl_serv_dscp2prio_show, + .pfc_show = 
nbl_serv_pfc_show, + .pfc_buffer_size_show = nbl_serv_pfc_buffer_size_show, + .set_pfc_buffer_size = nbl_serv_set_pfc_buffer_size, + .set_rate_limit = nbl_serv_set_rate_limit, + + .ieee_setets = nbl_serv_ieee_setets, + .ieee_getets = nbl_serv_ieee_getets, + .ieee_setpfc = nbl_serv_ieee_setpfc, + .ieee_getpfc = nbl_serv_ieee_getpfc, + .ieee_setapp = nbl_serv_ieee_setapp, + .ieee_delapp = nbl_serv_ieee_delapp, + .dcbnl_setpfccfg = nbl_serv_dcbnl_setpfccfg, + .dcbnl_getpfccfg = nbl_serv_dcbnl_getpfccfg, + .dcbnl_getnumtcs = nbl_serv_dcbnl_getnumtcs, + .ieee_getdcbx = nbl_serv_ieee_getdcbx, + .ieee_setdcbx = nbl_serv_ieee_setdcbx, + .dcbnl_getcap = nbl_serv_dcbnl_getcap, + .dcbnl_getstate = nbl_serv_dcnbl_getstate, + .dcbnl_setstate = nbl_serv_dcnbl_setstate, + .dcbnl_getpfcstate = nbl_serv_dcnbl_getpfcstate, + .get_vf_function_id = nbl_serv_get_vf_function_id, + .cfg_mirror_outputport_event = nbl_serv_cfg_mirror_outputport_event, }; /* Structure starts here, adding an op should not modify anything below */ @@ -3012,17 +7665,17 @@ static int nbl_serv_setup_serv_mgt(struct nbl_common_info *common, NBL_SERV_MGT_TO_COMMON(*serv_mgt) = common; nbl_serv_setup_flow_mgt(NBL_SERV_MGT_TO_FLOW_MGT(*serv_mgt)); - set_bit(NBL_FLAG_MINI_DRIVER, (*serv_mgt)->flags); - return 0; } static void nbl_serv_remove_serv_mgt(struct nbl_common_info *common, struct nbl_service_mgt **serv_mgt) { - struct device *dev; + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(*serv_mgt); - dev = NBL_COMMON_TO_DEV(common); + if (ring_mgt->rss_indir_user) + devm_kfree(dev, ring_mgt->rss_indir_user); devm_kfree(dev, *serv_mgt); *serv_mgt = NULL; } @@ -3042,6 +7695,8 @@ static int nbl_serv_setup_ops(struct device *dev, struct nbl_service_ops_tbl **s NBL_SERV_OPS_TBL_TO_OPS(*serv_ops_tbl) = &serv_ops; nbl_serv_setup_ethtool_ops(&serv_ops); + nbl_serv_setup_ktls_ops(&serv_ops); + nbl_serv_setup_xfrm_ops(&serv_ops); NBL_SERV_OPS_TBL_TO_PRIV(*serv_ops_tbl) = serv_mgt; return 0; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h index a8f1a6458705a11f2857d59e405404ba41bd76c4..5a9f95b7243139092b1797ba6064bb27055c1697 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h @@ -17,6 +17,7 @@ #define NBL_SERV_MGT_TO_REP_QUEUE_MGT(serv_mgt) ((serv_mgt)->rep_queue_mgt) #define NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt) (&(serv_mgt)->flow_mgt) #define NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) ((serv_mgt)->net_resource_mgt) +#define NBL_SERV_MGT_TO_TC_MGT(serv_mgt) (&(serv_mgt)->tc_mgt) #define NBL_SERV_MGT_TO_ST_MGT(serv_mgt) ((serv_mgt)->st_mgt) #define NBL_SERV_MGT_TO_DISP_OPS_TBL(serv_mgt) ((serv_mgt)->disp_ops_tbl) @@ -28,6 +29,18 @@ #define NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt) (NBL_SERV_MGT_TO_CHAN_OPS_TBL(serv_mgt)->priv) #define NBL_DEFAULT_VLAN_ID 0 +#define NBL_HW_STATS_PERIOD_SECONDS 5 +#define NBL_HW_STATS_RX_RATE_THRESHOLD (1000) /* 1k pps */ + +#define NBL_REP_QUEUE_MGT_DESC_MAX (32768) +#define NBL_REP_QUEUE_MGT_DESC_NUM (2048) +#define NBL_REP_PER_VSI_QUEUE_NUM (1) +#define NBL_DEFAULT_REP_TX_RETRY_NUM 2 +#define NBL_DEFAULT_REP_TX_MAX_NUM 8192 + +#define NBL_MAX_QUEUE_TC_NUM (8) +#define NBL_TC_WEIGHT_GRAVITY (10) +#define NBL_TC_MBPS_DIVSIOR (125000) #define NBL_TX_TSO_MSS_MIN (256) #define NBL_TX_TSO_MSS_MAX (16383) @@ -35,9 +48,6 @@ #define NBL_TX_TSO_L2L3L4_HDR_LEN_MAX (128) #define 
NBL_TX_CHECKSUM_OFFLOAD_L2L3L4_HDR_LEN_MAX (255) -#define NBL_FLAG_AQ_MODIFY_MAC_FILTER BIT(0) -#define NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT(1) - #define NBL_EEPROM_LENGTH (0) /* input set */ @@ -56,7 +66,14 @@ #define SET_DPORT_TYPE_ETH_LAG (2) #define SET_DPORT_TYPE_SP_PORT (3) -#define NBL_VLAN_SHIFT 8 +#define NBL_MAX_BURST 524287 + +#define NBL_VLAN_PCP_SHIFT 13 + +/* primary vlan in vlan list */ +#define NBL_NO_TRUST_MAX_VLAN 9 +/* primary mac not in submac list */ +#define NBL_NO_TRUST_MAX_MAC 12 #define NBL_DEVLINK_INFO_FRIMWARE_VERSION_LEN 32 #define NBL_DEVLINK_FLASH_COMPONENT_CRC_SIZE 4 @@ -64,9 +81,9 @@ /* For customized P4 */ #define NBL_P4_ELF_IDENT "\x7F\x45\x4C\x46\x01\x01\x01\x00" #define NBL_P4_ELF_IDENT_LEN 8 -#define NBL_P4_SECTION_LEN_MAX 2048 #define NBL_P4_VERIFY_CODE_LEN 9 #define NBL_P4_PRODUCT_INFO_SECTION_NAME "product_info" +#define NBL_MD5SUM_LEN 16 enum { NBL_MGT_SERV_MGT, @@ -78,6 +95,11 @@ enum { NBL_NET_SERV_RDMA, }; +enum { + NBL_TC_INVALID, + NBL_TC_RUNNING, +}; + struct nbl_serv_ring { dma_addr_t dma; u16 index; @@ -89,16 +111,17 @@ struct nbl_serv_ring { struct nbl_serv_vector { char name[32]; + cpumask_t cpumask; struct net_device *netdev; + struct nbl_napi_struct *nbl_napi; + struct nbl_serv_ring *tx_ring; + struct nbl_serv_ring *rx_ring; + u8 __iomem *irq_enable_base; u32 irq_data; - u8 *irq_enable_base; u16 local_vector_id; u16 global_vector_id; u16 intr_rate_usecs; u16 intr_suppress_level; - struct napi_struct *napi; - struct nbl_serv_ring *tx_ring; - struct nbl_serv_ring *rx_ring; }; struct nbl_serv_ring_vsi_info { @@ -115,11 +138,14 @@ struct nbl_serv_ring_mgt { struct nbl_serv_ring *tx_rings; struct nbl_serv_ring *rx_rings; struct nbl_serv_vector *vectors; + void *xdp_prog; struct nbl_serv_ring_vsi_info vsi_info[NBL_VSI_MAX]; + u32 *rss_indir_user; u16 tx_desc_num; u16 rx_desc_num; u16 tx_ring_num; u16 rx_ring_num; + u16 xdp_ring_offset; u16 active_ring_num; bool net_msix_mask_en; }; @@ -127,18 +153,51 @@ struct nbl_serv_ring_mgt { struct nbl_serv_vlan_node { struct list_head node; u16 vid; + // primary_mac_effective means base mac + vlan ok + u16 primary_mac_effective; + // sub_mac_effective means sub mac + vlan ok + u16 sub_mac_effective; + u16 ref_cnt; }; struct nbl_serv_submac_node { struct list_head node; u8 mac[ETH_ALEN]; + // effective means this submac + allvlan flowrule effective + u16 effective; +}; + +enum { + NBL_PROMISC = 0, + NBL_ALLMULTI = 1, + NBL_USER_FLOW = 2, + NBL_MIRROR = 3, +}; + +enum { + NBL_SUBMAC_UNICAST = 0, + NBL_SUBMAC_MULTI = 1, + NBL_SUBMAC_MAX = 2 }; struct nbl_serv_flow_mgt { + struct list_head vlan_list; + struct list_head submac_list[NBL_SUBMAC_MAX]; + u16 vid; u8 mac[ETH_ALEN]; u8 eth; - struct list_head vlan_list; - struct list_head submac_list; + bool trusted_en; + bool trusted_update; + u16 vlan_list_cnt; + u16 active_submac_list; + u16 submac_list_cnt; + u16 unicast_mac_cnt; + u16 multi_mac_cnt; + u16 promisc; + bool force_promisc; + bool unicast_flow_enable; + bool multicast_flow_enable; + bool pending_async_work; }; struct nbl_mac_filter { @@ -146,46 +205,150 @@ struct nbl_mac_filter { u8 macaddr[ETH_ALEN]; }; +struct nbl_serv_tc_mgt { + int state; + u16 orig_num_active_queues; + u16 num_tc; + u16 total_qps; +}; + enum nbl_adapter_flags { /* p4 flags must be at the start */ NBL_FLAG_P4_DEFAULT, NBL_FLAG_LINK_DOWN_ON_CLOSE, - NBL_FLAG_MINI_DRIVER, + NBL_FLAG_NRZ_RS_FEC_544_SUPPORT, + NBL_FLAG_HIGH_THROUGHPUT, NBL_ADAPTER_FLAGS_MAX }; +struct nbl_serv_lag_info { + struct net_device *bond_netdev; + u16 
lag_num; + u8 lag_id; +}; + +struct nbl_serv_netdev_ops { + struct net_device_ops *pf_netdev_ops; + struct net_device_ops *rep_netdev_ops; +}; + +struct nbl_serv_rep_drop { + struct u64_stats_sync rep_drop_syncp; + u64 tx_dropped; +}; + +struct nbl_sysfs_vf_config_attr { + struct kobj_attribute mac_attr; + struct kobj_attribute rate_attr; + struct kobj_attribute spoofchk_attr; + struct kobj_attribute state_attr; + void *priv; + int vf_id; +}; + +struct nbl_serv_vf_info { + struct kobject kobj; + struct kobject meters_kobj; + struct kobject rx_kobj; + struct kobject tx_kobj; + struct kobject rx_bps_kobj; + struct kobject tx_bps_kobj; + void *priv; + u16 vf_id; + + int state; + int spoof_check; + int max_tx_rate; + int meter_tx_rate; + int meter_rx_rate; + int meter_tx_burst; + int meter_rx_burst; + u8 mac[ETH_ALEN]; + u16 vlan; + u16 vlan_proto; + u8 vlan_qos; + bool trusted; +}; + +#define NBL_DCB_NO_HW_CHG 1 +#define NBL_DCB_HW_CHG 2 +struct nbl_serv_qos_info { + u8 dcbx_mode; + u8 dcbx_state; + u8 trust_mode; /* Trust Mode value 0:802.1p 1: dscp */ + u8 pfc[NBL_MAX_PFC_PRIORITIES]; + u8 dscp2prio_map[NBL_DSCP_MAX]; /* DSCP -> Priority map */ + int rdma_bw; + u32 rdma_rate; + u32 net_rate; + DECLARE_BITMAP(dscp_mapped, NBL_DSCP_MAX); + struct dcb_app app[NBL_DSCP_MAX]; + int buffer_sizes[NBL_MAX_PFC_PRIORITIES][2]; + struct ieee_ets ets; +}; + struct nbl_serv_net_resource_mgt { struct nbl_service_mgt *serv_mgt; struct net_device *netdev; struct work_struct net_stats_update; struct work_struct rx_mode_async; struct work_struct tx_timeout; + struct work_struct update_link_state; + struct work_struct update_vlan; + struct work_struct update_mirror_outputport; struct delayed_work watchdog_task; struct timer_list serv_timer; unsigned long serv_timer_period; - /* spinlock_t for rx mode submac */ - spinlock_t mac_vlan_list_lock; - /* spinlock_t for rx mode promisc */ - spinlock_t current_netdev_promisc_flags_lock; - struct list_head mac_filter_list; + struct list_head tmp_add_filter_list; + struct list_head tmp_del_filter_list; struct list_head indr_dev_priv_list; - u32 rxmode_set_required; + struct nbl_serv_lag_info *lag_info; + struct nbl_serv_netdev_ops netdev_ops; u16 curr_promiscuout_mode; u16 num_net_msix; + bool update_submac; + int num_vfs; + int total_vfs; /* stats for netdev */ u64 get_stats_jiffies; struct nbl_stats stats; + struct nbl_hw_stats hw_stats; + unsigned long hw_stats_jiffies; + unsigned long hw_stats_period; struct nbl_priv_stats priv_stats; - struct nbl_phy_state phy_state; struct nbl_phy_caps phy_caps; + struct nbl_serv_rep_drop *rep_drop; + struct nbl_serv_vf_info *vf_info; + struct kobject *sriov_kobj; u32 configured_speed; u32 configured_fec; + u16 bridge_mode; + int link_forced; + + u16 vlan_tci; + u16 vlan_proto; + int max_tx_rate; + u32 dump_flag; + u32 dump_perf_len; + struct nbl_serv_qos_info qos_info; +}; + +struct nbl_serv_rep_queue_mgt { + struct ptr_ring ring; + struct net_device *netdev; + + /* spinlock_t for queue mgt */ + spinlock_t seq_lock; + int size; }; #define IOCTL_TYPE 'n' #define IOCTL_PASSTHROUGH _IOWR(IOCTL_TYPE, 0x01, struct nbl_passthrough_fw_cmd_param) +#define IOCTL_ST_INFO _IOR(IOCTL_TYPE, 0x02, struct nbl_st_info_param) + +#define IOCTL_ST_INFO_VERSION 0x10 /* 1.0 */ #define NBL_RESTOOL_NAME_LEN 32 struct nbl_serv_st_mgt { @@ -202,8 +365,10 @@ struct nbl_service_mgt { struct nbl_dispatch_ops_tbl *disp_ops_tbl; struct nbl_channel_ops_tbl *chan_ops_tbl; struct nbl_serv_ring_mgt ring_mgt; + struct nbl_serv_rep_queue_mgt *rep_queue_mgt; struct 
nbl_serv_flow_mgt flow_mgt; struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_serv_tc_mgt tc_mgt; struct nbl_serv_st_mgt *st_mgt; DECLARE_BITMAP(flags, NBL_ADAPTER_FLAGS_MAX); }; @@ -221,10 +386,33 @@ struct nbl_serv_pldm_pci_record_id { u16 subsystem_device; }; +struct nbl_tc_flow_parse_pattern { + u32 pattern_type; + int (*parse_func)(const struct flow_rule *rule, + struct nbl_flow_pattern_conf *filter, + const struct nbl_common_info *common); +}; + +struct nbl_tc_flow_action_driver_ops { + int (*act_update)(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filterr, + struct nbl_tc_flow_param *param); +}; + +struct nbl_serv_notify_vlan_param { + u16 vlan_tci; + u16 vlan_proto; +}; int nbl_serv_netdev_open(struct net_device *netdev); int nbl_serv_netdev_stop(struct net_device *netdev); int nbl_serv_vsi_open(void *priv, struct net_device *netdev, u16 vsi_index, u16 real_qps, bool use_napi); int nbl_serv_vsi_stop(void *priv, u16 vsi_index); +void nbl_serv_get_rep_drop_stats(struct nbl_service_mgt *serv_mgt, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats); +void nbl_serv_cpu_affinity_init(void *priv, u16 rings_num); +u16 nbl_serv_get_vf_function_id(void *priv, int vf_id); #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..f1b147b5fa82c2043d86325dda7b8bbfcc206e3a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.c @@ -0,0 +1,632 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include "nbl_dev.h" + +#define NBL_SET_RO_ATTR(dev_name_attr, attr_name, attr_show) do { \ + typeof(dev_name_attr) _name_attr = (dev_name_attr); \ + (_name_attr)->attr.name = __stringify(attr_name); \ + (_name_attr)->attr.mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(0444); \ + (_name_attr)->show = attr_show; \ + (_name_attr)->store = NULL; \ +} while (0) + +static ssize_t net_rep_show(struct device *dev, + struct nbl_netdev_name_attr *attr, char *buf) +{ + return scnprintf(buf, IFNAMSIZ, "%s\n", attr->net_dev_name); +} + +const char *const nbl_sysfs_qos_name[] = { + /* rdma */ + "save", + "tc2pri", + "sq_pri_map", + "raq_pri_map", + "pri_imap", + "pfc_imap", + "db_to_csch_en", + "sw_db_csch_th", + "csch_qlen_th", + "poll_wgt", + "sp_wrr", + "tc_wgt", + + "pfc", + "pfc_buffer", + "trust", + "dscp2prio", + "rdma_bw", + "rdma_rate", + "net_rate", +}; + +const char *const nbl_sysfs_mirror_name[] = { + "configure_down_mirror", + "configure_up_mirror", +}; + +static ssize_t rdma_rate_show(struct nbl_sysfs_qos_info *qos_info, char *buf) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + u32 rdma_rate = 0; + + serv_ops->get_rdma_rate(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &rdma_rate); + + return sprintf(buf, "%u\n", rdma_rate); +} + +static ssize_t rdma_rate_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt 
*dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	unsigned long rate;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &rate);
+	if (ret)
+		return -EINVAL;
+
+	ret = serv_ops->set_rate_limit(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+				       NBL_TRAFFIC_RDMA_TYPE, rate);
+	if (ret) {
+		netdev_err(net_dev->netdev, "configure_rdma_rate_limit: %s failed\n", buf);
+		return -EIO;
+	}
+
+	return count;
+}
+
+static ssize_t net_rate_show(struct nbl_sysfs_qos_info *qos_info, char *buf)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	u32 net_rate = 0;
+
+	serv_ops->get_net_rate(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &net_rate);
+
+	return sprintf(buf, "%u\n", net_rate);
+}
+
+static ssize_t net_rate_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	unsigned long rate;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &rate);
+	if (ret)
+		return -EINVAL;
+
+	ret = serv_ops->set_rate_limit(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+				       NBL_TRAFFIC_NET_TYPE, rate);
+	if (ret) {
+		netdev_err(net_dev->netdev, "configure_net_rate_limit: %s failed\n", buf);
+		return -EIO;
+	}
+
+	return count;
+}
+
+static ssize_t rdma_bw_show(struct nbl_sysfs_qos_info *qos_info, char *buf)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	int rdma_bw = 0;
+	ssize_t ret;
+
+	serv_ops->get_rdma_bw(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &rdma_bw);
+
+	ret = snprintf(buf, PAGE_SIZE, "rdma:%d, normal:%d\n",
+		       rdma_bw, NBL_MAX_BW - rdma_bw);
+	return ret;
+}
+
+/* Expects e.g. "rdma:30,normal:70"; the two shares must sum to NBL_MAX_BW (100). */
+static ssize_t rdma_bw_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	int rdma = 0, normal = 0;
+	int ret;
+
+	if (sscanf(buf, "rdma:%d,normal:%d", &rdma, &normal) != 2) {
+		pr_err("Invalid format, expected: rdma:<bw>,normal:<bw>\n");
+		return -EINVAL;
+	}
+
+	if (rdma + normal != NBL_MAX_BW) {
+		pr_err("Invalid value: sum must be 100\n");
+		return -EINVAL;
+	}
+
+	ret = serv_ops->configure_rdma_bw(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_COMMON_TO_ETH_ID(common), rdma);
+	if (ret) {
+		netdev_err(net_dev->netdev, "configure_rdma_bw: %s failed\n", buf);
+		return -EIO;
+	}
+
+	return count;
+}
+
+static ssize_t dscp2prio_show(struct nbl_sysfs_qos_info *qos_info, char *buf)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = 
net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + return serv_ops->dscp2prio_show(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), buf); +} + +static ssize_t dscp2prio_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + return serv_ops->configure_dscp2prio(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), + buf, count); +} + +static ssize_t trust_mode_show(struct nbl_sysfs_qos_info *qos_info, char *buf) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + return serv_ops->trust_mode_show(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), buf); +} + +static ssize_t trust_mode_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + u8 trust_mode; + int ret; + + if (strncmp(buf, "dscp", 4) == 0) { + trust_mode = NBL_TRUST_MODE_DSCP; + } else if (strncmp(buf, "802.1p", 6) == 0) { + trust_mode = NBL_TRUST_MODE_8021P; + } else { + netdev_err(net_dev->netdev, "Invalid trust mode: %s\n", buf); + return -EINVAL; + } + + ret = serv_ops->configure_trust(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), trust_mode); + if (ret) { + netdev_err(net_dev->netdev, "configure_qos trust mode: %s failed\n", buf); + return -EIO; + } + + netdev_info(net_dev->netdev, "Trust mode set to %s\n", buf); + return count; +} + +static ssize_t pfc_buffer_size_show(struct nbl_sysfs_qos_info *qos_info, char *buf) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + return serv_ops->pfc_buffer_size_show(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), buf); +} + +static ssize_t pfc_buffer_size_store(struct nbl_sysfs_qos_info *qos_info, + const char *buf, size_t count) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = 
NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	int prio, xoff, xon;
+	int ret;
+
+	if (sscanf(buf, "%d,%d,%d", &prio, &xoff, &xon) != 3)
+		return -EINVAL;
+
+	if (prio < 0 || prio >= NBL_MAX_PFC_PRIORITIES)
+		return -EINVAL;
+
+	ret = serv_ops->set_pfc_buffer_size(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					    NBL_COMMON_TO_ETH_ID(common), prio, xoff, xon);
+	if (ret) {
+		netdev_err(net_dev->netdev, "set_pfc_buffer_size failed\n");
+		return ret;
+	}
+
+	return count;
+}
+
+static ssize_t pfc_show(struct nbl_sysfs_qos_info *qos_info, char *buf)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+
+	return serv_ops->pfc_show(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+				  NBL_COMMON_TO_ETH_ID(common), buf);
+}
+
+static ssize_t pfc_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	u8 pfc_config[NBL_MAX_PFC_PRIORITIES];
+	int ret, i;
+	ssize_t len = count;
+
+	while (len > 0 && (buf[len - 1] == '\n' || buf[len - 1] == ' '))
+		len--;
+
+	if (len == 0) {
+		netdev_err(net_dev->netdev, "Invalid input: no data to parse.\n");
+		return -EINVAL;
+	}
+
+	if (len != 15) {
+		netdev_err(net_dev->netdev, "Invalid input length %zd.\n", len);
+		return -EINVAL;
+	}
+
+	ret = sscanf(buf, "%hhu,%hhu,%hhu,%hhu,%hhu,%hhu,%hhu,%hhu",
+		     &pfc_config[0], &pfc_config[1], &pfc_config[2], &pfc_config[3],
+		     &pfc_config[4], &pfc_config[5], &pfc_config[6], &pfc_config[7]);
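+
+	/*
+	 * The node expects eight comma-separated 0/1 flags, one per PFC
+	 * priority, e.g. writing "0,1,0,0,0,0,0,0" to the per-netdev
+	 * "qos/pfc" entry; that string is exactly 15 characters, which
+	 * the length check above relies on.
+	 */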
+	if (ret != NBL_MAX_PFC_PRIORITIES) {
+		netdev_err(net_dev->netdev, "Failed to parse PFC, expected 8 values, got %d\n",
+			   ret);
+		return -EINVAL;
+	}
+
+	netdev_info(net_dev->netdev, "Parsed PFC configuration: %u %u %u %u %u %u %u %u\n",
+		    pfc_config[0], pfc_config[1], pfc_config[2], pfc_config[3],
+		    pfc_config[4], pfc_config[5], pfc_config[6], pfc_config[7]);
+
+	for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++)
+		if (pfc_config[i] > 1)
+			return -EINVAL;
+
+	ret = serv_ops->configure_pfc(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+				      NBL_COMMON_TO_ETH_ID(common), pfc_config);
+	if (ret) {
+		netdev_err(net_dev->netdev, "configure pfc: %s failed\n", buf);
+		return -EIO;
+	}
+
+	return count;
+}
+
+static ssize_t nbl_qos_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct nbl_sysfs_qos_info *qos_info =
+		container_of(attr, struct nbl_sysfs_qos_info, kobj_attr);
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+
+	switch (qos_info->offset) {
+	case NBL_QOS_PFC:
+		return pfc_show(qos_info, buf);
+	case NBL_QOS_TRUST:
+		return trust_mode_show(qos_info, buf);
+	case NBL_QOS_DSCP2PRIO:
+		return dscp2prio_show(qos_info, buf);
+	case NBL_QOS_PFC_BUFFER:
+		return pfc_buffer_size_show(qos_info, buf);
+	case NBL_QOS_RDMA_BW:
+		return rdma_bw_show(qos_info, buf);
+	case NBL_QOS_RDMA_RATE:
+		return rdma_rate_show(qos_info, buf);
+	case NBL_QOS_NET_RATE:
+		return net_rate_show(qos_info, buf);
+	case NBL_QOS_RDMA_SAVE:
+	case NBL_QOS_RDMA_TC2PRI:
+	case NBL_QOS_RDMA_SQ_PRI_MAP:
+	case NBL_QOS_RDMA_RAQ_PRI_MAP:
+	case NBL_QOS_RDMA_PRI_IMAP:
+	case NBL_QOS_RDMA_PFC_IMAP:
+	case NBL_QOS_RDMA_DB_TO_CSCH_EN:
+	case NBL_QOS_RDMA_SW_DB_CSCH_TH:
+	case NBL_QOS_RDMA_CSCH_QLEN_TH:
+	case NBL_QOS_RDMA_POLL_WGT:
+	case NBL_QOS_RDMA_SP_WRR:
+	case NBL_QOS_RDMA_TC_WGT:
+		return nbl_dev_rdma_qos_cfg_show(dev_mgt, qos_info->offset, buf);
+	default:
+		return -EINVAL;
+	}
+}
+
+static ssize_t nbl_qos_store(struct kobject *kobj, struct kobj_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct nbl_sysfs_qos_info *qos_info =
+		container_of(attr, struct nbl_sysfs_qos_info, kobj_attr);
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+
+	switch (qos_info->offset) {
+	case NBL_QOS_PFC:
+		return pfc_store(qos_info, buf, count);
+	case NBL_QOS_TRUST:
+		return trust_mode_store(qos_info, buf, count);
+	case NBL_QOS_DSCP2PRIO:
+		return dscp2prio_store(qos_info, buf, count);
+	case NBL_QOS_PFC_BUFFER:
+		return pfc_buffer_size_store(qos_info, buf, count);
+	case NBL_QOS_RDMA_BW:
+		return rdma_bw_store(qos_info, buf, count);
+	case NBL_QOS_RDMA_RATE:
+		return rdma_rate_store(qos_info, buf, count);
+	case NBL_QOS_NET_RATE:
+		return net_rate_store(qos_info, buf, count);
+	case NBL_QOS_RDMA_SAVE:
+	case NBL_QOS_RDMA_TC2PRI:
+	case NBL_QOS_RDMA_SQ_PRI_MAP:
+	case NBL_QOS_RDMA_RAQ_PRI_MAP:
+	case NBL_QOS_RDMA_PRI_IMAP:
+	case NBL_QOS_RDMA_PFC_IMAP:
+	case NBL_QOS_RDMA_DB_TO_CSCH_EN:
+	case NBL_QOS_RDMA_SW_DB_CSCH_TH:
+	case NBL_QOS_RDMA_CSCH_QLEN_TH:
+	case NBL_QOS_RDMA_POLL_WGT:
+	case NBL_QOS_RDMA_SP_WRR:
+	case NBL_QOS_RDMA_TC_WGT:
+		return nbl_dev_rdma_qos_cfg_store(dev_mgt, qos_info->offset, buf, count);
+	default:
+		return -EINVAL;
+	}
+}
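+
+/*
+ * Mirror "select port" sysfs pair: the show handler reports the cached
+ * state in the same "mirror_en: <0|1>, mirror_port: vf<N>" format that
+ * the store handler parses with sscanf().
+ */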
+static ssize_t nbl_mirror_select_port_show(struct nbl_sysfs_mirror_info *mirror_info,
+					   char *buf)
+{
+	ssize_t ret;
+
+	ret = snprintf(buf, PAGE_SIZE, "mirror_en: %d, mirror_port: vf%d\n",
+		       mirror_info->mirror_en, mirror_info->vf_id);
+	return ret;
+}
+
+static ssize_t nbl_mirror_select_port_store(struct nbl_sysfs_mirror_info *mirror_info,
+					    const char *buf, size_t count, int dir)
+{
+	struct nbl_dev_net *net_dev = mirror_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	int vf_id;
+	int mirror_en;
+	int ret;
+	u16 function_id = U16_MAX;
+	u8 mt_id;
+
+	if (sscanf(buf, "mirror_en: %d, mirror_port: vf%d", &mirror_en, &vf_id) != 2)
+		return -EINVAL;
+
+	function_id = serv_ops->get_vf_function_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vf_id);
+	if (function_id == U16_MAX) {
+		netdev_info(net_dev->netdev, "vf id %d invalid\n", vf_id);
+		return -EINVAL;
+	}
+
+	serv_ops->get_mirror_table_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+				      NBL_COMMON_TO_VSI_ID(common), dir, !!mirror_en, &mt_id);
+
+	/* an mt_id of 8 means no free mirror table entry was found */
+	if (mt_id == 8) {
+		netdev_err(net_dev->netdev, "The mirror table configuration is full!\n");
+		return -EINVAL;
+	}
+
+	ret = serv_ops->configure_mirror(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->mgt_pf,
+					 !!mirror_en, dir, mt_id);
+	if (ret) {
+		netdev_err(net_dev->netdev, "configure mirror failed\n");
+		return -EIO;
+	}
+
+	ret = serv_ops->configure_mirror_table(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					       !!mirror_en, function_id, mt_id);
+	if (ret) {
+		netdev_err(net_dev->netdev, "configure mirror table failed\n");
+		return -EIO;
+	}
+
+	mirror_info->mirror_en = mirror_en;
+	mirror_info->vf_id = vf_id;
+	return ret ? 
ret : count; +} + +static ssize_t nbl_mirror_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct nbl_sysfs_mirror_info *mirror_info = + container_of(attr, struct nbl_sysfs_mirror_info, kobj_attr); + + switch (mirror_info->offset) { + case NBL_MIRROR_SELECT_SRC_PORT: + case NBL_MIRROR_SELECT_DST_PORT: + return nbl_mirror_select_port_show(mirror_info, buf); + default: + return -EINVAL; + } +} + +static ssize_t nbl_mirror_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_sysfs_mirror_info *mirror_info = + container_of(attr, struct nbl_sysfs_mirror_info, kobj_attr); + + switch (mirror_info->offset) { + case NBL_MIRROR_SELECT_SRC_PORT: + case NBL_MIRROR_SELECT_DST_PORT: + return nbl_mirror_select_port_store(mirror_info, buf, count, + mirror_info->offset); + default: + return -EINVAL; + } +} + +int nbl_netdev_add_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev) +{ + int ret; + int i; + + net_dev->qos_config.qos_kobj = kobject_create_and_add("qos", &netdev->dev.kobj); + if (!net_dev->qos_config.qos_kobj) + return -ENOMEM; + + for (i = 0; i < NBL_QOS_TYPE_MAX; i++) { + net_dev->qos_config.qos_info[i].net_dev = net_dev; + net_dev->qos_config.qos_info[i].offset = i; + /* create qos sysfs */ + sysfs_attr_init(&net_dev->qos_config.qos_info[i].kobj_attr.attr); + net_dev->qos_config.qos_info[i].kobj_attr.attr.name = nbl_sysfs_qos_name[i]; + net_dev->qos_config.qos_info[i].kobj_attr.attr.mode = 0644; + net_dev->qos_config.qos_info[i].kobj_attr.show = nbl_qos_show; + net_dev->qos_config.qos_info[i].kobj_attr.store = nbl_qos_store; + ret = sysfs_create_file(net_dev->qos_config.qos_kobj, + &net_dev->qos_config.qos_info[i].kobj_attr.attr); + if (ret) + netdev_err(netdev, "Failed to create %s sysfs file\n", + nbl_sysfs_qos_name[i]); + } + + return 0; +} + +int nbl_netdev_add_mirror_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev) +{ + int ret; + int i; + + net_dev->mirror_config.mirror_kobj = kobject_create_and_add("mirror", &netdev->dev.kobj); + if (!net_dev->mirror_config.mirror_kobj) + return -ENOMEM; + + for (i = 0; i < NBL_MIRROR_TYPE_MAX; i++) { + net_dev->mirror_config.mirror_info[i].net_dev = net_dev; + net_dev->mirror_config.mirror_info[i].offset = i; + + sysfs_attr_init(&net_dev->mirror_config.mirror_info[i].kobj_attr.attr); + net_dev->mirror_config.mirror_info[i].kobj_attr.attr.name = + nbl_sysfs_mirror_name[i]; + net_dev->mirror_config.mirror_info[i].kobj_attr.attr.mode = 0644; + net_dev->mirror_config.mirror_info[i].kobj_attr.show = nbl_mirror_show; + net_dev->mirror_config.mirror_info[i].kobj_attr.store = nbl_mirror_store; + + ret = sysfs_create_file(net_dev->mirror_config.mirror_kobj, + &net_dev->mirror_config.mirror_info[i].kobj_attr.attr); + + if (ret) + netdev_err(netdev, "Failed to create %s sysfs file\n", + nbl_sysfs_mirror_name[i]); + } + return 0; +} + +void nbl_netdev_remove_sysfs(struct nbl_dev_net *net_dev) +{ + int i; + + if (!net_dev->qos_config.qos_kobj) + return; + + for (i = 0; i < NBL_QOS_TYPE_MAX; i++) + sysfs_remove_file(net_dev->qos_config.qos_kobj, + &net_dev->qos_config.qos_info[i].kobj_attr.attr); + + kobject_put(net_dev->qos_config.qos_kobj); +} + +void nbl_netdev_remove_mirror_sysfs(struct nbl_dev_net *net_dev) +{ + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + 
struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + int i; + + serv_ops->clear_mirror_cfg(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->mgt_pf); + + if (!net_dev->mirror_config.mirror_kobj) + return; + + for (i = 0; i < NBL_MIRROR_TYPE_MAX; i++) + sysfs_remove_file(net_dev->mirror_config.mirror_kobj, + &net_dev->mirror_config.mirror_info[i].kobj_attr.attr); + + kobject_put(net_dev->mirror_config.mirror_kobj); +} + +void nbl_net_add_name_attr(struct nbl_netdev_name_attr *attr, char *rep_name) +{ + sysfs_attr_init(&attr->attr); + NBL_SET_RO_ATTR(attr, dev_name, net_rep_show); + strscpy(attr->net_dev_name, rep_name, IFNAMSIZ); +} + +void nbl_net_remove_dev_attr(struct nbl_dev_net *net_dev) +{ + sysfs_remove_file(&net_dev->netdev->dev.kobj, &net_dev->dev_attr.dev_name_attr.attr); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.h new file mode 100644 index 0000000000000000000000000000000000000000..6063679f98bdedde3c8f3bb6742252e0237f7e34 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_SYSFS_H_ +#define _NBL_SYSFS_H_ + +enum nbl_qos_param_types { + NBL_QOS_RDMA_SAVE, + NBL_QOS_RDMA_TC2PRI, + NBL_QOS_RDMA_SQ_PRI_MAP, + NBL_QOS_RDMA_RAQ_PRI_MAP, + NBL_QOS_RDMA_PRI_IMAP, + NBL_QOS_RDMA_PFC_IMAP, + NBL_QOS_RDMA_DB_TO_CSCH_EN, + NBL_QOS_RDMA_SW_DB_CSCH_TH, + NBL_QOS_RDMA_CSCH_QLEN_TH, + NBL_QOS_RDMA_POLL_WGT, + NBL_QOS_RDMA_SP_WRR, + + /* function base */ + NBL_QOS_RDMA_TC_WGT, + NBL_QOS_PFC, + NBL_QOS_PFC_BUFFER, + NBL_QOS_TRUST, + NBL_QOS_DSCP2PRIO, + NBL_QOS_RDMA_BW, + NBL_QOS_RDMA_RATE, + NBL_QOS_NET_RATE, + NBL_QOS_TYPE_MAX +}; + +enum nbl_mirror_param_types { + NBL_MIRROR_SELECT_SRC_PORT, + NBL_MIRROR_SELECT_DST_PORT, + NBL_MIRROR_TYPE_MAX +}; + +struct nbl_sysfs_qos_info { + int offset; + struct nbl_dev_net *net_dev; + struct kobj_attribute kobj_attr; +}; + +struct nbl_net_qos { + struct kobject *qos_kobj; + struct nbl_sysfs_qos_info qos_info[NBL_QOS_TYPE_MAX]; +}; + +struct nbl_sysfs_mirror_info { + int offset; + int mirror_en; + u16 vf_id; + struct nbl_dev_net *net_dev; + struct kobj_attribute kobj_attr; +}; + +struct nbl_net_mirror { + struct kobject *mirror_kobj; + struct nbl_sysfs_mirror_info mirror_info[NBL_MIRROR_TYPE_MAX]; +}; + +#endif + diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.c new file mode 100644 index 0000000000000000000000000000000000000000..1288574b59e73400408fea6d2a15047799bc92d9 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.c @@ -0,0 +1,1532 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include +#include +#include "nbl_tc.h" +#include "nbl_tc_tun.h" + +static int nbl_tc_pedit_header_offsets[] = { + [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct nbl_tc_pedit_headers, eth), + [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct nbl_tc_pedit_headers, ip4), + [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct nbl_tc_pedit_headers, ip6), + [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct nbl_tc_pedit_headers, tcp), + [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct nbl_tc_pedit_headers, udp), +}; + +#define nbl_pedit_header(_ph, _htype) ((void *)(_ph) + nbl_tc_pedit_header_offsets[_htype]) + +static int nbl_tc_parse_proto(const struct flow_rule *rule, + struct nbl_flow_pattern_conf *filter, + const struct nbl_common_info *common) +{ + struct flow_match_basic match; + u16 type = 0; + + flow_rule_match_basic(rule, &match); + + if (match.key->n_proto & match.mask->n_proto) { + type = ntohs(match.key->n_proto); + if (type != ETH_P_IP && type != ETH_P_IPV6 && + type != ETH_P_8021Q && type != ETH_P_8021AD) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow with ethtype 0x%04x is not supported\n", + type); + return -EOPNOTSUPP; + } + + filter->input.l2_data.ether_type = ntohs(match.key->n_proto); + filter->input.l2_mask.ether_type = ntohs(match.mask->n_proto); + filter->key_flag |= NBL_FLOW_KEY_ETHERTYPE_FLAG; + } + if (match.key->ip_proto & match.mask->ip_proto) { + filter->key_flag |= NBL_FLOW_KEY_PROTOCOL_FLAG; + filter->input.ip.proto = match.key->ip_proto; + filter->input.ip_mask.proto = match.mask->ip_proto; + } + + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow parse proto (%u) l2_data.ether_type=0x%04x, l2_mask.ether_type=0x%04x", + match.key->ip_proto, filter->input.l2_data.ether_type, + filter->input.l2_mask.ether_type); + return 0; +} + +static int nbl_tc_parse_eth(const struct flow_rule *rule, + struct nbl_flow_pattern_conf *filter, + const struct nbl_common_info *common) +{ + struct flow_match_eth_addrs match; + int idx = 0; + + flow_rule_match_eth_addrs(rule, &match); + + if (match.key && match.mask) { + if (is_broadcast_ether_addr(match.key->dst) || + is_multicast_ether_addr(match.key->dst) || + is_zero_ether_addr(match.key->dst) || + !is_broadcast_ether_addr(match.mask->dst)) { + /* ignore src mac check for normal flow offload */ + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow dmac broadcast, multicast or fuzzy match is not supported\n"); + return -EOPNOTSUPP; + } + + ether_addr_copy(filter->input.l2_mask.dst_mac, match.mask->dst); + for (idx = 0; idx < ETH_ALEN; idx++) + filter->input.l2_data.dst_mac[idx] = match.key->dst[ETH_ALEN - 1 - idx]; + + filter->key_flag |= NBL_FLOW_KEY_DSTMAC_FLAG; + /* set vlan flag to match table profile graph even there is no vlan match */ + filter->key_flag |= NBL_FLOW_KEY_SVLAN_FLAG; + filter->key_flag |= NBL_FLOW_KEY_CVLAN_FLAG; + } + + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow l2_data.dst_mac=0x%02x:%02x:%02x:%02x:%02x:%02x", + filter->input.l2_data.dst_mac[5], filter->input.l2_data.dst_mac[4], + filter->input.l2_data.dst_mac[3], filter->input.l2_data.dst_mac[2], + filter->input.l2_data.dst_mac[1], filter->input.l2_data.dst_mac[0]); + + return 0; +} + +static int nbl_tc_parse_control(const struct flow_rule *rule, + struct nbl_flow_pattern_conf *filter, + const struct nbl_common_info *common) +{ + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + + if (match.key->addr_type & match.mask->addr_type) { + if (!filter->input.l2_data.ether_type) { + filter->input.l2_data.ether_type = 
ntohs(match.key->addr_type);
+			filter->input.l2_mask.ether_type = ntohs(match.mask->addr_type);
+		}
+	}
+
+	nbl_debug(common, NBL_DEBUG_FLOW,
+		  "tc flow parse control.ether_type=0x%04x, flag:%x",
+		  filter->input.l2_data.ether_type, match.key->flags);
+	return 0;
+}
+
+static int nbl_tc_parse_vlan(const struct flow_rule *rule,
+			     struct nbl_flow_pattern_conf *filter,
+			     const struct nbl_common_info *common)
+{
+	struct flow_match_vlan match;
+
+	flow_rule_match_vlan(rule, &match);
+	if (match.key && match.mask) {
+		if (match.mask->vlan_id == VLAN_VID_MASK) {
+			filter->input.svlan_tag = match.key->vlan_id & 0xFFF;
+			filter->input.svlan_mask = match.mask->vlan_id;
+			filter->input.svlan_type = filter->input.l2_data.ether_type;
+			filter->input.vlan_cnt++;
+			nbl_debug(common, NBL_DEBUG_FLOW,
+				  "tc flow l2data.vlan_id=%d,vlan_type=0x%04x",
+				  filter->input.svlan_tag, filter->input.svlan_type);
+		} else {
+			nbl_info(common, NBL_DEBUG_FLOW,
+				 "tc flow fuzzy vlan mask 0x%04x is not supported\n",
+				 match.mask->vlan_id);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int nbl_tc_parse_cvlan(const struct flow_rule *rule,
+			      struct nbl_flow_pattern_conf *filter,
+			      const struct nbl_common_info *common)
+{
+	filter->input.is_cvlan = true;
+
+	return 0;
+}
+
+static int nbl_tc_parse_tunnel_ip(const struct flow_rule *rule,
+				  struct nbl_flow_pattern_conf *filter,
+				  const struct nbl_common_info *common)
+{
+	return 0;
+}
+
+static int nbl_tc_parse_tunnel_ports(const struct flow_rule *rule,
+				     struct nbl_flow_pattern_conf *filter,
+				     const struct nbl_common_info *common)
+{
+	struct flow_match_ports enc_ports;
+
+	flow_rule_match_enc_ports(rule, &enc_ports);
+
+	if (memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) {
+		nbl_err(common, NBL_DEBUG_FLOW,
+			"nbl tc parse tunnel err: udp tunnel decap filter must match dst_port fully.\n");
+		return -EOPNOTSUPP;
+	}
+
+	filter->input.l4_outer.dst_port = be16_to_cpu(enc_ports.key->dst);
+	filter->input.l4_mask_outer.dst_port = enc_ports.mask->dst;
+
+	filter->key_flag |= NBL_FLOW_KEY_T_DSTPORT_FLAG;
+
+	nbl_debug(common, NBL_DEBUG_FLOW, "parse outer tnl udp:dport:0x%x.\n",
+		  filter->input.l4_outer.dst_port);
+
+	return 0;
+}
+
+static int nbl_tc_parse_tunnel_keyid(const struct flow_rule *rule,
+				     struct nbl_flow_pattern_conf *filter,
+				     const struct nbl_common_info *common)
+{
+	struct flow_match_enc_keyid enc_keyid;
+#define NBL_TC_VNI_FLAG_BIT 8
+
+	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+		return 0;
+
+	flow_rule_match_enc_keyid(rule, &enc_keyid);
+	if (!enc_keyid.mask->keyid)
+		return 0;
+
+	/* the flow-table key carries the VNI shifted up by NBL_TC_VNI_FLAG_BIT */
+	filter->input.tnl.vni = be32_to_cpu(enc_keyid.key->keyid) << NBL_TC_VNI_FLAG_BIT;
+	filter->input.tnl_mask.vni = enc_keyid.mask->keyid;
+
+	filter->key_flag |= NBL_FLOW_KEY_T_VNI_FLAG;
+	nbl_debug(common, NBL_DEBUG_FLOW, "parse outer tnl keyid:0x%x/0x%x.\n",
+		  filter->input.tnl.vni, filter->input.tnl_mask.vni);
+
+	return 0;
+}
+
+static bool
+nbl_tc_find_ipv4_address(const struct net_device *dev, __be32 ipv4_addr)
+{
+	bool ip_find = false;
+	struct in_ifaddr *ifa;
+	struct in_device *in_dev = in_dev_get(dev);
+
+	/* check whether the dev has the ip addr */
+	if (!in_dev)
+		goto end;
+
+	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+		if (ifa->ifa_address == ipv4_addr) {
+			ip_find = true;
+			break;
+		}
+	}
+
+	in_dev_put(in_dev);
+
+end:
+	return ip_find;
+}
+
+static bool
+nbl_tc_find_vlan_dev_ipv4_address(const struct net_device *dev, __be32 ipv4_addr)
+{
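+	/*
+	 * The tunnel endpoint address may be configured on a VLAN device
+	 * stacked on top of this netdev rather than on the netdev itself,
+	 * so walk the namespace for VLAN uppers whose real device is @dev
+	 * and check their addresses as well.
+	 */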
+	struct net_device *child;
+	const struct net_device *real_dev;
+	bool ip_find = false;
+
+	for_each_netdev(dev_net(dev), child) {
+		if (is_vlan_dev(child)) {
+			real_dev = vlan_dev_real_dev(child);
+			if (real_dev != dev)
+				continue;
+			ip_find = nbl_tc_find_ipv4_address(child, ipv4_addr);
+			if (ip_find)
+				break;
+		}
+	}
+
+	return ip_find;
+}
+
+static bool
+nbl_tc_find_ipv6_address(const struct net_device *dev, struct in6_addr ipv6_addr)
+{
+	bool ip_find = false;
+	struct inet6_ifaddr *ifa6;
+	struct inet6_dev *in6_dev = in6_dev_get(dev);
+
+	/* check whether the dev has the ip addr */
+	if (!in6_dev)
+		goto end;
+
+	read_lock_bh(&in6_dev->lock);
+	list_for_each_entry(ifa6, &in6_dev->addr_list, if_list) {
+		if (!memcmp(&ifa6->addr, &ipv6_addr, sizeof(ifa6->addr))) {
+			ip_find = true;
+			break;
+		}
+	}
+	read_unlock_bh(&in6_dev->lock);
+
+	in6_dev_put(in6_dev);
+
+end:
+	return ip_find;
+}
+
+static bool
+nbl_tc_find_vlan_dev_ipv6_address(const struct net_device *dev, struct in6_addr ipv6_addr)
+{
+	struct net_device *child;
+	const struct net_device *real_dev;
+	bool ip_find = false;
+
+	for_each_netdev(dev_net(dev), child) {
+		if (is_vlan_dev(child)) {
+			real_dev = vlan_dev_real_dev(child);
+			if (real_dev != dev)
+				continue;
+			ip_find = nbl_tc_find_ipv6_address(child, ipv6_addr);
+			if (ip_find)
+				break;
+		}
+	}
+
+	return ip_find;
+}
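+
+/*
+ * For decap rules the outer destination IP must belong to the ingress
+ * device (or one of its VLAN uppers); LAG input ports skip this check.
+ * Rules that fail the check are rejected with -EOPNOTSUPP.
+ */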
for (idx = 0; idx < NBL_IPV6_ADDR_LEN_AS_U8; idx++) { + filter->input.ip_outer.src_ip.v6_addr[idx] = + ip6_addrs.key->src.in6_u.u6_addr8[max_idx - idx]; + filter->input.ip_mask_outer.src_ip.v6_addr[idx] = + ip6_addrs.mask->src.in6_u.u6_addr8[max_idx - idx]; + filter->input.ip_outer.dst_ip.v6_addr[idx] = + ip6_addrs.key->dst.in6_u.u6_addr8[max_idx - idx]; + filter->input.ip_mask_outer.dst_ip.v6_addr[idx] = + ip6_addrs.mask->dst.in6_u.u6_addr8[max_idx - idx]; + } + filter->input.ip_outer.ip_ver = NBL_IP_VERSION_V6; + filter->key_flag |= NBL_FLOW_KEY_T_DIPV6_FLAG; + filter->key_flag |= NBL_FLOW_KEY_T_OPT_DATA_FLAG; + filter->key_flag |= NBL_FLOW_KEY_T_OPT_CLASS_FLAG; + + snprintf(sipv6, sizeof(sipv6), "%pI6", &ip6_addrs.key->src); + snprintf(dipv6, sizeof(dipv6), "%pI6", &ip6_addrs.key->dst); + snprintf(sipv6_msk, sizeof(sipv6_msk), "%pI6", &ip6_addrs.mask->src); + snprintf(dipv6_msk, sizeof(dipv6_msk), "%pI6", &ip6_addrs.mask->dst); + + nbl_debug(common, NBL_DEBUG_FLOW, "parse outer tnl ctl ipv6, sip:%s/%s, dip:%s/%s\n", + sipv6, sipv6_msk, dipv6, dipv6_msk); + + if (filter->input.port & NBL_FLOW_IN_PORT_TYPE_LAG) { + dev_ok = true; + } else { + dev_ok = nbl_tc_find_ipv6_address(filter->input_dev, ip6_addrs.key->dst); + if (!dev_ok) + dev_ok = nbl_tc_find_vlan_dev_ipv6_address(filter->input_dev, + ip6_addrs.key->dst); + } + } + + if (!dev_ok) + return -EOPNOTSUPP; + + return 0; +} + +static int nbl_tc_parse_ip(const struct flow_rule *rule, + struct nbl_flow_pattern_conf *filter, + const struct nbl_common_info *common) +{ + struct flow_match_ip ip; + + flow_rule_match_ip(rule, &ip); + filter->input.ip.tos = ip.key->tos; + filter->input.ip.ttl = ip.key->ttl; + filter->input.ip_mask.tos = ip.mask->tos; + filter->input.ip_mask.ttl = ip.mask->ttl; + filter->key_flag |= NBL_FLOW_KEY_TTL_FLAG; + filter->key_flag |= NBL_FLOW_KEY_TOS_FLAG; + filter->key_flag |= NBL_FLOW_KEY_DSCP_FLAG; + + nbl_debug(common, NBL_DEBUG_FLOW, "tos is %u, ttl is %u", ip.key->tos, ip.key->ttl); + return 0; +} + +static int nbl_tc_parse_ip4(const struct flow_rule *rule, + struct nbl_flow_pattern_conf *filter, + const struct nbl_common_info *common) +{ + struct flow_match_ipv4_addrs ip_addrs; + + flow_rule_match_ipv4_addrs(rule, &ip_addrs); + if (ip_addrs.mask->dst == 0 || ip_addrs.key->dst == 0) { + nbl_debug(common, NBL_DEBUG_FLOW, "dst ipv4:key 0x%x masked 0x%x", + ip_addrs.key->dst, ip_addrs.mask->dst); + return 0; + } else if (ip_addrs.mask->dst != NBL_FLOW_TABLE_IPV4_DEFAULT_MASK) { + nbl_info(common, NBL_DEBUG_FLOW, "dst ipv4:0x%x mask:0x%x not supported", + ip_addrs.key->dst, ip_addrs.mask->dst); + return -EINVAL; + } + + filter->input.ip.ip_ver = NBL_IP_VERSION_V4; + filter->key_flag |= NBL_FLOW_KEY_DIPV4_FLAG; + filter->key_flag |= NBL_FLOW_KEY_SIPV4_FLAG; + nbl_debug(common, NBL_DEBUG_FLOW, "nbl parse dst ipv4:0x%x mask:0x%x", + ip_addrs.key->dst, ip_addrs.mask->dst); + filter->input.ip.src_ip.addr = be32_to_cpu(ip_addrs.key->src); + filter->input.ip_mask.src_ip.addr = ip_addrs.mask->src; + filter->input.ip.dst_ip.addr = be32_to_cpu(ip_addrs.key->dst); + filter->input.ip_mask.dst_ip.addr = ip_addrs.mask->dst; + return 0; +} + 
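+ /* Illustrative example, not part of the original patch: only an all-zero + * or an exact-match destination mask is accepted here, so a rule such as + * tc filter add dev $PF ingress flower dst_ip 192.0.2.1/32 action drop + * can be offloaded, while a /24 mask is rejected with -EINVAL. */ + 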
+static int nbl_tc_parse_ip6(const struct flow_rule *rule, + struct nbl_flow_pattern_conf *filter, + const struct nbl_common_info *common) +{ + struct flow_match_ipv6_addrs ip6_addrs; + int idx = 0; + int max_idx = NBL_IPV6_ADDR_LEN_AS_U8 - 1; + u8 mask_ip6[NBL_IPV6_ADDR_LEN_AS_U8] = {0}; + u8 exact_ip6[NBL_IPV6_ADDR_LEN_AS_U8]; + + memset(exact_ip6, 0xff, sizeof(exact_ip6)); + flow_rule_match_ipv6_addrs(rule, &ip6_addrs); + if (!memcmp(mask_ip6, ip6_addrs.mask->dst.in6_u.u6_addr8, NBL_IPV6_ADDR_LEN_AS_U8) || + !memcmp(mask_ip6, ip6_addrs.key->dst.in6_u.u6_addr8, NBL_IPV6_ADDR_LEN_AS_U8)) { + nbl_debug(common, NBL_DEBUG_FLOW, "dst ipv6:0x%x-0x%x-0x%x-0x%x masked", + ip6_addrs.key->dst.in6_u.u6_addr32[0], + ip6_addrs.key->dst.in6_u.u6_addr32[1], + ip6_addrs.key->dst.in6_u.u6_addr32[2], + ip6_addrs.key->dst.in6_u.u6_addr32[3]); + return 0; + } else if (memcmp(exact_ip6, ip6_addrs.mask->dst.in6_u.u6_addr8, sizeof(exact_ip6))) { + nbl_info(common, NBL_DEBUG_FLOW, "dst ipv6:0x%x-0x%x-0x%x-0x%x mask:0x%x-0x%x-0x%x-0x%x not supported", + ip6_addrs.key->dst.in6_u.u6_addr32[0], + ip6_addrs.key->dst.in6_u.u6_addr32[1], + ip6_addrs.key->dst.in6_u.u6_addr32[2], + ip6_addrs.key->dst.in6_u.u6_addr32[3], + ip6_addrs.mask->dst.in6_u.u6_addr32[0], + ip6_addrs.mask->dst.in6_u.u6_addr32[1], + ip6_addrs.mask->dst.in6_u.u6_addr32[2], + ip6_addrs.mask->dst.in6_u.u6_addr32[3]); + return -EINVAL; + } + + filter->input.ip.ip_ver = NBL_IP_VERSION_V6; + filter->key_flag |= NBL_FLOW_KEY_DIPV6_FLAG; + filter->key_flag |= NBL_FLOW_KEY_SIPV6_FLAG; + filter->key_flag |= NBL_FLOW_KEY_HOPLIMIT_FLAG; + nbl_debug(common, NBL_DEBUG_FLOW, "nbl parse dst ipv6:0x%x-0x%x-0x%x-0x%x mask:0x%x-0x%x-0x%x-0x%x", + ip6_addrs.key->dst.in6_u.u6_addr32[0], ip6_addrs.key->dst.in6_u.u6_addr32[1], + ip6_addrs.key->dst.in6_u.u6_addr32[2], ip6_addrs.key->dst.in6_u.u6_addr32[3], + ip6_addrs.mask->dst.in6_u.u6_addr32[0], ip6_addrs.mask->dst.in6_u.u6_addr32[1], + ip6_addrs.mask->dst.in6_u.u6_addr32[2], ip6_addrs.mask->dst.in6_u.u6_addr32[3]); + for (idx = 0; idx < NBL_IPV6_ADDR_LEN_AS_U8; idx++) { + filter->input.ip.src_ip.v6_addr[idx] = + ip6_addrs.key->src.in6_u.u6_addr8[max_idx - idx]; + filter->input.ip_mask.src_ip.v6_addr[idx] = + ip6_addrs.mask->src.in6_u.u6_addr8[max_idx - idx]; + filter->input.ip.dst_ip.v6_addr[idx] = + ip6_addrs.key->dst.in6_u.u6_addr8[max_idx - idx]; + filter->input.ip_mask.dst_ip.v6_addr[idx] = + ip6_addrs.mask->dst.in6_u.u6_addr8[max_idx - idx]; + } + + return 0; +} + +static int nbl_tc_parse_ports(const struct flow_rule *rule, + struct nbl_flow_pattern_conf *filter, + const struct nbl_common_info *common) +{ + struct flow_match_ports port; + + flow_rule_match_ports(rule, &port); + if (!port.mask->dst && !port.mask->src) { + nbl_debug(common, NBL_DEBUG_FLOW, "src and dst port:%d-%d masked", + port.key->src, port.key->dst); + return 0; + } else if (port.mask->dst != NBL_FLOW_TABLE_L4_PORT_DEFAULT_MASK || + port.mask->src != NBL_FLOW_TABLE_L4_PORT_DEFAULT_MASK) { + nbl_info(common, NBL_DEBUG_FLOW, "src and dst port mask:%d-%d not supported", + port.mask->src, port.mask->dst); + return -EINVAL; + } + + filter->key_flag |= NBL_FLOW_KEY_DSTPORT_FLAG; + filter->key_flag |= NBL_FLOW_KEY_SRCPORT_FLAG; + nbl_debug(common, NBL_DEBUG_FLOW, "nbl parse src and dst port key:%d-%d, mask:%d-%d", + port.key->src, port.key->dst, port.mask->src, port.mask->dst); + filter->input.l4.dst_port = be16_to_cpu(port.key->dst); + filter->input.l4_mask.dst_port = be16_to_cpu(port.mask->dst); + filter->input.l4.src_port = be16_to_cpu(port.key->src); + filter->input.l4_mask.src_port = be16_to_cpu(port.mask->src); + return 0; +} + +static struct nbl_tc_flow_parse_pattern parse_pattern_list[] = { + { FLOW_DISSECTOR_KEY_BASIC, nbl_tc_parse_proto }, + { FLOW_DISSECTOR_KEY_ETH_ADDRS, nbl_tc_parse_eth }, + { FLOW_DISSECTOR_KEY_CONTROL, nbl_tc_parse_control }, + { FLOW_DISSECTOR_KEY_VLAN, nbl_tc_parse_vlan }, + { FLOW_DISSECTOR_KEY_CVLAN, nbl_tc_parse_cvlan }, + { FLOW_DISSECTOR_KEY_ENC_IP, nbl_tc_parse_tunnel_ip}, + { FLOW_DISSECTOR_KEY_ENC_PORTS, nbl_tc_parse_tunnel_ports }, + { FLOW_DISSECTOR_KEY_ENC_KEYID, nbl_tc_parse_tunnel_keyid }, + { FLOW_DISSECTOR_KEY_ENC_CONTROL, nbl_tc_parse_tunnel_control }, + { FLOW_DISSECTOR_KEY_IPV4_ADDRS, nbl_tc_parse_ip4 }, + 
{ FLOW_DISSECTOR_KEY_IPV6_ADDRS, nbl_tc_parse_ip6 }, + { FLOW_DISSECTOR_KEY_IP, nbl_tc_parse_ip }, + { FLOW_DISSECTOR_KEY_PORTS, nbl_tc_parse_ports }, +}; + +static int nbl_tc_flow_set_out_param(struct net_device *out_dev, + struct nbl_serv_lag_info *lag_info, + struct nbl_tc_port *out, + struct nbl_common_info *common) +{ + struct nbl_netdev_priv *dev_priv = NULL; + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + u16 eswitch_mode = NBL_ESWITCH_NONE; + + if (netif_is_lag_master(out_dev)) { + if (lag_info && lag_info->bond_netdev && lag_info->bond_netdev == out_dev) { + out->type = NBL_TC_PORT_TYPE_BOND; + out->id = lag_info->lag_id; + goto set_param_end; + } else { + return -EINVAL; + } + } + + dev_priv = netdev_priv(out_dev); + if (!dev_priv->adapter) + return -EINVAL; + + if (common->tc_inst_id != dev_priv->adapter->common.tc_inst_id) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow rule in different nic is not supported\n"); + return -EOPNOTSUPP; + } + + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(dev_priv->adapter); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + eswitch_mode = + disp_ops->get_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (eswitch_mode != NBL_ESWITCH_OFFLOADS) + return -EINVAL; + + if (dev_priv->rep) { + out->type = NBL_TC_PORT_TYPE_VSI; + out->id = dev_priv->rep->rep_vsi_id; + } else { + out->type = NBL_TC_PORT_TYPE_ETH; + out->id = dev_priv->adapter->common.eth_id; + } + +set_param_end: + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow set out.type=%s, out.id=%d\n", + out->type == NBL_TC_PORT_TYPE_VSI ? "vsi" : "uplink", out->id); + + return 0; +} + +static bool +nbl_tc_is_valid_netdev(struct net_device *netdev, struct nbl_serv_netdev_ops *netdev_ops) +{ + if (netif_is_lag_master(netdev)) + return true; + + if (netdev->netdev_ops == netdev_ops->pf_netdev_ops || + netdev->netdev_ops == netdev_ops->rep_netdev_ops) + return true; + + return false; +} + +static int nbl_tc_flow_init_param(struct nbl_netdev_priv *priv, struct flow_cls_offload *f, + struct nbl_common_info *common, struct nbl_tc_flow_param *param) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + const struct flow_action_entry *act_entry; + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(priv->adapter); + struct nbl_serv_netdev_ops *netdev_ops = &serv_mgt->net_resource_mgt->netdev_ops; + struct nbl_serv_lag_info *lag_info = NULL; + int i = 0; + int ret = 0; + int redirect_cnt = 0; + int mirred_cnt = 0; + const struct rtnl_link_ops *tnl_ops; + + if (priv->rep) { + param->in.type = NBL_TC_PORT_TYPE_VSI; + param->in.id = priv->rep->rep_vsi_id; + } else if (serv_mgt->net_resource_mgt->lag_info) { + if (serv_mgt->net_resource_mgt->lag_info->lag_id >= NBL_LAG_MAX_NUM) + return -EINVAL; + param->in.type = NBL_TC_PORT_TYPE_BOND; + param->in.id = serv_mgt->net_resource_mgt->lag_info->lag_id; + } else { + param->in.type = NBL_TC_PORT_TYPE_ETH; + param->in.id = common->eth_id; + } + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow init param in.type=%s, type=%d, in.id=%d, dev:%s", + param->in.type == NBL_TC_PORT_TYPE_VSI ? "vsi" : "uplink", + param->in.type, param->in.id, priv->netdev ? 
priv->netdev->name : "NULL"); + + flow_action_for_each(i, act_entry, &rule->action) { + if (act_entry->id == FLOW_ACTION_REDIRECT) { + if (!act_entry->dev) + return -EINVAL; + if (redirect_cnt) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow with more than one redirect outport is not supported"); + return -EINVAL; + } + tnl_ops = act_entry->dev->rtnl_link_ops; + + if (!tnl_ops || memcmp(tnl_ops->kind, "vxlan", sizeof("vxlan"))) { + if (!nbl_tc_is_valid_netdev(act_entry->dev, netdev_ops)) + return -ENODEV; + + if (netif_is_lag_master(act_entry->dev)) + lag_info = serv_mgt->net_resource_mgt->lag_info; + + ret = nbl_tc_flow_set_out_param(act_entry->dev, lag_info, + &param->out, common); + if (ret) + return ret; + } + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow init redirect outport"); + + redirect_cnt++; + } else if (act_entry->id == FLOW_ACTION_MIRRED) { + if (!act_entry->dev) + return -EINVAL; + if (mirred_cnt) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow with more than one mirror outport is not supported"); + return -EINVAL; + } + if (!nbl_tc_is_valid_netdev(act_entry->dev, + &serv_mgt->net_resource_mgt->netdev_ops)) + return -ENODEV; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow init mirror outport"); + + lag_info = NULL; + if (netif_is_lag_master(act_entry->dev)) + lag_info = serv_mgt->net_resource_mgt->lag_info; + + ret = nbl_tc_flow_set_out_param(act_entry->dev, lag_info, + &param->mirror_out, common); + if (ret) + return ret; + mirred_cnt++; + } else if (redirect_cnt > 0 || mirred_cnt > 0) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow different edit action with multiple outport is not supported"); + return -EOPNOTSUPP; + } + } + + return ret; +} + 
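+ /* Illustrative note, not part of the original patch: rule setup runs in + * two passes - nbl_tc_flow_init_param() above resolves the in/out ports + * from the action list, then nbl_tc_parse_pattern() below walks the + * dissector keys and fills the hardware match key. */ + 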
+static int nbl_tc_parse_pattern(struct nbl_service_mgt *serv_mgt, + struct flow_cls_offload *f, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u32 i = 0; + int ret = 0; + + switch (param->in.type) { + case NBL_TC_PORT_TYPE_VSI: + filter->input.port = param->in.id | NBL_FLOW_IN_PORT_TYPE_VSI; + break; + case NBL_TC_PORT_TYPE_ETH: + filter->input.port = param->in.id; + break; + case NBL_TC_PORT_TYPE_BOND: + filter->input.port = param->in.id | NBL_FLOW_IN_PORT_TYPE_LAG; + break; + default: + nbl_err(common, NBL_DEBUG_FLOW, "tc flow invalid in_port type:%d\n", + param->in.type); + return -EINVAL; + } + filter->key_flag |= NBL_FLOW_KEY_INPORT8_FLAG; + filter->key_flag |= NBL_FLOW_KEY_INPORT4_FLAG; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow dissector->used_keys=%llx\n", + dissector->used_keys); + if (dissector->used_keys & + ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow key used: 0x%llx is not supported\n", + dissector->used_keys); + return -EOPNOTSUPP; + } + + for (i = 0; i < ARRAY_SIZE(parse_pattern_list); i++) { + if (flow_rule_match_key(rule, parse_pattern_list[i].pattern_type)) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow key %d\n", + parse_pattern_list[i].pattern_type); + ret = parse_pattern_list[i].parse_func(rule, filter, common); + + if (ret != 0) + return ret; + } + } + + return 0; +} + +static int nbl_tc_fill_encap_out_info(struct nbl_tc_flow_param *param, + struct nbl_rule_action *rule_act) +{ + const struct nbl_serv_lag_info *lag_info = + param->serv_mgt->net_resource_mgt->lag_info; + struct nbl_netdev_priv *dev_priv = NULL; + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + u16 eswitch_mode = NBL_ESWITCH_NONE; + + if (netif_is_lag_master(rule_act->tc_tun_encap_out_dev)) { + if (lag_info && lag_info->bond_netdev && + lag_info->bond_netdev == rule_act->tc_tun_encap_out_dev) { + rule_act->port_type = SET_DPORT_TYPE_ETH_LAG; + rule_act->port_id = (lag_info->lag_id << 2) | NBL_FLOW_OUT_PORT_TYPE_LAG; + rule_act->vlan.port_id = lag_info->lag_id; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_BOND; + goto end; + } else { + nbl_err(param->common, NBL_DEBUG_FLOW, "fill encap out info err.\n"); + return -EINVAL; + } + } + + dev_priv = netdev_priv(rule_act->tc_tun_encap_out_dev); + if (!dev_priv->adapter) { + nbl_err(param->common, NBL_DEBUG_FLOW, "encap out dev priv adapter is NULL, out_dev:%s.\n", + rule_act->tc_tun_encap_out_dev->name); + return -EINVAL; + } + + if (param->common->tc_inst_id != dev_priv->adapter->common.tc_inst_id) { + nbl_err(param->common, NBL_DEBUG_FLOW, "tc flow rule in different nic is not supported\n"); + return -EINVAL; + } + + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(dev_priv->adapter); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + eswitch_mode = disp_ops->get_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (eswitch_mode != NBL_ESWITCH_OFFLOADS) { + nbl_err(param->common, NBL_DEBUG_FLOW, "eswitch mode is not in offload.\n"); + return -EINVAL; + } + + if (dev_priv->rep) { + rule_act->port_type = SET_DPORT_TYPE_VSI_HOST; + rule_act->port_id = dev_priv->rep->rep_vsi_id; + rule_act->vlan.port_id = dev_priv->rep->rep_vsi_id; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_VSI; + } else { + rule_act->port_type = SET_DPORT_TYPE_ETH_LAG; + rule_act->port_id = dev_priv->adapter->common.eth_id | NBL_FLOW_OUT_PORT_TYPE_ETH; + rule_act->vlan.port_id = dev_priv->adapter->common.eth_id + NBL_VLAN_TYPE_ETH_BASE; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_ETH; + } + +end: + return 0; +} + +static inline bool nbl_tc_is_dmac_offset(u32 oft) +{ + return (oft < 6); +} + +static inline bool nbl_tc_is_smac_offset(u32 oft) +{ + return (oft >= 6 && oft < 12); +} + +static inline bool nbl_tc_is_sip_offset(u32 oft) +{ + return (oft >= 12 && oft < 16); +} + +static inline bool nbl_tc_is_dip_offset(u32 oft) +{ + return (oft >= 16 && oft < 20); +} + +static inline bool nbl_tc_is_sip6_offset(u32 oft) +{ + return (oft >= 8 && oft < 24); +} + +static inline bool nbl_tc_is_dip6_offset(u32 oft) +{ + return (oft >= 24 && oft < 40); +} + +static inline bool nbl_tc_is_sp_offset(u32 oft) +{ + return (oft < 2); +} + +static inline bool nbl_tc_is_dp_offset(u32 oft) +{ + return (oft >= 2 && oft < 4); +} + +static int nbl_tc_pedit_parse_eth(u32 offset, u64 *act_flag) +{ + int ret = 0; + + if (nbl_tc_is_dmac_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_DST_MAC; + else if (nbl_tc_is_smac_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_SRC_MAC; + else + ret = -EOPNOTSUPP; + + return ret; +} + 
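+ /* Illustrative map of the mangle offsets handled by these helpers, + * assuming standard header layouts: Ethernet bytes 0-5 dmac / 6-11 smac; + * IPv4 bytes 12-15 saddr / 16-19 daddr; IPv6 bytes 8-23 saddr / + * 24-39 daddr; TCP/UDP bytes 0-1 sport / 2-3 dport. */ + 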
+static int nbl_tc_pedit_parse_ip(u32 offset, u64 *act_flag) +{ + int ret = 0; + + if (nbl_tc_is_dip_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_IPV4_DST_IP; + else if (nbl_tc_is_sip_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_IPV4_SRC_IP; + else + /* we only support sip & dip field now */ + ret = -EOPNOTSUPP; + + return ret; +} + +static int nbl_tc_pedit_parse_ip6(u32 offset, u64 *act_flag) +{ + int ret = 0; + + if (nbl_tc_is_dip6_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_IPV6_DST_IP; + else if (nbl_tc_is_sip6_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_IPV6_SRC_IP; + else + /* we only support sip6 & dip6 field now */ + ret = -EOPNOTSUPP; + + return ret; +} + +static int nbl_tc_pedit_parse_port(u32 offset, u64 *act_flag) +{ + int ret = 0; + + if (nbl_tc_is_dp_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_DST_PORT; + else if (nbl_tc_is_sp_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_SRC_PORT; + else + /* we only support src & dst port field now */ + ret = -EOPNOTSUPP; + + return ret; +} + +static int nbl_tc_pedit_check_field(const struct nbl_common_info *common, u32 offset, + u8 pedit_type, u64 *pedit_flag) +{ + int ret = 0; + + switch (pedit_type) { + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + ret = nbl_tc_pedit_parse_eth(offset, pedit_flag); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + ret = nbl_tc_pedit_parse_ip(offset, pedit_flag); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + ret = nbl_tc_pedit_parse_ip6(offset, pedit_flag); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + ret = nbl_tc_pedit_parse_port(offset, pedit_flag); + break; + default: + nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit: type %d not supported\n", pedit_type); + ret = -EOPNOTSUPP; + } + + if (ret) + nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:type(%d)-oft(%u) err\n", + pedit_type, offset); + return ret; +} + +static int nbl_tc_pedit_set_val(u8 htype, u32 mask, u32 val, u32 offset, + struct nbl_tc_pedit_info *pedit_info) +{ + u32 *cur_pmask = (u32 *)(nbl_pedit_header(&pedit_info->mask, htype) + offset); + u32 *cur_pval = (u32 *)(nbl_pedit_header(&pedit_info->val, htype) + offset); + + if (*cur_pmask & mask) + return -EINVAL; + + *cur_pmask |= mask; + *cur_pval |= (val & mask); + + return 0; +} + 
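+ /* Illustrative note, not part of the original patch: pedit always hands + * the driver 32-bit words, so a 16-bit field rewrite carries a mask + * marking the untouched half; nbl_tc_pedit_update_oft() below nudges the + * recorded byte offset forward or backward so that it points at the + * bytes actually being modified. */ + 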
+static int nbl_tc_pedit_update_oft(u32 *oft, u32 mask) +{ + int ret = 0; + + if (NBL_TC_MASK_FORWARD_OFT0(mask)) + *oft += 0; + else if (NBL_TC_MASK_FORWARD_OFT1(mask)) + *oft += 1; + else if (NBL_TC_MASK_FORWARD_OFT2(mask)) + *oft += 2; + else if (NBL_TC_MASK_FORWARD_OFT3(mask)) + *oft += 3; + else if (NBL_TC_MASK_BACKWARD_OFT3(mask)) + *oft = *oft > 3 ? (*oft - 3) : *oft; + else if (NBL_TC_MASK_BACKWARD_OFT2(mask)) + *oft = *oft > 2 ? (*oft - 2) : *oft; + else if (NBL_TC_MASK_BACKWARD_OFT1(mask)) + *oft = *oft > 1 ? (*oft - 1) : *oft; + else + ret = -EINVAL; + return ret; +} + +static int nbl_tc_pedit_parse_edit_info(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + struct nbl_tc_flow_param *param) +{ + int ret = 0; + u8 htype = (u8)act_entry->mangle.htype; + u32 mask = act_entry->mangle.mask; + u32 val = act_entry->mangle.val; + u32 offset = act_entry->mangle.offset; + const struct nbl_common_info *common = param->common; + + if (htype == FLOW_ACT_MANGLE_UNSPEC) { + nbl_info(common, NBL_DEBUG_FLOW, "legacy pedit isn't offloaded"); + ret = -EOPNOTSUPP; + goto pedit_err; + } + + if (htype > FLOW_ACT_MANGLE_HDR_TYPE_UDP) { + nbl_info(common, NBL_DEBUG_FLOW, "pedit:%d isn't offloaded", htype); + ret = -EOPNOTSUPP; + goto pedit_err; + } + + /* try to set the located pedit value, drop it if we got a bad location */ + ret = nbl_tc_pedit_set_val(htype, ~mask, val, offset, &rule_act->tc_pedit_info); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit err: disallow edit on same location"); + goto pedit_err; + } + + ret = nbl_tc_pedit_update_oft(&offset, mask); + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:type-val-mask-oft->%d-%u-%x-%u %s", + htype, val, mask, offset, ret ? "failed" : "success"); + if (ret) + goto pedit_err; + if (htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) + NBL_TC_PEDIT_SET_NODE_RES_PRO(rule_act->tc_pedit_info.pedit_node); + + /* now set the action flag if we support it */ + ret = nbl_tc_pedit_check_field(common, offset, htype, &rule_act->flag); + if (ret) + goto pedit_err; + + NBL_TC_PEDIT_INC_NODE_RES_EDITS(rule_act->tc_pedit_info.pedit_node); +pedit_err: + return ret; +} + +static int nbl_tc_handle_action_pedit(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + return nbl_tc_pedit_parse_edit_info(rule_act, act_entry, param); +} + +static int nbl_tc_handle_action_csum(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + return 0; +} + +static int +nbl_tc_handle_action_port_id(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + int ret = 0; + struct net_device *encap_dev = act_entry->dev; + + if (param->mirror_out.type) + return 0; + + if (param->encap) { + param->encap = false; + /* encap info */ + ret = nbl_tc_tun_parse_encap_info(rule_act, param, encap_dev); + + if (ret) { + nbl_info(param->common, NBL_DEBUG_FLOW, "parse tc encap info failed.\n"); + return ret; + } + + /* fill encap out port info */ + ret = nbl_tc_fill_encap_out_info(param, rule_act); + if (ret) + return ret; + } else { + switch (param->out.type) { + case NBL_TC_PORT_TYPE_VSI: + rule_act->port_type = SET_DPORT_TYPE_VSI_HOST; + rule_act->port_id = param->out.id; + rule_act->vlan.port_id = param->out.id; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_VSI; + break; + case NBL_TC_PORT_TYPE_ETH: + rule_act->port_type = SET_DPORT_TYPE_ETH_LAG; + rule_act->port_id = param->out.id | NBL_FLOW_OUT_PORT_TYPE_ETH; + rule_act->vlan.port_id = param->out.id + NBL_VLAN_TYPE_ETH_BASE; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_ETH; + break; + case NBL_TC_PORT_TYPE_BOND: + rule_act->port_type = SET_DPORT_TYPE_ETH_LAG; + rule_act->port_id = (param->out.id << 2) | NBL_FLOW_OUT_PORT_TYPE_LAG; + 
rule_act->vlan.port_id = param->out.id; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_BOND; + break; + default: + return -EINVAL; + } + } + rule_act->flag |= NBL_FLOW_ACTION_PORT_ID; + + return 0; +} + +static int +nbl_tc_handle_action_drop(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + rule_act->flag |= NBL_FLOW_ACTION_DROP; + rule_act->drop_flag = 1; + return 0; +} + +static int +nbl_tc_handle_action_mirror(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + if (!(param->out.type && param->mirror_out.type)) + return -EINVAL; + + if (rule_act->mcc_cnt >= NBL_TC_MCC_MEMBER_MAX) + return -EINVAL; + rule_act->port_mcc[rule_act->mcc_cnt].dport_id = param->out.id; + rule_act->port_mcc[rule_act->mcc_cnt].port_type = param->out.type; + rule_act->mcc_cnt++; + + if (rule_act->mcc_cnt >= NBL_TC_MCC_MEMBER_MAX) + return -EINVAL; + rule_act->port_mcc[rule_act->mcc_cnt].dport_id = param->mirror_out.id; + rule_act->port_mcc[rule_act->mcc_cnt].port_type = param->mirror_out.type; + rule_act->mcc_cnt++; + + rule_act->flag |= NBL_FLOW_ACTION_MCC; + + return 0; +} + +static int +nbl_tc_handle_action_push_vlan(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + rule_act->vlan.eth_proto = htons(act_entry->vlan.proto); + if (rule_act->vlan.eth_proto != NBL_VLAN_TPID_VALUE && + rule_act->vlan.eth_proto != NBL_QINQ_TPID_VALUE) + return -EINVAL; + + if (filter->input.svlan_tag) + rule_act->flag |= NBL_FLOW_ACTION_PUSH_OUTER_VLAN; + else + rule_act->flag |= NBL_FLOW_ACTION_PUSH_INNER_VLAN; + rule_act->vlan.vlan_tag = act_entry->vlan.vid; + rule_act->vlan.vlan_tag |= act_entry->vlan.prio << NBL_VLAN_PCP_SHIFT; + + return 0; +} + +static int +nbl_tc_handle_action_pop_vlan(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + if (filter->input.is_cvlan) + rule_act->flag |= NBL_FLOW_ACTION_POP_OUTER_VLAN; + else + rule_act->flag |= NBL_FLOW_ACTION_POP_INNER_VLAN; + + return 0; +} + +static int +nbl_tc_handle_action_tun_encap(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + param->tunnel = (struct ip_tunnel_info *)act_entry->tunnel; + if (param->tunnel) { + rule_act->flag |= NBL_FLOW_ACTION_TUNNEL_ENCAP; + param->encap = true; + return 0; + } else { + return -EOPNOTSUPP; + } +} + +static int +nbl_tc_handle_action_tun_decap(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + rule_act->flag |= NBL_FLOW_ACTION_TUNNEL_DECAP; + + return 0; +} + +const struct nbl_tc_flow_action_driver_ops nbl_port_id_act = { + .act_update = nbl_tc_handle_action_port_id, +}; + +const struct nbl_tc_flow_action_driver_ops nbl_drop = { + .act_update = nbl_tc_handle_action_drop, +}; + +const struct nbl_tc_flow_action_driver_ops nbl_mirror_act = { + .act_update = nbl_tc_handle_action_mirror, +}; + +const struct nbl_tc_flow_action_driver_ops 
nbl_push_vlan = { + .act_update = nbl_tc_handle_action_push_vlan, +}; + +const struct nbl_tc_flow_action_driver_ops nbl_pop_vlan = { + .act_update = nbl_tc_handle_action_pop_vlan, +}; + +const struct nbl_tc_flow_action_driver_ops nbl_tunnel_encap_act = { + .act_update = nbl_tc_handle_action_tun_encap, +}; + +const struct nbl_tc_flow_action_driver_ops nbl_tunnel_decap_act = { + .act_update = nbl_tc_handle_action_tun_decap, +}; + +const struct nbl_tc_flow_action_driver_ops nbl_pedit_act = { + .act_update = nbl_tc_handle_action_pedit, +}; + +const struct nbl_tc_flow_action_driver_ops nbl_csum_act = { + .act_update = nbl_tc_handle_action_csum, +}; + +const struct nbl_tc_flow_action_driver_ops *nbl_act_ops[] = { + [FLOW_ACTION_REDIRECT] = &nbl_port_id_act, + [FLOW_ACTION_DROP] = &nbl_drop, + [FLOW_ACTION_MIRRED] = &nbl_mirror_act, + [FLOW_ACTION_VLAN_PUSH] = &nbl_push_vlan, + [FLOW_ACTION_VLAN_POP] = &nbl_pop_vlan, + [FLOW_ACTION_TUNNEL_ENCAP] = &nbl_tunnel_encap_act, + [FLOW_ACTION_TUNNEL_DECAP] = &nbl_tunnel_decap_act, + [FLOW_ACTION_MANGLE] = &nbl_pedit_act, + [FLOW_ACTION_CSUM] = &nbl_csum_act, +}; + +/** + * @brief: handle action parse by type + * + * @param[in] rule_act: nbl_rule_action info storage + * @param[in] act_entry: flow action entry to parse + * @param[in] type: action type + * @param[in] filter: nbl_flow_pattern_conf info + * @param[in] param: tc flow param info + * @return int: 0-success, other-failed + */ +static int nbl_tc_parse_action_by_type(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + const struct nbl_tc_flow_action_driver_ops *fops; + + fops = nbl_act_ops[type]; + + if (!fops) + return 0; + + return fops->act_update(rule_act, act_entry, type, filter, param); +} + +/** + * @brief: handle action parse + * + * @param[in] serv_mgt: service management info + * @param[in] f: flow_cls_offload info + * @param[in] filter: nbl_flow_pattern_conf info + * @param[in] rule_act: nbl_rule_action info storage + * @param[in] param: tc flow param info + * @return int: 0-success, other-failed + * + */ +static int nbl_tc_parse_action(struct nbl_service_mgt *serv_mgt, + struct flow_cls_offload *f, + struct nbl_flow_pattern_conf *filter, + struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + const struct flow_action_entry *act_entry; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + int i; + int ret = 0; + + flow_action_for_each(i, act_entry, &rule->action) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow parse action id %d, act idx %d\n", + act_entry->id, i); + switch (act_entry->id) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_DROP: + case FLOW_ACTION_MIRRED: + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_POP: + case FLOW_ACTION_TUNNEL_ENCAP: + case FLOW_ACTION_TUNNEL_DECAP: + case FLOW_ACTION_MANGLE: + case FLOW_ACTION_CSUM: + ret = nbl_tc_parse_action_by_type(rule_act, act_entry, + act_entry->id, filter, param); + if (ret) + return ret; + break; + default: + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow action %d is not supported", + act_entry->id); + return -EOPNOTSUPP; + } + } + + return ret; +} + 
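+ /* Illustrative usage, not part of the original patch: a VXLAN encap rule + * exercising the handlers above could look like + * tc filter add dev $REP ingress flower dst_mac 52:54:00:00:00:01 \ + * action tunnel_key set id 100 src_ip 10.0.0.1 dst_ip 10.0.0.2 dst_port 4789 \ + * action mirred egress redirect dev vxlan0 + * (device names and addresses here are placeholders). */ + 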
+static int nbl_serv_add_cls_flower(struct nbl_netdev_priv *priv, struct flow_cls_offload *f) +{ + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(priv->adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_flow_pattern_conf *filter = NULL; + struct nbl_rule_action *act = NULL; + struct nbl_tc_flow_param *param = NULL; + int ret = 0; + int ret_act = 0; + + if (!tc_can_offload(priv->netdev)) + return -EOPNOTSUPP; + + if (!nbl_tc_is_valid_netdev(priv->netdev, &serv_mgt->net_resource_mgt->netdev_ops)) + return -EOPNOTSUPP; + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) + return -ENOMEM; + param->key.cookie = f->cookie; + ret = disp_ops->flow_index_lookup(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param->key); + if (!ret) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow cookie %lx has already been added, do not add it again, dev %s.\n", + param->key.cookie, netdev_name(priv->netdev)); + ret = -EEXIST; + goto ret_param_fail; + } + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow add cls, cookie=%lx, dev %s.\n", + f->cookie, netdev_name(priv->netdev)); + + if (nbl_tc_flow_init_param(priv, f, common, param)) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow init param failed, dev %s.\n", + netdev_name(priv->netdev)); + ret = -EINVAL; + goto ret_param_fail; + } + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) { + ret = -ENOMEM; + goto ret_param_fail; + } + + param->common = common; + param->serv_mgt = serv_mgt; + + filter->input_dev = priv->netdev; + ret = nbl_tc_parse_pattern(serv_mgt, f, filter, param); + if (ret) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow failed to parse pattern, dev %s, ret %d.\n", + netdev_name(priv->netdev), ret); + ret = -EINVAL; + goto ret_filter_fail; + } + + act = kzalloc(sizeof(*act), GFP_KERNEL); + if (!act) { + ret = -ENOMEM; + goto ret_filter_fail; + } + + act->in_port = priv->netdev; + ret = nbl_tc_parse_action(serv_mgt, f, filter, act, param); + if (ret) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow failed to parse action, dev %s, ret %d.\n", + netdev_name(priv->netdev), ret); + ret = -EINVAL; + goto ret_act_fail; + } + + memcpy(&param->filter, filter, sizeof(param->filter)); + memcpy(&param->act, act, sizeof(param->act)); + + ret = disp_ops->add_tc_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param); + +ret_act_fail: + /* free edit act */ + if (ret && act->flag & NBL_FLOW_ACTION_TUNNEL_ENCAP && + act->encap_parse_ok) { + ret_act = disp_ops->tc_tun_encap_del(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + &act->encap_key); + if (ret_act) + nbl_debug(common, NBL_DEBUG_FLOW, "in add tc flow err, encap del err, tc flow add ret %d, encap_idx:%d, encap del ret:%d", + ret, act->encap_idx, ret_act); + } + + kfree(act); +ret_filter_fail: + kfree(filter); +ret_param_fail: + kfree(param); + return ret; +} + +static int nbl_serv_del_cls_flower(struct nbl_netdev_priv *priv, struct flow_cls_offload *f) +{ + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(priv->adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_tc_flow_param *param = NULL; + int ret = 0; + + if (!nbl_tc_is_valid_netdev(priv->netdev, &serv_mgt->net_resource_mgt->netdev_ops)) + return -EOPNOTSUPP; + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) + return -ENOMEM; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow del cls, cookie=%lx\n", f->cookie); + param->key.cookie = f->cookie; + + ret = disp_ops->del_tc_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param); + kfree(param); + + return ret; +} + 
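+ /* Note, illustrative: deletion is keyed purely by the tc cookie, so the + * dispatch layer owns the cookie -> hardware-flow lookup and teardown. */ + 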
+static int nbl_serv_stats_cls_flower(struct nbl_netdev_priv *priv, struct flow_cls_offload *f) +{ + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(priv->adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_stats_param param = {0}; + int ret = 0; + + if (!tc_can_offload(priv->netdev)) + return -EOPNOTSUPP; + + if (!nbl_tc_is_valid_netdev(priv->netdev, &serv_mgt->net_resource_mgt->netdev_ops)) + return -EOPNOTSUPP; + + param.f = f; + + ret = disp_ops->query_tc_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param); + + return ret; +} + +static int +nbl_serv_setup_tc_cls_flower(struct nbl_netdev_priv *priv, + struct flow_cls_offload *cls_flower) +{ + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(priv->adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 eswitch_mode = NBL_ESWITCH_NONE; + + eswitch_mode = disp_ops->get_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (eswitch_mode != NBL_ESWITCH_OFFLOADS) + return -EINVAL; + + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return nbl_serv_add_cls_flower(priv, cls_flower); + case FLOW_CLS_DESTROY: + return nbl_serv_del_cls_flower(priv, cls_flower); + case FLOW_CLS_STATS: + return nbl_serv_stats_cls_flower(priv, cls_flower); + default: + return -EOPNOTSUPP; + } +} + +int nbl_serv_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct nbl_netdev_priv *priv = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return nbl_serv_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +int nbl_serv_indr_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct nbl_indr_dev_priv *indr_priv = cb_priv; + struct nbl_netdev_priv *priv = indr_priv->dev_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return nbl_serv_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.h new file mode 100644 index 0000000000000000000000000000000000000000..0619389d87515b52a3202004af46a4d512326c31 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_TC_OFFLOAD_H +#define _NBL_TC_OFFLOAD_H + +#include "nbl_service.h" + +#define NBL_TC_MASK_FORWARD_OFT3(mask) ((mask) == 0xffffff) +#define NBL_TC_MASK_FORWARD_OFT2(mask) ((mask) == 0xffff) +#define NBL_TC_MASK_FORWARD_OFT1(mask) ((mask) == 0xff) +#define NBL_TC_MASK_FORWARD_OFT0(mask) ((mask) == 0) + +#define NBL_TC_MASK_BACKWARD_OFT3(mask) ((mask) == 0xffffff00) +#define NBL_TC_MASK_BACKWARD_OFT2(mask) ((mask) == 0xffff0000) +#define NBL_TC_MASK_BACKWARD_OFT1(mask) ((mask) == 0xff000000) + +int nbl_serv_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv); +int nbl_serv_indr_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.c new file mode 100644 index 0000000000000000000000000000000000000000..5c4425cabf87bf9fe8e344cb021c5c2c0eacb0d8 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.c @@ -0,0 +1,563 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include <linux/if_vlan.h> +#include <net/route.h> +#include <net/ip6_route.h> +#include <net/ipv6_stubs.h> +#include <net/vxlan.h> +#include "nbl_resource.h" +#include "nbl_service.h" +#include "nbl_tc_tun.h" + +static int nbl_copy_tun_info(const struct ip_tunnel_info *tun_info, + struct nbl_rule_action *rule_act) +{ + size_t tun_size; + + if (tun_info->options_len) + tun_size = sizeof(*tun_info) + tun_info->options_len; + else + tun_size = sizeof(*tun_info); + + rule_act->tunnel = kzalloc(tun_size, GFP_KERNEL); + if (!rule_act->tunnel) + return -ENOMEM; + + memcpy(rule_act->tunnel, tun_info, tun_size); + + return 0; +} + +/* only support vxlan currently */ +static struct nbl_tc_tunnel *nbl_tc_get_tunnel(struct net_device *tunnel_dev) +{ + if (netif_is_vxlan(tunnel_dev)) + return &vxlan_tunnel; + else + return NULL; +} + +static int nbl_tc_tun_gen_tunnel_header_vxlan(char buf[], u8 *ip_proto, + const struct ip_tunnel_key *tun_key) +{ + __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id); + struct udphdr *udp = (struct udphdr *)(buf); + struct vxlanhdr *vxh; + + vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); + *ip_proto = IPPROTO_UDP; + + udp->dest = tun_key->tp_dst; + vxh->vx_flags = VXLAN_HF_VNI; + vxh->vx_vni = vxlan_vni_field(tun_id); + + return 0; +} + +static int nbl_tc_tun_get_vxlan_hdr_len(void) +{ + return sizeof(struct vxlanhdr); +} + +static void nbl_tc_tun_route_cleanup(struct nbl_tc_tunnel_route_info *tun_route_info) +{ + if (tun_route_info->n) + neigh_release(tun_route_info->n); + if (tun_route_info->real_out_dev) + dev_put(tun_route_info->real_out_dev); +} + +static int nbl_route_lookup_ipv4(const struct nbl_common_info *common, + struct net_device *encap_mirred_dev, + struct nbl_tc_tunnel_route_info *tun_route_info, + struct nbl_serv_netdev_ops *netdev_ops) +{ + int ret = 0; + struct net_device *out_dev; + struct net_device *real_out_dev; + struct net_device *parent_dev; + struct neighbour *n; + struct rtable *rt; + + rt = ip_route_output_key(dev_net(encap_mirred_dev), &tun_route_info->fl.fl4); + if (IS_ERR(rt)) + return (int)PTR_ERR(rt); + + if (rt->rt_type != RTN_UNICAST) { + ret = -ENETUNREACH; + nbl_err(common, NBL_DEBUG_FLOW, "get route table failed, the route type is not unicast."); + goto rt_err; + } + + out_dev = rt->dst.dev; + if (is_vlan_dev(out_dev)) { + parent_dev = vlan_dev_priv(out_dev)->real_dev; + if (is_vlan_dev(parent_dev)) { + nbl_debug(common, NBL_DEBUG_FLOW, "ipv4 encap out dev is %s, parent_dev:%s is vlan, two-layer vlan is not supported\n", + out_dev->name, parent_dev ? parent_dev->name : "NULL"); + ret = -EOPNOTSUPP; + goto rt_err; + } + + real_out_dev = vlan_dev_real_dev(out_dev); + nbl_debug(common, NBL_DEBUG_FLOW, "ipv4 encap out dev is %s, real_out_dev:%s\n", + out_dev->name, real_out_dev ? real_out_dev->name : "NULL"); + } else { + real_out_dev = out_dev; + } + + if (!netif_is_lag_master(real_out_dev) && + real_out_dev->netdev_ops != netdev_ops->pf_netdev_ops && + real_out_dev->netdev_ops != netdev_ops->rep_netdev_ops) { + nbl_info(common, NBL_DEBUG_FLOW, "encap out dev is %s, not ours, not supported\n", + real_out_dev->name); + ret = -EOPNOTSUPP; + goto rt_err; + } + + dev_hold(real_out_dev); + if (!tun_route_info->ttl) + tun_route_info->ttl = (u8)ip4_dst_hoplimit(&rt->dst); + + nbl_debug(common, NBL_DEBUG_FLOW, "route lookup: rt->rt_type:%u, rt->dst.dev:%s, rt->dst.ops:%p, real_dev:%s, ttl:%u", + rt->rt_type, rt->dst.dev ? rt->dst.dev->name : "null", + rt->dst.ops, real_out_dev ? 
real_out_dev->name : "NULL", + tun_route_info->ttl); + + n = dst_neigh_lookup(&rt->dst, &tun_route_info->fl.fl4.daddr); + if (!n) { + ret = -ENONET; + nbl_info(common, NBL_DEBUG_FLOW, "get neigh failed."); + goto dev_release; + } + ip_rt_put(rt); + + tun_route_info->out_dev = out_dev; + tun_route_info->real_out_dev = real_out_dev; + tun_route_info->n = n; + + return 0; + +dev_release: + dev_put(real_out_dev); +rt_err: + ip_rt_put(rt); + return ret; +} + +static char *nbl_tc_tun_gen_eth_hdr(char *buf, struct net_device *dev, + const unsigned char *hw_dst, u16 proto, + const struct nbl_common_info *common) +{ + struct ethhdr *eth = (struct ethhdr *)buf; + char *ip; + + ether_addr_copy(eth->h_dest, hw_dst); + ether_addr_copy(eth->h_source, dev->dev_addr); + if (is_vlan_dev(dev)) { + struct vlan_hdr *vlan = + (struct vlan_hdr *)((char *)eth + sizeof(struct ethhdr)); + + ip = (char *)vlan + sizeof(struct vlan_hdr); + eth->h_proto = vlan_dev_vlan_proto(dev); + vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev)); + vlan->h_vlan_encapsulated_proto = htons(proto); + nbl_debug(common, NBL_DEBUG_FLOW, "output is vlan dev: vlan_TCI:0x%x, vlan_proto:0x%x, eth_proto:0x%x", + vlan->h_vlan_TCI, vlan->h_vlan_encapsulated_proto, + eth->h_proto); + } else { + eth->h_proto = htons(proto); + ip = (char *)eth + sizeof(struct ethhdr); + } + + return ip; +} + 
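+ /* Worked example, illustrative only: for the common untagged IPv4/VXLAN + * case the encap buffer built below is eth(14) + ipv4(20) + udp(8) + + * vxlan(8) = 50 bytes, with phid2_offset at 14, phid3_offset and + * sport_offset at 34, and vni_offset at 46. */ + 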
+static int nbl_tc_tun_create_header_ipv4(struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param, + struct net_device *encap_mirred_dev, + struct nbl_encap_key *key) +{ + int ret = 0; + const struct nbl_common_info *common = param->common; + const struct ip_tunnel_key *tun_key = &key->ip_tun_key; + struct nbl_serv_netdev_ops *netdev_ops = &param->serv_mgt->net_resource_mgt->netdev_ops; + struct iphdr *ip; + struct nbl_tc_tunnel_route_info tun_route_info; + struct udphdr *udp; + struct vxlanhdr *vxh; + unsigned char hw_dst[ETH_ALEN]; + + u8 total_len = 0; + u8 eth_len = 0; + u8 l4_len = 0; + u8 nud_state; + + memset(&tun_route_info, 0, sizeof(tun_route_info)); + memset(hw_dst, 0, sizeof(hw_dst)); + tun_route_info.fl.fl4.flowi4_tos = tun_key->tos; + tun_route_info.fl.fl4.flowi4_proto = IPPROTO_UDP; + tun_route_info.fl.fl4.fl4_dport = tun_key->tp_dst; + tun_route_info.fl.fl4.daddr = tun_key->u.ipv4.dst; + tun_route_info.fl.fl4.saddr = tun_key->u.ipv4.src; + tun_route_info.ttl = tun_key->ttl; + + ret = nbl_route_lookup_ipv4(common, encap_mirred_dev, &tun_route_info, netdev_ops); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "get route failed in create encap head v4, encap_dev:%s, ret %d", + encap_mirred_dev->name, ret); + return ret; + } + + rule_act->tc_tun_encap_out_dev = tun_route_info.real_out_dev; + + /* copy mac */ + read_lock_bh(&tun_route_info.n->lock); + nud_state = tun_route_info.n->nud_state; + ether_addr_copy(hw_dst, tun_route_info.n->ha); + read_unlock_bh(&tun_route_info.n->lock); + + /* add ether header */ + ip = (struct iphdr *)nbl_tc_tun_gen_eth_hdr(rule_act->encap_buf, + tun_route_info.out_dev, hw_dst, ETH_P_IP, common); + + total_len += sizeof(struct ethhdr); + if (is_vlan_dev(tun_route_info.out_dev)) { + rule_act->encap_idx_info.info.vlan_offset = total_len - 2; + total_len += sizeof(struct vlan_hdr); + } + + eth_len = total_len; + rule_act->encap_idx_info.info.l4_ck_mod = NBL_FLOW_L4_CK_NO_MODIFY; + rule_act->encap_idx_info.info.phid2_offset = total_len; + + /* add ip header */ + ip->tos = tun_key->tos; + ip->version = NBL_FLOW_IPV4; + ip->ihl = NBL_FLOW_IHL; + ip->frag_off = NBL_FLOW_DF; + ip->ttl = tun_route_info.ttl; + ip->saddr = tun_route_info.fl.fl4.saddr; + ip->daddr = tun_route_info.fl.fl4.daddr; + + rule_act->encap_idx_info.info.len_en0 = 1; + rule_act->encap_idx_info.info.len_offset0 = total_len + NBL_FLOW_IPV4_LEN_OFFSET; + rule_act->encap_idx_info.info.l3_ck_en = 1; + rule_act->encap_idx_info.info.dscp_offset = (total_len + 1) * 8; + total_len += sizeof(struct iphdr); + + /* add tunnel proto header */ + ret = ((struct nbl_tc_tunnel *)key->tc_tunnel)->generate_tunnel_hdr((char *)ip + + sizeof(struct iphdr), &ip->protocol, &key->ip_tun_key); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, "nbl tc flow gen tun hdr err, ret:%d", ret); + goto destroy_neigh; + } + + rule_act->encap_idx_info.info.phid3_offset = total_len; + rule_act->encap_idx_info.info.sport_offset = total_len; + rule_act->encap_idx_info.info.len_en1 = 1; + rule_act->encap_idx_info.info.len_offset1 = total_len + NBL_FLOW_UDP_LEN_OFFSET; + rule_act->encap_idx_info.info.l4_ck_mod = NBL_FLOW_L4_CK_MODE_0; + total_len += sizeof(struct udphdr); + + /* tnl info */ + rule_act->encap_idx_info.info.vni_offset = total_len + NBL_FLOW_VNI_OFFSET; + total_len += ((struct nbl_tc_tunnel *)(key->tc_tunnel))->get_tun_hlen(); + + l4_len = (u8)(total_len - eth_len - sizeof(struct iphdr)); + ip->tot_len = cpu_to_be16(total_len - eth_len); + + udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr)); + vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); + + udp->len = cpu_to_be16(l4_len); + + rule_act->encap_idx_info.info.tnl_len = total_len; + rule_act->encap_size = total_len; + rule_act->vni = be32_to_cpu(vxh->vx_vni); + + if (!(nud_state & NUD_VALID)) { + neigh_event_send(tun_route_info.n, NULL); + goto destroy_neigh; + } + + nbl_tc_tun_route_cleanup(&tun_route_info); + + nbl_debug(common, NBL_DEBUG_FLOW, "create ipv4 header ok: encap_len:%d", total_len); + + return 0; + +destroy_neigh: + nbl_tc_tun_route_cleanup(&tun_route_info); + + return ret; +} + +static int nbl_route_lookup_ipv6(const struct nbl_common_info *common, + struct net_device *encap_mirred_dev, + struct nbl_tc_tunnel_route_info *tun_route_info, + struct nbl_serv_netdev_ops *netdev_ops) +{ + int ret = 0; + struct net_device *out_dev; + struct net_device *real_out_dev; + struct net_device *parent_dev; + struct neighbour *n; + struct dst_entry *dst; + + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(encap_mirred_dev), NULL, + &tun_route_info->fl.fl6, NULL); + if (IS_ERR(dst)) + return (int)PTR_ERR(dst); + + out_dev = dst->dev; + if (is_vlan_dev(out_dev)) { + parent_dev = vlan_dev_priv(out_dev)->real_dev; + real_out_dev = vlan_dev_real_dev(out_dev); + if (is_vlan_dev(parent_dev)) { + nbl_debug(common, NBL_DEBUG_FLOW, "ipv6 encap out dev is %s, parent_dev:%s is vlan, two-layer vlan is not supported\n", + out_dev->name, parent_dev ? parent_dev->name : "NULL"); + ret = -EOPNOTSUPP; + goto err; + } + nbl_debug(common, NBL_DEBUG_FLOW, "ipv6 encap out dev is %s, real_out_dev:%s\n", + out_dev->name, real_out_dev ? 
real_out_dev->name : "NULL"); + } else { + real_out_dev = out_dev; + } + + if (!netif_is_lag_master(real_out_dev) && + real_out_dev->netdev_ops != netdev_ops->pf_netdev_ops && + real_out_dev->netdev_ops != netdev_ops->rep_netdev_ops) { + nbl_err(common, NBL_DEBUG_FLOW, "encap out dev is %s, not ours, not support\n", + out_dev->name); + ret = -EOPNOTSUPP; + goto err; + } + + dev_hold(real_out_dev); + + if (!tun_route_info->ttl) + tun_route_info->ttl = (u8)ip6_dst_hoplimit(dst); + + n = dst_neigh_lookup(dst, &tun_route_info->fl.fl6.daddr); + if (!n) { + ret = -ENONET; + nbl_err(common, NBL_DEBUG_FLOW, "get neigh failed."); + goto dev_release; + } + + dst_release(dst); + tun_route_info->out_dev = out_dev; + tun_route_info->real_out_dev = real_out_dev; + tun_route_info->n = n; + + return 0; + +dev_release: + dev_put(real_out_dev); +err: + dst_release(dst); + return ret; +} + +static int nbl_tc_tun_create_header_ipv6(struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param, + struct net_device *encap_mirred_dev, + struct nbl_encap_key *key) +{ + int ret = 0; + const struct nbl_common_info *common = param->common; + const struct ip_tunnel_key *tun_key = &key->ip_tun_key; + struct nbl_serv_netdev_ops *netdev_ops = ¶m->serv_mgt->net_resource_mgt->netdev_ops; + struct ipv6hdr *ip; + struct nbl_tc_tunnel_route_info tun_route_info; + struct udphdr *udp; + struct vxlanhdr *vxh; + unsigned char hw_dst[ETH_ALEN]; + + u8 total_len = 0; + u8 eth_len = 0; + u8 l4_len = 0; + u8 nud_state; + + memset(&tun_route_info, 0, sizeof(tun_route_info)); + memset(hw_dst, 0, sizeof(hw_dst)); + tun_route_info.fl.fl6.flowlabel = + ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); + tun_route_info.fl.fl6.fl6_dport = tun_key->tp_dst; + tun_route_info.fl.fl6.fl6_sport = tun_key->tp_src; + tun_route_info.fl.fl6.daddr = tun_key->u.ipv6.dst; + tun_route_info.fl.fl6.saddr = tun_key->u.ipv6.src; + tun_route_info.ttl = tun_key->ttl; + + ret = nbl_route_lookup_ipv6(common, encap_mirred_dev, &tun_route_info, netdev_ops); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "get route failed in create encap head v6, encap_dev:%s, ret %d", + encap_mirred_dev->name, ret); + return ret; + } + + rule_act->tc_tun_encap_out_dev = tun_route_info.real_out_dev; + + /* copy mac */ + read_lock_bh(&tun_route_info.n->lock); + nud_state = tun_route_info.n->nud_state; + ether_addr_copy(hw_dst, tun_route_info.n->ha); + read_unlock_bh(&tun_route_info.n->lock); + + /* add ether header */ + ip = (struct ipv6hdr *)nbl_tc_tun_gen_eth_hdr(rule_act->encap_buf, + tun_route_info.out_dev, hw_dst, ETH_P_IPV6, common); + + total_len += sizeof(struct ethhdr); + if (is_vlan_dev(tun_route_info.out_dev)) { + rule_act->encap_idx_info.info.vlan_offset = total_len - 2; + total_len += sizeof(struct vlan_hdr); + } + + eth_len = total_len; + rule_act->encap_idx_info.info.l4_ck_mod = NBL_FLOW_L4_CK_NO_MODIFY; + rule_act->encap_idx_info.info.phid2_offset = total_len; + + /* add ip header */ + ip6_flow_hdr(ip, tun_key->tos, 0); + ip->hop_limit = tun_route_info.ttl; + ip->saddr = tun_route_info.fl.fl6.saddr; + ip->daddr = tun_route_info.fl.fl6.daddr; + + rule_act->encap_idx_info.info.len_en0 = 1; + rule_act->encap_idx_info.info.len_offset0 = total_len + NBL_FLOW_IPV6_LEN_OFFSET; + rule_act->encap_idx_info.info.dscp_offset = (total_len * 8) + 4; + total_len += sizeof(struct ipv6hdr); + + /* add tunnel proto header */ + ret = ((struct nbl_tc_tunnel *)key->tc_tunnel)->generate_tunnel_hdr((char *)ip + + sizeof(struct ipv6hdr), &ip->nexthdr, &key->ip_tun_key); + if 
(ret) { + nbl_err(common, NBL_DEBUG_FLOW, "nbl tc flow gen v6 tun hdr err, ret:%d", ret); + goto destroy_neigh; + } + + rule_act->encap_idx_info.info.phid3_offset = total_len; + rule_act->encap_idx_info.info.sport_offset = total_len; + rule_act->encap_idx_info.info.len_en1 = 1; + rule_act->encap_idx_info.info.len_offset1 = total_len + NBL_FLOW_UDP_LEN_OFFSET; + rule_act->encap_idx_info.info.l4_ck_mod = NBL_FLOW_L4_CK_MODE_1; + total_len += sizeof(struct udphdr); + + /* tnl info */ + rule_act->encap_idx_info.info.vni_offset = total_len + NBL_FLOW_VNI_OFFSET; + total_len += ((struct nbl_tc_tunnel *)(key->tc_tunnel))->get_tun_hlen(); + + l4_len = (u8)(total_len - eth_len - sizeof(struct ipv6hdr)); + ip->payload_len = cpu_to_be16(l4_len); + + udp = (struct udphdr *)((char *)ip + sizeof(struct ipv6hdr)); + vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); + + udp->len = cpu_to_be16(l4_len); + + rule_act->encap_idx_info.info.tnl_len = total_len; + rule_act->encap_size = total_len; + rule_act->vni = be32_to_cpu(vxh->vx_vni); + + if (!(nud_state & NUD_VALID)) { + neigh_event_send(tun_route_info.n, NULL); + goto destroy_neigh; + } + + nbl_tc_tun_route_cleanup(&tun_route_info); + + nbl_debug(common, NBL_DEBUG_FLOW, "create ipv6 header ok: encap_len:%d", total_len); + + return 0; + +destroy_neigh: + nbl_tc_tun_route_cleanup(&tun_route_info); + + return ret; +} + +int nbl_tc_tun_parse_encap_info(struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param, + struct net_device *encap_mirred_dev) +{ + int ret = 0; + const struct nbl_common_info *common = param->common; + struct nbl_dispatch_mgt *disp_mgt; + struct nbl_dispatch_ops *disp_ops; + unsigned short ip_family; + bool is_encap_find = false; + + ret = nbl_copy_tun_info(param->tunnel, rule_act); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, "alloc tunnel_info failed, ret %d\n", ret); + return ret; + } + + ip_family = ip_tunnel_info_af(rule_act->tunnel); + memcpy(&rule_act->encap_key.ip_tun_key, &rule_act->tunnel->key, + sizeof(rule_act->encap_key.ip_tun_key)); + rule_act->encap_key.tc_tunnel = nbl_tc_get_tunnel(encap_mirred_dev); + if (!rule_act->encap_key.tc_tunnel) { + nbl_err(common, NBL_DEBUG_FLOW, "unsupported tunnel type: %s", + encap_mirred_dev->rtnl_link_ops ? + encap_mirred_dev->rtnl_link_ops->kind : "unknown"); + ret = -EOPNOTSUPP; + goto malloc_err; + } + + disp_mgt = NBL_SERV_MGT_TO_DISP_PRIV(param->serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(param->serv_mgt); + is_encap_find = disp_ops->tc_tun_encap_lookup(disp_mgt, rule_act, param); + if (is_encap_find) + goto parse_encap_finish; + + if (ip_family == AF_INET) + ret = nbl_tc_tun_create_header_ipv4(rule_act, param, + encap_mirred_dev, + &rule_act->encap_key); + else + ret = nbl_tc_tun_create_header_ipv6(rule_act, param, + encap_mirred_dev, + &rule_act->encap_key); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "create tnl header failed, ret %d!", ret); + goto malloc_err; + } + + ret = disp_ops->tc_tun_encap_add(disp_mgt, rule_act); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "add tnl encap hash failed, ret %d!", ret); + goto malloc_err; + } + +parse_encap_finish: + kfree(rule_act->tunnel); + rule_act->encap_parse_ok = true; + return ret; + +malloc_err: + kfree(rule_act->tunnel); + + return ret; +} + 
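+ /* Illustrative sketch, not part of this patch: supporting another tunnel + * type would mean a second ops instance next to vxlan_tunnel below, e.g. + * struct nbl_tc_tunnel geneve_tunnel = { + * .tunnel_type = NBL_TC_TUNNEL_TYPE_GENEVE, + * .generate_tunnel_hdr = <geneve header builder>, + * .get_tun_hlen = <geneve header length>, + * }; + * plus a netif_is_geneve() branch in nbl_tc_get_tunnel(). */ + 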
+struct nbl_tc_tunnel vxlan_tunnel = { + .tunnel_type = NBL_TC_TUNNEL_TYPE_VXLAN, + .generate_tunnel_hdr = nbl_tc_tun_gen_tunnel_header_vxlan, + .get_tun_hlen = nbl_tc_tun_get_vxlan_hdr_len, +}; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.h new file mode 100644 index 0000000000000000000000000000000000000000..53344c620bd1fba1d7aac9246edf85f691ae7c3f --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef __NBL_TC_TUN_H__ +#define __NBL_TC_TUN_H__ + +#include <net/ip_tunnels.h> +#include "nbl_include.h" +#include "nbl_core.h" +#include "nbl_resource.h" + +#define NBL_FLOW_IPV4 4 +#define NBL_FLOW_IPV6 6 +#define NBL_FLOW_IHL 5 +#define NBL_FLOW_DF 0x40 + +#define NBL_FLOW_L4_CK_NO_MODIFY 7 +#define NBL_FLOW_IPV4_LEN_OFFSET 2 +#define NBL_FLOW_IPV6_LEN_OFFSET 4 +#define NBL_FLOW_UDP_LEN_OFFSET 4 +#define NBL_FLOW_VNI_OFFSET 4 + +#define NBL_FLOW_L4_CK_MODE_0 0 +#define NBL_FLOW_L4_CK_MODE_1 1 + +enum { + NBL_TC_TUNNEL_TYPE_UNKNOWN, + NBL_TC_TUNNEL_TYPE_VXLAN, + NBL_TC_TUNNEL_TYPE_GENEVE, + NBL_TC_TUNNEL_TYPE_GRE, +}; + +struct nbl_decap_key { + struct ethhdr key; +}; + +struct nbl_tc_tunnel_route_info { + struct net_device *out_dev; + struct net_device *real_out_dev; + union { + struct flowi4 fl4; + struct flowi6 fl6; + } fl; + struct neighbour *n; + u8 ttl; +}; + +struct nbl_tc_tunnel { + u8 tunnel_type; + int (*generate_tunnel_hdr)(char buf[], u8 *ip_proto, + const struct ip_tunnel_key *tun_key); + int (*get_tun_hlen)(void); +}; + +extern struct nbl_tc_tunnel vxlan_tunnel; + +int nbl_tc_tun_parse_encap_info(struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param, + struct net_device *encap_mirred_dev); + +#endif /* end of __NBL_TC_TUN_H__ */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_export/nbl_export_rdma.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_export/nbl_export_rdma.h new file mode 100644 index 0000000000000000000000000000000000000000..1192ff14e89d7a77291858b2a11dbd9f3e6384cd --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_export/nbl_export_rdma.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_EXPORT_RDMA_H_ +#define _NBL_EXPORT_RDMA_H_ + +enum nbl_core_reset_event { + NBL_CORE_FATAL_ERR_EVENT, /* most hw modules do not work normally, excluding pcie/emp */ + NBL_CORE_RESET_MAX_EVENT +}; + +#include <linux/auxiliary_bus.h> + +#define RDMA_MSG_MAX_SIZE 256 +#define NBL_COREDEV_TO_DMA_DEV(core) ((core)->dma_dev) + +struct nbl_chan_rdma_resp { + u8 resp_data[RDMA_MSG_MAX_SIZE]; + u16 data_len; +}; + +struct nbl_core_dev_lag_mem { + u16 vsi_id; + u8 eth_id; + bool active; +}; + +#define NBL_RDMA_LAG_MAX_PORTS 2 +struct nbl_core_dev_lag_info { + struct net_device *bond_netdev; + struct nbl_core_dev_lag_mem lag_mem[NBL_RDMA_LAG_MAX_PORTS]; + u16 lag_id; + u8 lag_num; +}; + +struct nbl_core_dev_info { + /* Devices */ + struct pci_dev *pdev; + struct net_device *netdev; + struct device *dma_dev; + /* Bar addr */ + u8 __iomem *hw_addr; + u64 real_hw_addr; + /* Interrupts */ + struct msix_entry *msix_entries; + u16 *global_vector_id; + u16 msix_count; + /* VSI */ + u16 vsi_id; + u8 real_bus; + u8 real_dev; + u8 real_function; + /* Send function */ + int (*send)(struct pci_dev *pdev, u8 *req_args, u8 req_len, + void *resp, u16 resp_len); + u8 eth_mode; + u16 function_id; + u8 eth_id; + /* Lag info */ + struct nbl_core_dev_lag_info lag_info; + int (*lag_mem_notify)(struct auxiliary_device *adev, + struct nbl_core_dev_lag_info *lag_info); + int (*offload_status_notify)(struct auxiliary_device *adev, bool status); + int (*register_bond)(struct pci_dev *pdev, bool enable); + bool is_lag; + /* Info */ + u32 mem_type; + u16 rdma_cap_num; + int (*change_mtu_notify)(struct auxiliary_device *adev, int new_mtu); + bool mirror_enable; +}; + +struct nbl_aux_dev { + struct auxiliary_device adev; + struct nbl_core_dev_info *cdev_info; + void (*recv)(struct auxiliary_device *device, void *req_args, u16 req_len, + struct nbl_chan_rdma_resp *resp); + void (*abnormal_event_process)(struct auxiliary_device *grc_adev); + void (*process_flr_event)(struct auxiliary_device *grc_adev, u16 vsi_id); + int (*reset_event_notify)(struct auxiliary_device *adev, enum nbl_core_reset_event event); + ssize_t (*qos_cfg_store)(struct auxiliary_device *adev, int offset, + const char *buf, size_t count); + ssize_t (*qos_cfg_show)(struct auxiliary_device *adev, int offset, char *buf); + int (*mirror_enable_notify)(struct auxiliary_device *adev, bool enable); +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.c new file mode 100644 index 0000000000000000000000000000000000000000..85e402f0f661afa9e242272af838c3cd908dd464 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.c @@ -0,0 +1,950 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ +#include "nbl_accel.h" + +static int nbl_res_alloc_ktls_tx_index(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + u32 index; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + index = find_first_zero_bit(accel_mgt->tx_ktls_bitmap, NBL_MAX_KTLS_SESSION); + if (index >= NBL_MAX_KTLS_SESSION) + return -ENOSPC; + + set_bit(index, accel_mgt->tx_ktls_bitmap); + accel_mgt->dtls_cfg_info[index].vld = true; + accel_mgt->dtls_cfg_info[index].vsi = vsi; + return index; +} + +static void nbl_res_free_ktls_tx_index(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + clear_bit(index, accel_mgt->tx_ktls_bitmap); + memset(&accel_mgt->dtls_cfg_info[index], 0, sizeof(struct nbl_tls_cfg_info)); +} + +static void nbl_res_cfg_ktls_tx_keymat(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->cfg_ktls_tx_keymat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, mode, salt, key, key_len); +} + +static int nbl_res_alloc_ktls_rx_index(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + u32 index; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + index = find_first_zero_bit(accel_mgt->rx_ktls_bitmap, NBL_MAX_KTLS_SESSION); + if (index >= NBL_MAX_KTLS_SESSION) + return -ENOSPC; + + set_bit(index, accel_mgt->rx_ktls_bitmap); + accel_mgt->utls_cfg_info[index].vld = true; + accel_mgt->utls_cfg_info[index].vsi = vsi; + return index; +} + +static void nbl_res_free_ktls_rx_index(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + clear_bit(index, accel_mgt->rx_ktls_bitmap); + memset(&accel_mgt->utls_cfg_info[index], 0, sizeof(struct nbl_tls_cfg_info)); +} + +static void nbl_res_cfg_ktls_rx_keymat(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + phy_ops->cfg_ktls_rx_keymat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, mode, salt, key, key_len); +} + +static void nbl_res_cfg_ktls_rx_record(void *priv, u32 index, u32 tcp_sn, u64 rec_num, bool init) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + phy_ops->cfg_ktls_rx_record(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, tcp_sn, rec_num, init); +} + +static int nbl_res_alloc_ipsec_tx_index(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + u32 index; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + index = find_first_zero_bit(accel_mgt->tx_ipsec_bitmap, NBL_MAX_IPSEC_SESSION); + if (index >= NBL_MAX_IPSEC_SESSION) + return -ENOSPC; + + set_bit(index, accel_mgt->tx_ipsec_bitmap); + memcpy(&accel_mgt->tx_cfg_info[index], cfg_info, sizeof(struct nbl_ipsec_cfg_info)); + return index; +} + +static void nbl_res_free_ipsec_tx_index(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct 
nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + clear_bit(index, accel_mgt->tx_ipsec_bitmap); + memset(&accel_mgt->tx_cfg_info[index], 0, sizeof(struct nbl_ipsec_cfg_info)); +} + +static int nbl_res_alloc_ipsec_rx_index(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + u32 index; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + index = find_first_zero_bit(accel_mgt->rx_ipsec_bitmap, NBL_MAX_IPSEC_SESSION); + if (index >= NBL_MAX_IPSEC_SESSION) + return -ENOSPC; + + set_bit(index, accel_mgt->rx_ipsec_bitmap); + memcpy(&accel_mgt->rx_cfg_info[index], cfg_info, sizeof(struct nbl_ipsec_cfg_info)); + return index; +} + +static void nbl_res_free_ipsec_rx_index(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + clear_bit(index, accel_mgt->rx_ipsec_bitmap); + memset(&accel_mgt->rx_cfg_info[index], 0, sizeof(struct nbl_ipsec_cfg_info)); +} + +static void nbl_res_cfg_ipsec_tx_sad(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_esn_state *esn_state = &sa_entry->esn_state; + struct nbl_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; + struct nbl_ipsec_cfg_info *cfg_info = &sa_entry->cfg_info; + struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm; + u32 ip_data[NBL_DIPSEC_SAD_IP_TOTAL] = {0}; + int i; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + if (attrs->nat_flag) + phy_ops->cfg_dipsec_nat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), attrs->sport); + + phy_ops->cfg_dipsec_sad_iv(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), index, aes_gcm->seq_iv); + + phy_ops->cfg_dipsec_sad_esn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, esn_state->sn, esn_state->esn, + esn_state->wrap_en, esn_state->enable); + + phy_ops->cfg_dipsec_sad_lifetime(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, cfg_info->lft_cnt, cfg_info->lft_diff, + cfg_info->limit_enable, cfg_info->limit_type); + + phy_ops->cfg_dipsec_sad_crypto(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, aes_gcm->aes_key, aes_gcm->salt, + aes_gcm->crypto_type, attrs->tunnel_mode, aes_gcm->icv_len); + + if (attrs->is_ipv6) { + for (i = 0; i < NBL_DIPSEC_SAD_IP_LEN; i++) + ip_data[i] = ntohl(attrs->daddr.a6[NBL_DIPSEC_SAD_IP_LEN - i - 1]); + + for (i = 0; i < NBL_DIPSEC_SAD_IP_LEN; i++) + ip_data[i + NBL_DIPSEC_SAD_IP_LEN] = + ntohl(attrs->saddr.a6[NBL_DIPSEC_SAD_IP_LEN - i - 1]); + } else { + ip_data[0] = ntohl(attrs->daddr.a4); + ip_data[NBL_DIPSEC_SAD_IP_LEN] = ntohl(attrs->saddr.a4); + } + + phy_ops->cfg_dipsec_sad_encap(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, attrs->nat_flag, attrs->dport, attrs->spi, ip_data); +} + +static void nbl_res_cfg_ipsec_rx_sad(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_esn_state *esn_state = &sa_entry->esn_state; + struct nbl_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; + struct nbl_ipsec_cfg_info *cfg_info = &sa_entry->cfg_info; + struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + if (attrs->nat_flag) + phy_ops->cfg_uipsec_nat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + attrs->nat_flag, attrs->dport); + + 
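/* The rx-side (uipsec) SAD mirrors nbl_res_cfg_ipsec_tx_sad(): ESN
+	 * state, lifetime limits and the AES-GCM keymat each go through a
+	 * dedicated phy_ops hook; the anti-replay window is configured only
+	 * when enabled.
+	 */
+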
phy_ops->cfg_uipsec_sad_esn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+				    index, esn_state->sn, esn_state->esn,
+				    esn_state->overlap, esn_state->enable);
+
+	phy_ops->cfg_uipsec_sad_lifetime(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					 index, cfg_info->lft_cnt, cfg_info->lft_diff,
+					 cfg_info->limit_enable, cfg_info->limit_type);
+
+	phy_ops->cfg_uipsec_sad_crypto(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+				       index, aes_gcm->aes_key, aes_gcm->salt,
+				       aes_gcm->crypto_type, attrs->tunnel_mode, aes_gcm->icv_len);
+
+	if (esn_state->window_en)
+		phy_ops->cfg_uipsec_sad_window(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					       index, esn_state->window_en, esn_state->option);
+}
+
+static void nbl_uipsec_get_em_hash(struct nbl_flow_fem_entry *flow, u8 *key_data)
+{
+	u16 ht0_hash = 0;
+	u16 ht1_hash = 0;
+	u8 key[NBL_UIPSEC_BYTE_LEN];
+	int i;
+
+	for (i = 0; i < NBL_UIPSEC_BYTE_LEN; i++)
+		key[NBL_UIPSEC_BYTE_LEN - 1 - i] = key_data[i];
+
+	ht0_hash = NBL_CRC16_CCITT(key, NBL_UIPSEC_BYTE_LEN);
+	ht1_hash = NBL_CRC16_IBM(key, NBL_UIPSEC_BYTE_LEN);
+
+	flow->ht0_hash = nbl_hash_transfer(ht0_hash, NBL_UIPSEC_POWER, 0);
+	flow->ht1_hash = nbl_hash_transfer(ht1_hash, NBL_UIPSEC_POWER, 0);
+}
+
+/* Return true if either hash table already holds an entry with the same
+ * ht0/ht1 pair; the caller then places the new flow into the TCAM instead.
+ */
+static bool nbl_uipsec_ht0_ht1_search(struct nbl_ipsec_ht_mng *ipsec_ht0_mng, u16 ht0_hash,
+				      struct nbl_ipsec_ht_mng *ipsec_ht1_mng, u16 ht1_hash,
+				      struct nbl_common_info *common)
+{
+	struct nbl_flow_ht_tbl *node0 = NULL;
+	struct nbl_flow_ht_tbl *node1 = NULL;
+	u16 i = 0;
+
+	node0 = ipsec_ht0_mng->hash_map[ht0_hash];
+	if (node0)
+		for (i = 0; i < NBL_HASH_CFT_MAX; i++)
+			if (node0->key[i].vid == 1 && node0->key[i].ht_other_index == ht1_hash) {
+				nbl_info(common, NBL_DEBUG_ACCEL,
+					 "Conflicted ht on vid %d and kt_index %u\n",
+					 node0->key[i].vid, node0->key[i].kt_index);
+				return true;
+			}
+
+	node1 = ipsec_ht1_mng->hash_map[ht1_hash];
+	if (node1)
+		for (i = 0; i < NBL_HASH_CFT_MAX; i++)
+			if (node1->key[i].vid == 1 && node1->key[i].ht_other_index == ht0_hash) {
+				nbl_info(common, NBL_DEBUG_ACCEL,
+					 "Conflicted ht on vid %d and kt_index %u\n",
+					 node1->key[i].vid, node1->key[i].kt_index);
+				return true;
+			}
+
+	return false;
+}
+
+/* Pick the hash table (NBL_HT0/NBL_HT1) whose bucket has more room left;
+ * returns -1 when both buckets are full, so the caller falls back to the TCAM.
+ */
+static int nbl_uipsec_find_ht_avail_table(struct nbl_ipsec_ht_mng *ipsec_ht0_mng,
+					  struct nbl_ipsec_ht_mng *ipsec_ht1_mng,
+					  u16 ht0_hash, u16 ht1_hash)
+{
+	struct nbl_flow_ht_tbl *pp_ht0_node = NULL;
+	struct nbl_flow_ht_tbl *pp_ht1_node = NULL;
+
+	pp_ht0_node = ipsec_ht0_mng->hash_map[ht0_hash];
+	pp_ht1_node = ipsec_ht1_mng->hash_map[ht1_hash];
+
+	if (!pp_ht0_node && !pp_ht1_node) {
+		return 0;
+	} else if (pp_ht0_node && !pp_ht1_node) {
+		if (pp_ht0_node->ref_cnt >= NBL_HASH_CFT_AVL)
+			return 1;
+		else
+			return 0;
+	} else if (!pp_ht0_node && pp_ht1_node) {
+		if (pp_ht1_node->ref_cnt >= NBL_HASH_CFT_AVL)
+			return 0;
+		else
+			return 1;
+	} else {
+		if ((pp_ht0_node->ref_cnt <= NBL_HASH_CFT_AVL ||
+		     (pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL &&
+		      pp_ht0_node->ref_cnt < NBL_HASH_CFT_MAX &&
+		      pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL)))
+			return 0;
+		else if (((pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL &&
+			   pp_ht1_node->ref_cnt <= NBL_HASH_CFT_AVL) ||
+			  (pp_ht0_node->ref_cnt == NBL_HASH_CFT_MAX &&
+			   pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL &&
+			   pp_ht1_node->ref_cnt < NBL_HASH_CFT_MAX)))
+			return 1;
+		else
+			return -1;
+	}
+}
+
+static void nbl_uipsec_cfg_em_tcam(struct nbl_resource_mgt *res_mgt, u32 index,
+				   u32 *data, struct nbl_flow_fem_entry *flow)
+{
+	struct nbl_accel_mgt *accel_mgt;
+	struct nbl_common_info *common;
+	struct nbl_phy_ops *phy_ops;
+	u16 tcam_index;
+
+	accel_mgt =
NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + tcam_index = find_first_zero_bit(accel_mgt->ipsec_tcam_id, NBL_MAX_IPSEC_TCAM); + if (tcam_index >= NBL_MAX_IPSEC_TCAM) { + nbl_err(common, NBL_DEBUG_ACCEL, + "There is no available ipsec tcam id left for sa index %u\n", index); + return; + } + + nbl_info(common, NBL_DEBUG_ACCEL, + "put sad index %u to ipsec tcam index %u.\n", index, tcam_index); + phy_ops->cfg_uipsec_em_tcam(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), tcam_index, data); + phy_ops->cfg_uipsec_em_ad(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), tcam_index, index); + + flow->tcam_index = tcam_index; + flow->tcam_flag = true; + set_bit(tcam_index, accel_mgt->ipsec_tcam_id); +} + +static int nbl_uipsec_insert_em_ht(struct nbl_ipsec_ht_mng *ipsec_ht_mng, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_flow_ht_tbl *node; + u16 ht_index; + u16 ht_other_index; + int i; + + ht_index = (flow->hash_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash); + ht_other_index = (flow->hash_table == NBL_HT0 ? flow->ht1_hash : flow->ht0_hash); + + node = ipsec_ht_mng->hash_map[ht_index]; + if (!node) { + node = kzalloc(sizeof(*node), GFP_ATOMIC); + if (!node) + return -ENOMEM; + ipsec_ht_mng->hash_map[ht_index] = node; + } + + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (node->key[i].vid == 0) { + node->key[i].vid = 1; + node->key[i].ht_other_index = ht_other_index; + node->key[i].kt_index = flow->flow_id; + node->ref_cnt++; + flow->hash_bucket = i; + break; + } + } + + return 0; +} + +static void nbl_uipsec_cfg_em_flow(struct nbl_resource_mgt *res_mgt, u32 index, + u32 *data, struct nbl_flow_fem_entry *flow) +{ + struct nbl_phy_ops *phy_ops; + u16 ht_table; + u16 ht_index; + u16 ht_other_index; + u16 ht_bucket; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + ht_table = flow->hash_table; + ht_index = (flow->hash_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash); + ht_other_index = (flow->hash_table == NBL_HT0 ? flow->ht1_hash : flow->ht0_hash); + ht_bucket = flow->hash_bucket; + + phy_ops->cfg_uipsec_em_ht(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), index, ht_table, + ht_index, ht_other_index, ht_bucket); + phy_ops->cfg_uipsec_em_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), index, data); +} + +static int nbl_accel_add_uipsec_rule(struct nbl_resource_mgt *res_mgt, u32 index, u32 *data, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_accel_mgt *accel_mgt; + struct nbl_common_info *common; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_ht_mng *ipsec_ht_mng = NULL; + u8 key_data[NBL_UIPSEC_BYTE_LEN]; + int ht_table; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + flow->flow_id = index; + memcpy(key_data, data, NBL_UIPSEC_BYTE_LEN); + nbl_uipsec_get_em_hash(flow, key_data); + + /* two flows have the same ht0&ht1, put the conflicted one to tcam */ + if (nbl_uipsec_ht0_ht1_search(&accel_mgt->ipsec_ht0_mng, flow->ht0_hash, + &accel_mgt->ipsec_ht1_mng, flow->ht1_hash, common)) + flow->tcam_flag = true; + + ht_table = nbl_uipsec_find_ht_avail_table(&accel_mgt->ipsec_ht0_mng, + &accel_mgt->ipsec_ht1_mng, + flow->ht0_hash, flow->ht1_hash); + if (ht_table < 0) + flow->tcam_flag = true; + + if (flow->tcam_flag) { + nbl_uipsec_cfg_em_tcam(res_mgt, index, data, flow); + return 0; + } + + ipsec_ht_mng = + (ht_table == NBL_HT0 ? 
&accel_mgt->ipsec_ht0_mng : &accel_mgt->ipsec_ht1_mng); + flow->hash_table = ht_table; + if (nbl_uipsec_insert_em_ht(ipsec_ht_mng, flow)) + return -ENOMEM; + + nbl_info(common, NBL_DEBUG_ACCEL, "cfg uipsec flow_item: %u, %u, %u, %u, %u\n", + flow->flow_id, flow->hash_table, flow->ht0_hash, + flow->ht1_hash, flow->hash_bucket); + nbl_uipsec_cfg_em_flow(res_mgt, index, data, flow); + + return 0; +} + +static int nbl_res_add_ipsec_rx_flow(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + struct nbl_accel_uipsec_rule *rule; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + + list_for_each_entry(rule, &accel_mgt->uprbac_head, node) + if (rule->index == index) + return -EEXIST; + + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); + if (!rule) + return -ENOMEM; + + if (nbl_accel_add_uipsec_rule(res_mgt, index, data, &rule->uipsec_entry)) { + kfree(rule); + return -EFAULT; + } + + rule->index = index; + rule->vsi = vsi; + list_add_tail(&rule->node, &accel_mgt->uprbac_head); + + return 0; +} + +static void nbl_uipsec_clear_em_tcam(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_accel_mgt *accel_mgt; + struct nbl_common_info *common; + struct nbl_phy_ops *phy_ops; + u16 tcam_index; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + tcam_index = flow->tcam_index; + + nbl_info(common, NBL_DEBUG_ACCEL, + "del sad index %u from ipsec tcam index %u.\n", flow->flow_id, tcam_index); + phy_ops->clear_uipsec_tcam_ad(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), tcam_index); + clear_bit(tcam_index, accel_mgt->ipsec_tcam_id); +} + +static void nbl_uipsec_remove_em_ht(struct nbl_ipsec_ht_mng *ipsec_ht_mng, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_flow_ht_tbl *node; + u16 ht_index; + + ht_index = (flow->hash_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash); + node = ipsec_ht_mng->hash_map[ht_index]; + if (!node) + return; + + memset(&node->key[flow->hash_bucket], 0, sizeof(node->key[flow->hash_bucket])); + node->ref_cnt--; + if (!node->ref_cnt) { + kfree(node); + ipsec_ht_mng->hash_map[ht_index] = NULL; + } +} + +static void nbl_uipsec_clear_em_flow(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_phy_ops *phy_ops; + u16 ht_table; + u16 ht_index; + u16 ht_bucket; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + ht_table = flow->hash_table; + ht_index = (flow->hash_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash); + ht_bucket = flow->hash_bucket; + + phy_ops->clear_uipsec_ht_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), flow->flow_id, + ht_table, ht_index, ht_bucket); +} + +static void nbl_accel_del_uipsec_rule(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_accel_mgt *accel_mgt; + struct nbl_common_info *common; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_ht_mng *ipsec_ht_mng = NULL; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (flow->tcam_flag) { + nbl_uipsec_clear_em_tcam(res_mgt, flow); + return; + } + + ipsec_ht_mng = (flow->hash_table == NBL_HT0 ? 
+ &accel_mgt->ipsec_ht0_mng : &accel_mgt->ipsec_ht1_mng); + nbl_uipsec_remove_em_ht(ipsec_ht_mng, flow); + nbl_info(common, NBL_DEBUG_ACCEL, "del uipsec flow_item: %u, %u, %u, %u, %u\n", + flow->flow_id, flow->hash_table, flow->ht0_hash, + flow->ht1_hash, flow->hash_bucket); + + nbl_uipsec_clear_em_flow(res_mgt, flow); +} + +static void nbl_res_del_ipsec_rx_flow(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + struct nbl_accel_uipsec_rule *rule; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + + list_for_each_entry(rule, &accel_mgt->uprbac_head, node) + if (rule->index == index) + break; + + if (nbl_list_entry_is_head(rule, &accel_mgt->uprbac_head, node)) + return; + + nbl_accel_del_uipsec_rule(res_mgt, &rule->uipsec_entry); + list_del(&rule->node); + kfree(rule); +} + +static void nbl_res_flr_clear_accel(void *priv, u16 vf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + struct nbl_accel_uipsec_rule *uipsec_rule, *uipsec_rule_safe; + u16 func_id = vf_id + NBL_MAX_PF; + u16 vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); + int i; + + if (nbl_res_vf_is_active(priv, func_id)) { + for (i = 0; i < NBL_MAX_IPSEC_SESSION; i++) { + if (accel_mgt->tx_cfg_info[i].vld && + accel_mgt->tx_cfg_info[i].vsi == vsi_id) { + clear_bit(i, accel_mgt->tx_ipsec_bitmap); + memset(&accel_mgt->tx_cfg_info[i], 0, + sizeof(struct nbl_ipsec_cfg_info)); + } + } + + list_for_each_entry_safe(uipsec_rule, uipsec_rule_safe, + &accel_mgt->uprbac_head, node) + if (uipsec_rule->vsi == vsi_id) { + nbl_accel_del_uipsec_rule(res_mgt, &uipsec_rule->uipsec_entry); + list_del(&uipsec_rule->node); + kfree(uipsec_rule); + } + + for (i = 0; i < NBL_MAX_IPSEC_SESSION; i++) { + if (accel_mgt->rx_cfg_info[i].vld && + accel_mgt->rx_cfg_info[i].vsi == vsi_id) { + clear_bit(i, accel_mgt->rx_ipsec_bitmap); + memset(&accel_mgt->rx_cfg_info[i], 0, + sizeof(struct nbl_ipsec_cfg_info)); + } + } + + for (i = 0; i < NBL_MAX_KTLS_SESSION; i++) { + if (accel_mgt->dtls_cfg_info[i].vld && + accel_mgt->dtls_cfg_info[i].vsi == vsi_id) { + clear_bit(i, accel_mgt->tx_ktls_bitmap); + memset(&accel_mgt->dtls_cfg_info[i], 0, + sizeof(struct nbl_tls_cfg_info)); + } + } + + for (i = 0; i < NBL_MAX_KTLS_SESSION; i++) { + if (accel_mgt->utls_cfg_info[i].vld && + accel_mgt->utls_cfg_info[i].vsi == vsi_id) { + clear_bit(i, accel_mgt->rx_ktls_bitmap); + memset(&accel_mgt->utls_cfg_info[i], 0, + sizeof(struct nbl_tls_cfg_info)); + } + } + } +} + +static bool nbl_res_check_ipsec_status(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + u32 dipsec_status; + u32 uipsec_status; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + dipsec_status = phy_ops->read_dipsec_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + uipsec_status = phy_ops->read_uipsec_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + if ((dipsec_status & NBL_IPSEC_SOFT_EXPIRE) || + (dipsec_status & NBL_IPSEC_HARD_EXPIRE) || + ((uipsec_status) & NBL_IPSEC_SOFT_EXPIRE) || + ((uipsec_status) & NBL_IPSEC_HARD_EXPIRE)) + return true; + + return false; +} + +static u32 nbl_res_get_dipsec_lft_info(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + union nbl_ipsec_lft_info lft_info; + u32 dipsec_status; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + lft_info.data = 
phy_ops->read_dipsec_lft_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + dipsec_status = phy_ops->reset_dipsec_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + lft_info.soft_vld = !!(dipsec_status & NBL_IPSEC_SOFT_EXPIRE); + lft_info.hard_vld = !!(dipsec_status & NBL_IPSEC_HARD_EXPIRE); + + return lft_info.data; +} + +static void nbl_res_handle_dipsec_soft_expire(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common; + struct nbl_accel_mgt *accel_mgt; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_cfg_info *cfg_info; + u32 lifetime_diff; + u32 flag_wen; + u32 msb_wen; + bool need = false; + + common = NBL_RES_MGT_TO_COMMON(res_mgt); + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + cfg_info = &accel_mgt->tx_cfg_info[index]; + + if (!cfg_info->vld) + return; + + if (cfg_info->soft_round == 0) { + nbl_info(common, NBL_DEBUG_ACCEL, "dipsec sa %u soft expire.\n", index); + if (cfg_info->hard_round == 0) { + lifetime_diff = 0; + flag_wen = 1; + msb_wen = 0; + need = true; + } + } + + if (cfg_info->hard_round == 1) { + if (cfg_info->hard_remain > cfg_info->soft_remain) + lifetime_diff = cfg_info->hard_remain - + cfg_info->soft_remain; + else + lifetime_diff = (1 << NBL_IPSEC_LIFETIME_ROUND) + + cfg_info->hard_remain - + cfg_info->soft_remain; + flag_wen = 1; + msb_wen = 0; + need = true; + if (cfg_info->soft_round > 0) + nbl_info(common, NBL_DEBUG_ACCEL, + "dipsec sa %u soft expire in advance.\n", index); + } + + if (cfg_info->hard_round > 1) { + lifetime_diff = 0; + flag_wen = 0; + msb_wen = 1; + need = true; + if (cfg_info->soft_round) + cfg_info->soft_round--; + cfg_info->hard_round--; + } + + if (need) + phy_ops->cfg_dipsec_lft_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), index, + lifetime_diff, flag_wen, msb_wen); +} + +static u32 nbl_res_get_uipsec_lft_info(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + union nbl_ipsec_lft_info lft_info; + u32 uipsec_status; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + lft_info.data = phy_ops->read_uipsec_lft_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + uipsec_status = phy_ops->reset_uipsec_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + lft_info.soft_vld = !!(uipsec_status & NBL_IPSEC_SOFT_EXPIRE); + lft_info.hard_vld = !!(uipsec_status & NBL_IPSEC_HARD_EXPIRE); + + return lft_info.data; +} + +static void nbl_res_handle_uipsec_soft_expire(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common; + struct nbl_accel_mgt *accel_mgt; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_cfg_info *cfg_info; + u32 lifetime_diff; + u32 flag_wen; + u32 msb_wen; + bool need = false; + + common = NBL_RES_MGT_TO_COMMON(res_mgt); + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + cfg_info = &accel_mgt->rx_cfg_info[index]; + + if (!cfg_info->vld) + return; + + if (cfg_info->soft_round == 0) { + nbl_info(common, NBL_DEBUG_ACCEL, "uipsec sa %u soft expire.\n", index); + if (cfg_info->hard_round == 0) { + lifetime_diff = 0; + flag_wen = 1; + msb_wen = 0; + need = true; + } + } + + if (cfg_info->hard_round == 1) { + if (cfg_info->hard_remain > cfg_info->soft_remain) + lifetime_diff = cfg_info->hard_remain - + cfg_info->soft_remain; + else + lifetime_diff = (1 << NBL_IPSEC_LIFETIME_ROUND) + + cfg_info->hard_remain - + cfg_info->soft_remain; + flag_wen = 1; + msb_wen = 0; + need = true; + if 
(cfg_info->soft_round > 0)
+			nbl_info(common, NBL_DEBUG_ACCEL,
+				 "uipsec sa %u soft expire in advance.\n", index);
+	}
+
+	if (cfg_info->hard_round > 1) {
+		lifetime_diff = 0;
+		flag_wen = 0;
+		msb_wen = 1;
+		need = true;
+		if (cfg_info->soft_round)
+			cfg_info->soft_round--;
+		cfg_info->hard_round--;
+	}
+
+	if (need)
+		phy_ops->cfg_uipsec_lft_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), index,
+					     lifetime_diff, flag_wen, msb_wen);
+}
+
+static void nbl_res_handle_dipsec_hard_expire(void *priv, u32 index)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_common_info *common;
+	struct nbl_channel_ops *chan_ops;
+	struct nbl_accel_mgt *accel_mgt;
+	struct nbl_sa_search_key param;
+	struct nbl_chan_send_info chan_send;
+	u16 vsid;
+	u16 dstid;
+
+	chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt);
+	common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	if (!accel_mgt->tx_cfg_info[index].vld)
+		return;
+
+	vsid = accel_mgt->tx_cfg_info[index].vsi;
+	dstid = nbl_res_vsi_id_to_func_id(res_mgt, vsid);
+	param.family = accel_mgt->tx_cfg_info[index].sa_key.family;
+	param.mark = accel_mgt->tx_cfg_info[index].sa_key.mark;
+	param.spi = accel_mgt->tx_cfg_info[index].sa_key.spi;
+	memcpy(&param.daddr, &accel_mgt->tx_cfg_info[index].sa_key.daddr, sizeof(param.daddr));
+
+	nbl_info(common, NBL_DEBUG_ACCEL, "dipsec sa %u hard expire.\n", index);
+	NBL_CHAN_SEND(chan_send, dstid, NBL_CHAN_MSG_NOTIFY_IPSEC_HARD_EXPIRE, &param,
+		      sizeof(param), NULL, 0, 0);
+	chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+}
+
+static void nbl_res_handle_uipsec_hard_expire(void *priv, u32 index)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_common_info *common;
+	struct nbl_channel_ops *chan_ops;
+	struct nbl_accel_mgt *accel_mgt;
+	struct nbl_sa_search_key param;
+	struct nbl_chan_send_info chan_send;
+	u16 vsid;
+	u16 dstid;
+
+	chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt);
+	common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	if (!accel_mgt->rx_cfg_info[index].vld)
+		return;
+
+	vsid = accel_mgt->rx_cfg_info[index].vsi;
+	dstid = nbl_res_vsi_id_to_func_id(res_mgt, vsid);
+	param.family = accel_mgt->rx_cfg_info[index].sa_key.family;
+	param.mark = accel_mgt->rx_cfg_info[index].sa_key.mark;
+	param.spi = accel_mgt->rx_cfg_info[index].sa_key.spi;
+	memcpy(&param.daddr, &accel_mgt->rx_cfg_info[index].sa_key.daddr, sizeof(param.daddr));
+
+	nbl_info(common, NBL_DEBUG_ACCEL, "uipsec sa %u hard expire.\n", index);
+	NBL_CHAN_SEND(chan_send, dstid, NBL_CHAN_MSG_NOTIFY_IPSEC_HARD_EXPIRE, &param,
+		      sizeof(param), NULL, 0, 0);
+	chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+}
+
+/* NBL_ACCEL_SET_OPS(ops_name, func)
+ *
+ * Use X macros to reduce the setup and removal code.
+ */
+#define NBL_ACCEL_OPS_TBL							\
+do {										\
+	NBL_ACCEL_SET_OPS(alloc_ktls_tx_index, nbl_res_alloc_ktls_tx_index);	\
+	NBL_ACCEL_SET_OPS(free_ktls_tx_index, nbl_res_free_ktls_tx_index);	\
+	NBL_ACCEL_SET_OPS(cfg_ktls_tx_keymat, nbl_res_cfg_ktls_tx_keymat);	\
+	NBL_ACCEL_SET_OPS(alloc_ktls_rx_index, nbl_res_alloc_ktls_rx_index);	\
+	NBL_ACCEL_SET_OPS(free_ktls_rx_index, nbl_res_free_ktls_rx_index);	\
+	NBL_ACCEL_SET_OPS(cfg_ktls_rx_keymat, nbl_res_cfg_ktls_rx_keymat);	\
+	NBL_ACCEL_SET_OPS(cfg_ktls_rx_record, nbl_res_cfg_ktls_rx_record);	\
+	NBL_ACCEL_SET_OPS(alloc_ipsec_tx_index, nbl_res_alloc_ipsec_tx_index);	\
+	NBL_ACCEL_SET_OPS(free_ipsec_tx_index, nbl_res_free_ipsec_tx_index);	\
+	NBL_ACCEL_SET_OPS(alloc_ipsec_rx_index, nbl_res_alloc_ipsec_rx_index);	\
+	NBL_ACCEL_SET_OPS(free_ipsec_rx_index, nbl_res_free_ipsec_rx_index);	\
+	NBL_ACCEL_SET_OPS(cfg_ipsec_tx_sad, nbl_res_cfg_ipsec_tx_sad);		\
+	NBL_ACCEL_SET_OPS(cfg_ipsec_rx_sad, nbl_res_cfg_ipsec_rx_sad);		\
+	NBL_ACCEL_SET_OPS(add_ipsec_rx_flow, nbl_res_add_ipsec_rx_flow);	\
+	NBL_ACCEL_SET_OPS(del_ipsec_rx_flow, nbl_res_del_ipsec_rx_flow);	\
+	NBL_ACCEL_SET_OPS(flr_clear_accel, nbl_res_flr_clear_accel);		\
+	NBL_ACCEL_SET_OPS(check_ipsec_status, nbl_res_check_ipsec_status);	\
+	NBL_ACCEL_SET_OPS(get_dipsec_lft_info, nbl_res_get_dipsec_lft_info);	\
+	NBL_ACCEL_SET_OPS(handle_dipsec_soft_expire, nbl_res_handle_dipsec_soft_expire);\
+	NBL_ACCEL_SET_OPS(handle_dipsec_hard_expire, nbl_res_handle_dipsec_hard_expire);\
+	NBL_ACCEL_SET_OPS(get_uipsec_lft_info, nbl_res_get_uipsec_lft_info);	\
+	NBL_ACCEL_SET_OPS(handle_uipsec_soft_expire, nbl_res_handle_uipsec_soft_expire);\
+	NBL_ACCEL_SET_OPS(handle_uipsec_hard_expire, nbl_res_handle_uipsec_hard_expire);\
+} while (0)
+
+/* Structure starts here, adding an op should not modify anything below */
+static int nbl_accel_setup_mgt(struct device *dev, struct nbl_accel_mgt **accel_mgt)
+{
+	*accel_mgt = devm_kzalloc(dev, sizeof(struct nbl_accel_mgt), GFP_KERNEL);
+	if (!*accel_mgt)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&(*accel_mgt)->uprbac_head);
+	return 0;
+}
+
+static void nbl_accel_remove_mgt(struct device *dev, struct nbl_accel_mgt **accel_mgt)
+{
+	devm_kfree(dev, *accel_mgt);
+	*accel_mgt = NULL;
+}
+
+int nbl_accel_mgt_start(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_accel_mgt **accel_mgt;
+	struct nbl_phy_ops *phy_ops;
+	struct device *dev;
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	accel_mgt = &NBL_RES_MGT_TO_ACCEL_MGT(res_mgt);
+	phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->init_dprbac(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+	phy_ops->init_uprbac(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+
+	return nbl_accel_setup_mgt(dev, accel_mgt);
+}
+
+void nbl_accel_mgt_stop(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev;
+	struct nbl_accel_mgt **accel_mgt;
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	accel_mgt = &NBL_RES_MGT_TO_ACCEL_MGT(res_mgt);
+
+	if (!(*accel_mgt))
+		return;
+
+	nbl_accel_remove_mgt(dev, accel_mgt);
+}
+
+int nbl_accel_setup_ops(struct nbl_resource_ops *res_ops)
+{
+	if (!res_ops)
+		return -EINVAL;
+
+#define NBL_ACCEL_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = func; } while (0)
+	NBL_ACCEL_OPS_TBL;
+#undef NBL_ACCEL_SET_OPS
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.h
new file mode 100644
index 0000000000000000000000000000000000000000..2001f995db7b9d5c3ecbda1df1380c7ddbee7797
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.h
@@ 
-0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_ACCEL_H_ +#define _NBL_ACCEL_H_ +#include "nbl_resource.h" + +#define NBL_IPSEC_SOFT_EXPIRE 0x80 +#define NBL_IPSEC_HARD_EXPIRE 0x100 + +#define NBL_DIPSEC_SAD_IP_TOTAL 8 +#define NBL_DIPSEC_SAD_IP_LEN 4 +#define NBL_UIPSEC_BYTE_LEN 20 +#define NBL_UIPSEC_POWER 9 +#define NBL_IPSEC_LIFETIME_ROUND 31 + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c index 89b7244eaa21b12c005079f9ed3edeb7f87ae0b1..5348c98bce658700049623e15c3485e91cdae723 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c @@ -10,14 +10,15 @@ static int nbl_res_adminq_update_ring_num(void *priv); /* **** FW CMD FILTERS START **** */ -static int nbl_res_adminq_check_ring_num(struct nbl_resource_mgt *res_mgt, - struct nbl_fw_cmd_ring_num_param *param) +static int nbl_res_adminq_check_net_ring_num(struct nbl_resource_mgt *res_mgt, + struct nbl_fw_cmd_net_ring_num_param *param) { + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); u32 sum = 0, pf_real_num = 0, vf_real_num = 0; int i; - pf_real_num = NBL_VSI_PF_REAL_QUEUE_NUM(param->pf_def_max_net_qp_num); + pf_real_num = NBL_VSI_PF_LEGAL_QUEUE_NUM(param->pf_def_max_net_qp_num); vf_real_num = NBL_VSI_VF_REAL_QUEUE_NUM(param->vf_def_max_net_qp_num); if (pf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC || vf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) @@ -26,18 +27,24 @@ static int nbl_res_adminq_check_ring_num(struct nbl_resource_mgt *res_mgt, /* TODO: should we consider when pf_num is 8? */ for (i = 0; i < NBL_COMMON_TO_ETH_MODE(common); i++) { pf_real_num = param->net_max_qp_num[i] ? - NBL_VSI_PF_REAL_QUEUE_NUM(param->net_max_qp_num[i]) : - NBL_VSI_PF_REAL_QUEUE_NUM(param->pf_def_max_net_qp_num); + NBL_VSI_PF_LEGAL_QUEUE_NUM(param->net_max_qp_num[i]) : + NBL_VSI_PF_LEGAL_QUEUE_NUM(param->pf_def_max_net_qp_num); if (pf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) return -EINVAL; + pf_real_num = param->net_max_qp_num[i] ? + NBL_VSI_PF_MAX_QUEUE_NUM(param->net_max_qp_num[i]) : + NBL_VSI_PF_MAX_QUEUE_NUM(param->pf_def_max_net_qp_num); + if (pf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) + pf_real_num = NBL_MAX_TXRX_QUEUE_PER_FUNC; + sum += pf_real_num; } - for (i = NBL_MAX_PF; i < NBL_MAX_FUNC; i++) { - vf_real_num = param->net_max_qp_num[i] ? - NBL_VSI_VF_REAL_QUEUE_NUM(param->net_max_qp_num[i]) : + for (i = 0; i < res_info->max_vf_num; i++) { + vf_real_num = param->net_max_qp_num[i + NBL_MAX_PF] ? 
+ NBL_VSI_VF_REAL_QUEUE_NUM(param->net_max_qp_num[i + NBL_MAX_PF]) : NBL_VSI_VF_REAL_QUEUE_NUM(param->vf_def_max_net_qp_num); if (vf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) @@ -52,15 +59,100 @@ static int nbl_res_adminq_check_ring_num(struct nbl_resource_mgt *res_mgt, return 0; } -static int nbl_res_fw_cmd_filter_rw_in(struct nbl_resource_mgt *res_mgt, void *data, int len) +static int nbl_res_adminq_check_rdma_cap(struct nbl_resource_mgt *res_mgt, + struct nbl_fw_cmd_rdma_cap_param *param) +{ + int count = 0, i, j; + + for (i = 0; i < NBL_RDMA_CAP_CMD_LEN; i++) + for (j = 0; j < BITS_PER_BYTE; j++) + if (param->rdma_func_bitmaps[i] & BIT(j)) + count++; + + if (count > NBL_RES_RDMA_MAX) + return -EINVAL; + + return 0; +} + +static int nbl_res_adminq_check_rdma_mem_type(struct nbl_resource_mgt *res_mgt, + struct nbl_fw_cmd_rdma_mem_type_param *param) +{ + return param->mem_type > NBL_RDMA_MEM_TYPE_MAX ? -EINVAL : 0; +} + +static u32 nbl_res_adminq_sum_vf_num(struct nbl_fw_cmd_vf_num_param *param) +{ + u32 count = 0; + int i; + + for (i = 0; i < NBL_VF_NUM_CMD_LEN; i++) + count += param->vf_max_num[i]; + + return count; +} + +static int nbl_res_adminq_check_vf_num_type(struct nbl_resource_mgt *res_mgt, + struct nbl_fw_cmd_vf_num_param *param) +{ + u32 count; + + count = nbl_res_adminq_sum_vf_num(param); + if (count > NBL_MAX_VF) + return -EINVAL; + + return 0; +} + +static int nbl_res_fw_cmd_filter_rw_in(struct nbl_resource_mgt *res_mgt, void *data, u16 len) { struct nbl_chan_resource_write_param *param = (struct nbl_chan_resource_write_param *)data; - struct nbl_fw_cmd_ring_num_param *num_param; + struct nbl_fw_cmd_net_ring_num_param *net_ring_num_param; + struct nbl_fw_cmd_rdma_cap_param *rdma_cap_param; + struct nbl_fw_cmd_rdma_mem_type_param *rdma_mem_type_param; + struct nbl_fw_cmd_vf_num_param *vf_num_param; switch (param->resid) { - case NBL_ADMINQ_PFA_TLV_PFVF_RING_ID: - num_param = (struct nbl_fw_cmd_ring_num_param *)param->data; - return nbl_res_adminq_check_ring_num(res_mgt, num_param); + case NBL_ADMINQ_PFA_TLV_NET_RING_NUM: + net_ring_num_param = (struct nbl_fw_cmd_net_ring_num_param *)param->data; + return nbl_res_adminq_check_net_ring_num(res_mgt, net_ring_num_param); + case NBL_ADMINQ_PFA_TLV_RDMA_CAP: + rdma_cap_param = (struct nbl_fw_cmd_rdma_cap_param *)param->data; + return nbl_res_adminq_check_rdma_cap(res_mgt, rdma_cap_param); + case NBL_ADMINQ_PFA_TLV_RDMA_MEM_TYPE: + rdma_mem_type_param = (struct nbl_fw_cmd_rdma_mem_type_param *)param->data; + return nbl_res_adminq_check_rdma_mem_type(res_mgt, rdma_mem_type_param); + case NBL_ADMINQ_PFA_TLV_VF_NUM: + vf_num_param = (struct nbl_fw_cmd_vf_num_param *)param->data; + return nbl_res_adminq_check_vf_num_type(res_mgt, vf_num_param); + default: + break; + } + + return 0; +} + +static int nbl_res_fw_cmd_filter_rw_out(struct nbl_resource_mgt *res_mgt, void *in, u16 in_len, + void *out, u16 out_len) +{ + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_net_ring_num_info *num_info = &res_info->net_ring_num_info; + struct nbl_chan_resource_write_param *param = (struct nbl_chan_resource_write_param *)in; + struct nbl_fw_cmd_net_ring_num_param *net_ring_num_param; + struct nbl_fw_cmd_vf_num_param *vf_num_param; + size_t copy_len; + u32 count; + + switch (param->resid) { + case NBL_ADMINQ_PFA_TLV_NET_RING_NUM: + net_ring_num_param = (struct nbl_fw_cmd_net_ring_num_param *)param->data; + copy_len = min_t(size_t, sizeof(*num_info), (size_t)in_len); + memcpy(num_info, net_ring_num_param, copy_len); + 
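/* copy_len is clamped to both the TLV payload length and the cached
+		 * struct size, so neither side of the memcpy can be overrun */
+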
break; + case NBL_ADMINQ_PFA_TLV_VF_NUM: + vf_num_param = (struct nbl_fw_cmd_vf_num_param *)param->data; + count = nbl_res_adminq_sum_vf_num(vf_num_param); + res_info->max_vf_num = count; default: break; } @@ -74,13 +166,13 @@ static void nbl_res_adminq_add_cmd_filter_res_write(struct nbl_resource_mgt *res struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); struct nbl_res_fw_cmd_filter filter = { .in = nbl_res_fw_cmd_filter_rw_in, - .out = NULL, + .out = nbl_res_fw_cmd_filter_rw_out, }; u16 key = 0; key = NBL_CHAN_MSG_ADMINQ_RESOURCE_WRITE; - if (nbl_common_alloc_hash_node(adminq_mgt->cmd_filter, &key, &filter)) + if (nbl_common_alloc_hash_node(adminq_mgt->cmd_filter, &key, &filter, NULL)) nbl_warn(common, NBL_DEBUG_ADMINQ, "Fail to register res_write in filter"); } @@ -114,6 +206,7 @@ static int nbl_res_adminq_set_module_eeprom_info(struct nbl_resource_mgt *res_mg param.page = page; param.bank = bank; param.write = 1; + param.version = 1; param.offset = offset + byte_offset; param.length = xfer_size; memcpy(param.data, data + byte_offset, xfer_size); @@ -123,8 +216,9 @@ static int nbl_res_adminq_set_module_eeprom_info(struct nbl_resource_mgt *res_mg ¶m, sizeof(param), NULL, 0, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d,\n" - "i2c_address:%d, page:%d, bank:%d, offset:%d, length:%d\n", + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," + " eth_id:%d, i2c_address:%d, page:%d, bank:%d," + " offset:%d, length:%d\n", ret, NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM, eth_info->logic_eth_id[eth_id], i2c_address, page, bank, offset + byte_offset, xfer_size); @@ -193,6 +287,7 @@ static int nbl_res_adminq_get_module_eeprom_info(struct nbl_resource_mgt *res_mg param.page = page; param.bank = bank; param.write = 0; + param.version = 1; param.offset = offset + byte_offset; param.length = xfer_size; @@ -201,8 +296,9 @@ static int nbl_res_adminq_get_module_eeprom_info(struct nbl_resource_mgt *res_mg ¶m, sizeof(param), data + byte_offset, xfer_size, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d,\n" - "i2c_address:%d, page:%d, bank:%d, offset:%d, length:%d\n", + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," + " eth_id:%d, i2c_address:%d, page:%d, bank:%d," + " offset:%d, length:%d\n", ret, NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM, eth_info->logic_eth_id[eth_id], i2c_address, page, bank, offset + byte_offset, xfer_size); @@ -663,7 +759,7 @@ static bool nbl_res_adminq_check_fw_heartbeat(void *priv) struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); unsigned long check_time; - unsigned long seq_acked; + u32 seq_acked; if (adminq_mgt->fw_resetting) { adminq_mgt->fw_last_hb_seq++; @@ -690,7 +786,7 @@ static bool nbl_res_adminq_check_fw_reset(void *priv) struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - unsigned long seq_acked; + u32 seq_acked; seq_acked = phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); if (adminq_mgt->fw_last_hb_seq != seq_acked) { @@ -1036,16 +1132,56 @@ static void nbl_res_eth_task_schedule(struct nbl_adminq_mgt *adminq_mgt) 
nbl_common_queue_work(&adminq_mgt->eth_task, true, false); } +static int nbl_res_adminq_get_bond_link_state(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt); + struct nbl_eth_bond_entry *entry = NULL; + int lag_id = nbl_res_eth_id_to_lag_id(res_mgt, eth_id); + int i, link_state = 0; + + if (lag_id < 0 || lag_id >= NBL_LAG_MAX_NUM) + return eth_info->link_state[eth_id]; + + /* bond_link_state will be 1 if any eth port is up */ + entry = ð_bond_info->entry[lag_id]; + for (i = 0; i < entry->lag_num && NBL_ETH_BOND_VALID_PORT(i); i++) + link_state |= !!(eth_info->link_state[entry->eth_id[i]]); + + return link_state; +} + +static int nbl_res_adminq_handle_link_state_update(u16 type, void *event_data, void *callback_data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)callback_data; + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_event_link_status_update_data *data = + (struct nbl_event_link_status_update_data *)event_data; + int i; + + mutex_lock(&adminq_mgt->eth_lock); + + for (i = 0; i < data->num; i++) + adminq_mgt->link_state_changed[data->eth_id[i]] = 1; + + mutex_unlock(&adminq_mgt->eth_lock); + + nbl_res_eth_task_schedule(adminq_mgt); + + return 0; +} + static void nbl_res_adminq_recv_port_notify(void *priv, void *data) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt); struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); struct nbl_port_notify *notify; u8 last_module_inplace = 0; u8 last_link_state = 0; - int eth_id = 0; + int i, eth_id = 0, eth_tmp = 0, lag_id = -1; notify = (struct nbl_port_notify *)data; eth_id = notify->id; @@ -1060,6 +1196,9 @@ static void nbl_res_adminq_recv_port_notify(void *priv, void *data) last_module_inplace = eth_info->module_inplace[eth_id]; last_link_state = eth_info->link_state[eth_id]; + if (!notify->link_state) + eth_info->link_down_count[eth_id]++; + eth_info->link_state[eth_id] = notify->link_state; eth_info->module_inplace[eth_id] = notify->module_inplace; /* when eth link down, don not update speed @@ -1072,6 +1211,7 @@ static void nbl_res_adminq_recv_port_notify(void *priv, void *data) eth_info->active_fc[eth_id] = notify->flow_ctrl; eth_info->active_fec[eth_id] = notify->fec; eth_info->port_lp_advertising[eth_id] = notify->lp_advertising; + eth_info->port_advertising[eth_id] = notify->advertising; if (!last_module_inplace && notify->module_inplace) { adminq_mgt->module_inplace_changed[eth_id] = 1; @@ -1079,6 +1219,17 @@ static void nbl_res_adminq_recv_port_notify(void *priv, void *data) } if (last_link_state != notify->link_state) { + /* If this eth belongs to a bond, any link_state update has to notify all vfs for + * all eths in this bond group. 
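+	 * The reported link state of a bonded eth is the logical OR of all
+	 * member ports (see nbl_res_adminq_get_bond_link_state()).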
+ */ + lag_id = nbl_res_eth_id_to_lag_id(res_mgt, eth_id); + if (lag_id >= 0 && lag_id < NBL_LAG_MAX_NUM) + for (i = 0; i < eth_bond_info->entry[lag_id].lag_num && + NBL_ETH_BOND_VALID_PORT(i); i++) { + eth_tmp = eth_bond_info->entry[lag_id].eth_id[i]; + adminq_mgt->link_state_changed[eth_tmp] = 1; + } + adminq_mgt->link_state_changed[eth_id] = 1; nbl_res_eth_task_schedule(adminq_mgt); } @@ -1086,18 +1237,6 @@ static void nbl_res_adminq_recv_port_notify(void *priv, void *data) mutex_unlock(&adminq_mgt->eth_lock); } -static int nbl_get_highest_bit(u64 advertise) -{ - int highest_bit_pos = 0; - - while (advertise != 0) { - advertise >>= 1; - highest_bit_pos++; - } - - return highest_bit_pos; -} - static int nbl_res_adminq_set_port_advertising(void *priv, struct nbl_port_advertising *advertising) { @@ -1106,7 +1245,6 @@ static int nbl_res_adminq_set_port_advertising(void *priv, struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); struct nbl_chan_send_info chan_send; - int highest_bit_pos = 0; struct nbl_port_key *param; int param_len = 0; int eth_id = 0; @@ -1139,10 +1277,12 @@ static int nbl_res_adminq_set_port_advertising(void *priv, /* set FEC */ if (advertising->active_fec != 0) { - new_advert = new_advert & ~NBL_PORT_CAP_FEC_MASK; + new_advert = new_advert & ~NBL_PORT_CAP_FEC_MASK & ~BIT(NBL_PORT_CAP_FEC_AUTONEG); /* when ethtool set FEC_AUTO, we set default fec mode */ - if (advertising->active_fec == NBL_PORT_FEC_AUTO && !advertising->autoneg) { + if (advertising->active_fec == NBL_PORT_FEC_AUTO && + (!advertising->autoneg && + !(eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_FEC_AUTONEG)))) { advertising->active_fec = NBL_PORT_FEC_OFF; if (eth_info->link_speed[eth_id] == SPEED_1000) advertising->active_fec = NBL_ETH_1G_DEFAULT_FEC_MODE; @@ -1153,34 +1293,28 @@ static int nbl_res_adminq_set_port_advertising(void *priv, } if (advertising->active_fec == NBL_PORT_FEC_OFF) - new_advert |= BIT(NBL_PORT_CAP_FEC_NONE); + new_advert |= BIT(NBL_PORT_CAP_FEC_OFF); if (advertising->active_fec == NBL_PORT_FEC_RS) new_advert |= BIT(NBL_PORT_CAP_FEC_RS); if (advertising->active_fec == NBL_PORT_FEC_BASER) new_advert |= BIT(NBL_PORT_CAP_FEC_BASER); - if (advertising->active_fec == NBL_PORT_FEC_AUTO) + if (advertising->active_fec == NBL_PORT_FEC_AUTO) { new_advert |= NBL_PORT_CAP_FEC_MASK; + if (eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_FEC_AUTONEG)) + new_advert |= BIT(NBL_PORT_CAP_FEC_AUTONEG); + } } /* set speed */ if (advertising->speed_advert != 0) { new_advert = (new_advert & (NBL_PORT_CAP_AUTONEG_MASK | NBL_PORT_CAP_FEC_MASK | - NBL_PORT_CAP_PAUSE_MASK)) | advertising->speed_advert; + NBL_PORT_CAP_PAUSE_MASK | BIT(NBL_PORT_CAP_FEC_AUTONEG))) | + advertising->speed_advert; } - highest_bit_pos = nbl_get_highest_bit(new_advert); - /* speed 10G only can set fec off or baseR, if set RS we change it to baseR */ - if (highest_bit_pos <= NBL_PORT_CAP_10GBASE_SR && - highest_bit_pos >= NBL_PORT_CAP_10GBASE_T && !advertising->autoneg) { - if (new_advert & BIT(NBL_PORT_CAP_FEC_RS)) { - new_advert = new_advert & ~NBL_PORT_CAP_FEC_MASK; - new_advert |= BIT(NBL_PORT_CAP_FEC_BASER); - dev_notice(dev, "speed 10G default set fec baseR, set fec baseR\n"); - dev_notice(dev, "set new_advert:%llx\n", new_advert); - } - } - - if (eth_info->port_max_rate[eth_id] != NBL_PORT_MAX_RATE_100G_PAM4) + if (eth_info->port_max_rate[eth_id] != NBL_PORT_MAX_RATE_100G_PAM4 || + (!(new_advert & NBL_PORT_CAP_SPEED_100G_MASK) && + 
eth_info->port_max_rate[eth_id] == NBL_PORT_MAX_RATE_100G_PAM4)) new_advert &= ~NBL_PORT_CAP_PAM4_MASK; else new_advert |= NBL_PORT_CAP_PAM4_MASK; @@ -1200,11 +1334,12 @@ static int nbl_res_adminq_set_port_advertising(void *priv, param, param_len, NULL, 0, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, set_port_advertising\n", + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," + " eth_id:%d, set_port_advertising\n", ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, eth_info->logic_eth_id[eth_id]); kfree(param); - return ret; + return -EIO; } eth_info->port_advertising[eth_id] = new_advert; @@ -1227,6 +1362,8 @@ static int nbl_res_adminq_get_port_state(void *priv, u8 eth_id, struct nbl_port_ port_state->link_state = eth_info->link_state[eth_id]; port_state->module_inplace = eth_info->module_inplace[eth_id]; port_state->fw_port_max_speed = res_mgt->resource_info->board_info.eth_speed; + port_state->module_repluged = eth_info->module_repluged[eth_id]; + eth_info->module_repluged[eth_id] = 0; if (port_state->module_inplace) { port_state->port_type = eth_info->port_type[eth_id]; port_state->port_max_rate = eth_info->port_max_rate[eth_id]; @@ -1309,7 +1446,8 @@ static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); u8 module_inplace = 0; /* 1 inplace, 0 not inplace */ u32 start = eeprom->offset; - u32 length = eeprom->len; + u32 total_len = eeprom->len; + u32 length; u8 turn_page, offset; int ret; @@ -1324,12 +1462,12 @@ static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, } if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { - while (start < ETH_MODULE_SFF_8636_MAX_LEN) { - length = SFF_8638_PAGESIZE; - if (start + length > ETH_MODULE_SFF_8636_MAX_LEN) - length = ETH_MODULE_SFF_8636_MAX_LEN - start; - + while (start < ETH_MODULE_SFF_8636_MAX_LEN && total_len) { nbl_res_get_module_eeprom_page(start, &turn_page, &offset); + length = min(SFF_8638_PAGESIZE, total_len); + if (offset % SFF_8638_PAGESIZE + length > SFF_8638_PAGESIZE) + length = SFF_8638_PAGESIZE - offset % SFF_8638_PAGESIZE; + ret = nbl_res_adminq_turn_module_eeprom_page(res_mgt, eth_id, turn_page); if (ret) { dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", @@ -1338,7 +1476,7 @@ static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, } ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, - I2C_DEV_ADDR_A0, 0, 0, + I2C_DEV_ADDR_A0, turn_page, 0, offset, length, data); if (ret) { dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", @@ -1347,14 +1485,15 @@ static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, } start += length; data += length; - length = eeprom->len - length; + total_len -= length; } return 0; } + length = total_len; /* Read A0 portion of eth EEPROM */ if (start < ETH_MODULE_SFF_8079_LEN) { - if (start + eeprom->len > ETH_MODULE_SFF_8079_LEN) + if (start + length > ETH_MODULE_SFF_8079_LEN) length = ETH_MODULE_SFF_8079_LEN - start; ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, 0, 0, @@ -1366,7 +1505,7 @@ static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, } start += length; data += length; - length = eeprom->len - length; + length = total_len - length; } /* Read A2 portion of eth EEPROM */ @@ -1396,6 +1535,56 @@ static int nbl_res_adminq_get_link_state(void *priv, u8 eth_id, return 0; } 
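+
+/* The adminq "manage port attributes" messages used below pack one
+ * key/value pair per u64 slot: data = (key << NBL_PORT_KEY_KEY_SHIFT) | value,
+ * with param->subop selecting NBL_PORT_SUBOP_READ or NBL_PORT_SUBOP_WRITE;
+ * e.g. nbl_res_adminq_set_eth_pfc() ORs the per-priority PFC bits into the
+ * low bits before adding NBL_PORT_KEY_SET_PFC_CFG.
+ */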
+static int nbl_res_adminq_get_link_down_count(void *priv, u8 eth_id, u64 *link_down_count)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+
+	*link_down_count = eth_info->link_down_count[eth_id];
+	return 0;
+}
+
+static int nbl_res_adminq_get_link_status_opcode(void *priv, u8 eth_id, u32 *link_status_opcode)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_chan_send_info chan_send;
+	struct nbl_port_key *param;
+	u64 data = 0, key = 0, result = 0;
+	int param_len = 0, ret = 0;
+
+	param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64);
+	param = kzalloc(param_len, GFP_KERNEL);
+	if (!param)
+		return -ENOMEM;
+
+	key = NBL_PORT_KEY_GET_LINK_STATUS_OPCODE;
+
+	data += (key << NBL_PORT_KEY_KEY_SHIFT);
+
+	memset(param, 0, param_len);
+	param->id = eth_id;
+	param->subop = NBL_PORT_SUBOP_READ;
+	param->data[0] = data;
+
+	NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID,
+		      NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES,
+		      param, param_len, &result, sizeof(result), 1);
+	ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+	if (ret) {
+		dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d\n",
+			ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES,
+			eth_info->logic_eth_id[eth_id]);
+		kfree(param);
+		return ret;
+	}
+
+	*link_status_opcode = result;
+
+	kfree(param);
+	return 0;
+}
+
 static int nbl_res_adminq_get_eth_mac_addr(void *priv, u8 *mac, u8 eth_id)
 {
 	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
@@ -1498,6 +1687,26 @@ static int nbl_res_adminq_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id)
 	return 0;
 }
 
+static int nbl_res_adminq_get_fec_stats(void *priv, u32 eth_id,
+					struct nbl_fec_stats *fec_stats)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_chan_send_info chan_send;
+	int data_len = sizeof(struct nbl_fec_stats);
+	int ret;
+
+	NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID,
+		      NBL_CHAN_MSG_ADMINQ_GET_FEC_STATS, &eth_id,
+		      sizeof(eth_id), fec_stats, data_len, 1);
+	ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+	if (ret)
+		dev_err(dev, "ctrl eth %d fec stats failed\n", eth_id);
+
+	return ret;
+}
+
 static int nbl_res_adminq_ctrl_port_led(void *priv, u8 eth_id,
 					enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg)
 {
@@ -1515,7 +1724,7 @@
 	param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64);
 	param = kzalloc(param_len, GFP_KERNEL);
 
-	key = NBL_PORT_KRY_LED_BLINK;
+	key = NBL_PORT_KEY_LED_BLINK;
 
 	switch (led_ctrl) {
 	case NBL_LED_REG_ACTIVE:
@@ -1549,6 +1758,52 @@
 	return 0;
 }
 
+static int nbl_res_adminq_set_eth_pfc(void *priv, u8 eth_id, u8 *pfc)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_chan_send_info chan_send;
+	struct nbl_port_key *param;
+	int param_len = 0;
+	u64 data = 0;
+
u64 key = 0; + int ret; + int i; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) { + if (pfc[i]) + data |= 1 << i; + } + + key = NBL_PORT_KEY_SET_PFC_CFG; + data += (key << NBL_PORT_KEY_KEY_SHIFT); + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d,\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + kfree(param); + return 0; +} + static int nbl_res_adminq_pt_filter_in(struct nbl_resource_mgt *res_mgt, struct nbl_passthrough_fw_cmd_param *param) { @@ -1563,7 +1818,8 @@ static int nbl_res_adminq_pt_filter_in(struct nbl_resource_mgt *res_mgt, } static int nbl_res_adminq_pt_filter_out(struct nbl_resource_mgt *res_mgt, - struct nbl_passthrough_fw_cmd_param *param) + struct nbl_passthrough_fw_cmd_param *param, + struct nbl_passthrough_fw_cmd_param *result) { struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); struct nbl_res_fw_cmd_filter *filter; @@ -1571,9 +1827,10 @@ static int nbl_res_adminq_pt_filter_out(struct nbl_resource_mgt *res_mgt, filter = nbl_common_get_hash_node(adminq_mgt->cmd_filter, ¶m->opcode); if (filter && filter->out) - ret = filter->out(res_mgt, param->data, param->out_size); + ret = filter->out(res_mgt, param->data, param->in_size, + result->data, result->out_size); - return ret; + return 0; } static int nbl_res_adminq_passthrough(void *priv, struct nbl_passthrough_fw_cmd_param *param, @@ -1617,7 +1874,7 @@ static int nbl_res_adminq_passthrough(void *priv, struct nbl_passthrough_fw_cmd_ if (result->out_size) memcpy(result->data, out_data, param->out_size); - nbl_res_adminq_pt_filter_out(res_mgt, result); + nbl_res_adminq_pt_filter_out(res_mgt, param, result); send_fail: kfree(out_data); @@ -1650,7 +1907,7 @@ static int nbl_res_adminq_update_ring_num(void *priv) goto alloc_info_fail; } - param->resid = NBL_ADMINQ_PFA_TLV_PFVF_RING_ID; + param->resid = NBL_ADMINQ_PFA_TLV_NET_RING_NUM; param->offset = 0; param->len = sizeof(*info); NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ, @@ -1663,7 +1920,9 @@ static int nbl_res_adminq_update_ring_num(void *priv) goto send_fail; } - if (info->pf_def_max_net_qp_num && info->vf_def_max_net_qp_num) + if (info->pf_def_max_net_qp_num && info->vf_def_max_net_qp_num && + !nbl_res_adminq_check_net_ring_num(res_mgt, + (struct nbl_fw_cmd_net_ring_num_param *)info)) memcpy(&res_info->net_ring_num_info, info, sizeof(res_info->net_ring_num_info)); send_fail: @@ -1674,143 +1933,191 @@ static int nbl_res_adminq_update_ring_num(void *priv) return ret; } -static int nbl_res_adminq_set_ring_num(void *priv, struct nbl_fw_cmd_ring_num_param *param) +static int nbl_res_adminq_rdma_cap_default(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id = 0, vsi_id = 0; + int pf_num = NBL_RES_MGT_TO_PF_NUM(res_mgt); + int per_pf_num = (NBL_RES_RDMA_MAX - pf_num + 1) / pf_num, i, j; + int per_pf_vf_num = res_info->max_vf_num / pf_num; + int rdma_reserve = 
NBL_RES_MGT_TO_COMMON(res_mgt)->product_type == NBL_LEONIS_TYPE; + + per_pf_num = min_t(int, per_pf_num, per_pf_vf_num); + for (i = 0; i < pf_num; i++) { + vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, i, -1, NBL_VSI_DATA); + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + if (func_id < NBL_MAX_FUNC) + set_bit(func_id, res_info->rdma_info.func_cap); + + /* If we have reserved rdma aux dev, remove these on the last pf */ + for (j = 0; j < per_pf_num - (!(pf_num - i - 1)) * rdma_reserve; j++) { + vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, i, j, NBL_VSI_DATA); + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + if (func_id < NBL_MAX_FUNC) + set_bit(func_id, res_info->rdma_info.func_cap); + } + } + + return 0; +} + +static int nbl_res_adminq_rdma_cap_tlv(struct nbl_resource_mgt *res_mgt, + struct nbl_rdma_cap_info *info) +{ + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + int i, j; + + for (i = 0; i < NBL_RDMA_CAP_CMD_LEN; i++) + for (j = 0; j < BITS_PER_BYTE; j++) + if (info->rdma_func_bitmaps[i] & BIT(j)) + set_bit(i * BITS_PER_BYTE + j, res_info->rdma_info.func_cap); + + return 0; +} + +static int nbl_res_adminq_update_rdma_cap(void *priv) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); struct nbl_chan_send_info chan_send; - struct nbl_chan_resource_write_param *data; - int data_len = sizeof(struct nbl_fw_cmd_ring_num_param); + struct nbl_chan_resource_read_param *param; + struct nbl_rdma_cap_info *info; int ret = 0; - data = kzalloc(sizeof(*data) + data_len, GFP_KERNEL); - if (!data) - goto alloc_data_fail; + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) { + ret = -ENOMEM; + goto alloc_param_fail; + } - data->resid = NBL_ADMINQ_PFA_TLV_PFVF_RING_ID; - data->offset = 0; - data->len = data_len; - memcpy(data->data, param, data_len); + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto alloc_info_fail; + } + + param->resid = NBL_ADMINQ_PFA_TLV_RDMA_CAP; + param->offset = 0; + param->len = sizeof(*info); + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ, + param, sizeof(*param), info, sizeof(*info), 1); - NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_RESOURCE_WRITE, - data, sizeof(*data) + data_len, NULL, 0, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); - if (ret) - dev_err(dev, "adminq send msg failed with ret: %d\n", ret); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ); + info->valid = 1; + } - kfree(data); -alloc_data_fail: + /* For some reason, valid == 0 means valid, and 1 means invalid */ + if (info->valid) + nbl_res_adminq_rdma_cap_default(res_mgt); + else + nbl_res_adminq_rdma_cap_tlv(res_mgt, info); + + kfree(info); +alloc_info_fail: + kfree(param); +alloc_param_fail: return ret; } -static void nbl_res_adminq_set_eth_speed(struct nbl_resource_mgt *res_mgt, - u8 eth_id, u32 speed, u8 active_fec, u8 autoneg) +static u16 nbl_res_adminq_get_rdma_cap_num(void *priv) { - struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); - struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); - struct nbl_port_advertising port_advertising = {0}; - u64 speed_advert = 0; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = 
NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 rdma_cap_num = 0; + int i; - speed_advert = nbl_speed_to_link_mode(speed, autoneg); - speed_advert &= eth_info->port_caps[eth_id]; + for (i = 0; i < NBL_MAX_FUNC; i++) + if (test_bit(i, res_info->rdma_info.func_cap)) + rdma_cap_num++; - if (!speed_advert) { - dev_err(dev, "eth %d speed %d is not support, exit\n", - eth_info->logic_eth_id[eth_id], speed); - return; - } + return rdma_cap_num; +} - if (active_fec == NBL_PORT_FEC_OFF) { - if (!(eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_FEC_NONE))) { - dev_err(dev, "eth %d optical module plug in, want to set fec mode off, but eth caps %llx donot support it\n", - eth_info->logic_eth_id[eth_id], eth_info->port_caps[eth_id]); - } - } - if (active_fec == NBL_PORT_FEC_RS) { - if (!(eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_FEC_RS))) { - dev_err(dev, "eth %d optical module plug in, want to set fec mode RS, but eth caps %llx donot support it\n", - eth_info->logic_eth_id[eth_id], eth_info->port_caps[eth_id]); - } +static int nbl_res_adminq_update_rdma_mem_type(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); + struct nbl_chan_send_info chan_send; + struct nbl_chan_resource_read_param *param; + struct nbl_rdma_mem_type_info *info; + int ret = 0; + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) { + ret = -ENOMEM; + goto alloc_param_fail; } - if (active_fec == NBL_PORT_FEC_BASER) { - if (!(eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_FEC_BASER))) { - dev_err(dev, "eth %d optical module plug in, want to set fec mode baseR, but eth caps %llx donot support it\n", - eth_info->logic_eth_id[eth_id], eth_info->port_caps[eth_id]); - } + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto alloc_info_fail; } - if (active_fec == NBL_PORT_FEC_AUTO) { - if (!(eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_AUTONEG))) { - dev_err(dev, "eth %d optical module plug in, want to set fec mode auto, but eth caps %llx donot support it\n", - eth_info->logic_eth_id[eth_id], eth_info->port_caps[eth_id]); - } + + param->resid = NBL_ADMINQ_PFA_TLV_RDMA_MEM_TYPE; + param->offset = 0; + param->len = sizeof(*info); + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ, + param, sizeof(*param), info, sizeof(*info), 1); + + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ); + goto send_fail; } - port_advertising.eth_id = eth_id; - port_advertising.speed_advert = speed_advert; - port_advertising.active_fec = active_fec; - port_advertising.autoneg = autoneg; - dev_info(dev, "eth %d optical module plug in, set speed_advert:%llx, active_fec:%x, autoneg %d\n", - eth_info->logic_eth_id[eth_id], speed_advert, active_fec, autoneg); - nbl_res_adminq_set_port_advertising(res_mgt, &port_advertising); + + if (info->mem_type <= NBL_RDMA_MEM_TYPE_MAX) + res_info->rdma_info.mem_type = info->mem_type; + else + res_info->rdma_info.mem_type = NBL_RDMA_MEM_TYPE_MAX; + +send_fail: + kfree(info); +alloc_info_fail: + kfree(param); +alloc_param_fail: + return ret; } -static void nbl_res_adminq_recovery_eth(struct nbl_resource_mgt *res_mgt, u8 eth_id) +static int 
nbl_res_adminq_set_ring_num(void *priv, struct nbl_fw_cmd_net_ring_num_param *param) { - struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); - u8 port_max_rate = 0; - u8 port_type; - u32 port_max_speed = 0; - u8 active_fec = 0; - u8 autoneg = 0; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); + struct nbl_chan_send_info chan_send; + struct nbl_chan_resource_write_param *data; + int data_len = sizeof(struct nbl_fw_cmd_net_ring_num_param); + int ret = 0; - if (!eth_info->module_inplace[eth_id]) - return; + data = kzalloc(sizeof(*data) + data_len, GFP_KERNEL); + if (!data) + return -ENOMEM; - port_max_rate = eth_info->port_max_rate[eth_id]; + data->resid = NBL_ADMINQ_PFA_TLV_NET_RING_NUM; + data->offset = 0; + data->len = data_len; - switch (port_max_rate) { - case NBL_PORT_MAX_RATE_1G: - port_max_speed = SPEED_1000; - active_fec = NBL_ETH_1G_DEFAULT_FEC_MODE; - break; - case NBL_PORT_MAX_RATE_10G: - port_max_speed = SPEED_10000; - active_fec = NBL_ETH_10G_DEFAULT_FEC_MODE; - break; - case NBL_PORT_MAX_RATE_25G: - port_max_speed = SPEED_25000; - active_fec = NBL_ETH_25G_DEFAULT_FEC_MODE; - break; - case NBL_PORT_MAX_RATE_100G: - case NBL_PORT_MAX_RATE_100G_PAM4: - port_max_speed = SPEED_100000; - active_fec = NBL_ETH_100G_DEFAULT_FEC_MODE; - break; - default: - /* default set 25G */ - port_max_speed = SPEED_25000; - active_fec = NBL_ETH_25G_DEFAULT_FEC_MODE; - break; - } + memcpy(data + 1, param, data_len); - port_type = eth_info->port_type[eth_id]; - /* cooper support auto-negotiation */ - if (port_type == NBL_PORT_TYPE_COPPER) { - if (port_max_speed >= SPEED_25000) - autoneg = 1; - else - autoneg = 0; /* disable autoneg when 10G module pluged */ + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_RESOURCE_WRITE, + data, sizeof(*data) + data_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + dev_err(dev, "adminq send msg failed with ret: %d\n", ret); - eth_info->port_caps[eth_id] |= BIT(NBL_PORT_CAP_AUTONEG); - } else { - autoneg = 0; - eth_info->port_caps[eth_id] &= ~BIT_MASK(NBL_PORT_CAP_AUTONEG); - } - /* when optical module plug in, we must set default fec */ - nbl_res_adminq_set_eth_speed(res_mgt, eth_id, port_max_speed, active_fec, autoneg); + kfree(data); + return ret; } -static int nbl_res_adminq_nway_reset(void *priv, u8 eth_id) +static int nbl_res_adminq_restore_default_cfg(void *priv, u8 eth_id) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); @@ -1823,7 +2130,7 @@ static int nbl_res_adminq_nway_reset(void *priv, u8 eth_id) u64 key = 0; int ret; - key = NBL_PORT_KEY_DISABLE; + key = NBL_PORT_KEY_RESTORE_DEFAULTE_CFG; data = (key << NBL_PORT_KEY_KEY_SHIFT); param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); param = kzalloc(param_len, GFP_KERNEL); @@ -1836,31 +2143,64 @@ static int nbl_res_adminq_nway_reset(void *priv, u8 eth_id) param, param_len, NULL, 0, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "ctrl eth %d disable failed ret %d\n", + dev_err(dev, "ctrl eth %d restore defaulte cfg failed ret %d\n", eth_info->logic_eth_id[eth_id], ret); kfree(param); return ret; } - key = NBL_PORT_KEY_ENABLE; - data = NBL_PORT_FLAG_ENABLE_NOTIFY + (key << NBL_PORT_KEY_KEY_SHIFT); + 
+	kfree(param);
+	return 0;
+}
+
+static int nbl_res_adminq_nway_reset(void *priv, u8 eth_id)
+{
+	return nbl_res_adminq_restore_default_cfg(priv, eth_id);
+}
 
-	param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64);
-	param->data[0] = data;
-	param->id = eth_id;
-	param->subop = NBL_PORT_SUBOP_WRITE;
+static int nbl_res_adminq_init_port(void *priv)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	u8 eth_id;
+
+	for_each_set_bit(eth_id, eth_info->eth_bitmap, NBL_MAX_ETHERNET)
+		nbl_res_adminq_restore_default_cfg(priv, eth_id);
+
+	return 0;
+}
+
+static int nbl_res_adminq_set_wol(void *priv, u8 eth_id, bool enable)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt));
+	struct nbl_chan_send_info chan_send;
+	struct nbl_chan_adminq_reg_write_param reg_write = {0};
+	struct nbl_chan_adminq_reg_read_param reg_read = {0};
+	u32 value;
+	int ret = 0;
+
+	dev_info(dev, "set_wol ethid %d %sabled\n", eth_id, enable ? "en" : "dis");
+
+	reg_read.reg = NBL_ADMINQ_ETH_WOL_REG_OFFSET;
+	NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_REGISTER_READ,
+		      &reg_read, sizeof(reg_read), &value, sizeof(value), 1);
 	ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
 	if (ret) {
-		dev_err(dev, "ctrl eth %d enable failed %d\n",
-			eth_info->logic_eth_id[eth_id], ret);
-		kfree(param);
+		dev_err(dev, "adminq send msg failed with ret: %d\n", ret);
 		return ret;
 	}
 
-	nbl_res_adminq_recovery_eth(res_mgt, eth_id);
+	reg_write.reg = NBL_ADMINQ_ETH_WOL_REG_OFFSET;
+	reg_write.value = (value & ~(1 << eth_id)) | (enable << eth_id);
+	NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_REGISTER_WRITE,
+		      &reg_write, sizeof(reg_write), NULL, 0, 1);
+	ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+	if (ret)
+		dev_err(dev, "adminq send msg failed with ret: %d\n", ret);
 
-	kfree(param);
-	return 0;
+	return ret;
 }
 
 #define ADD_ETH_STATISTICS(name) {#name}
@@ -1892,8 +2232,8 @@ static struct nbl_leonis_eth_stats_info _eth_statistics[] = {
 	ADD_ETH_STATISTICS(eth_frames_tx_128_to_255B),
 	ADD_ETH_STATISTICS(eth_frames_tx_256_to_511B),
 	ADD_ETH_STATISTICS(eth_frames_tx_512_to_1023B),
-	ADD_ETH_STATISTICS(eth_frames_tx_1024_to_1535B),
-	ADD_ETH_STATISTICS(eth_frames_tx_1536_to_2047B),
+	ADD_ETH_STATISTICS(eth_frames_tx_1024_to_1518B),
+	ADD_ETH_STATISTICS(eth_frames_tx_1519_to_2047B),
 	ADD_ETH_STATISTICS(eth_frames_tx_2048_to_MAXB),
 	ADD_ETH_STATISTICS(eth_undersize_frames_tx_goodfcs),
 	ADD_ETH_STATISTICS(eth_oversize_frames_tx_goodfcs),
@@ -1939,13 +2279,14 @@ static struct nbl_leonis_eth_stats_info _eth_statistics[] = {
 	ADD_ETH_STATISTICS(eth_frames_rx_128_to_255B),
 	ADD_ETH_STATISTICS(eth_frames_rx_256_to_511B),
 	ADD_ETH_STATISTICS(eth_frames_rx_512_to_1023B),
-	ADD_ETH_STATISTICS(eth_frames_rx_1024_to_1535B),
-	ADD_ETH_STATISTICS(eth_frames_rx_1536_to_2047B),
+	ADD_ETH_STATISTICS(eth_frames_rx_1024_to_1518B),
+	ADD_ETH_STATISTICS(eth_frames_rx_1519_to_2047B),
 	ADD_ETH_STATISTICS(eth_frames_rx_2048_to_MAXB),
 	ADD_ETH_STATISTICS(eth_octets_rx),
 	ADD_ETH_STATISTICS(eth_octets_rx_ok),
 	ADD_ETH_STATISTICS(eth_octets_rx_badfcs),
 	ADD_ETH_STATISTICS(eth_octets_rx_dropped),
+	ADD_ETH_STATISTICS(eth_unsupported_opcodes_rx),
 };
 
 static void nbl_res_adminq_get_private_stat_len(void *priv, u32 *len)
@@ -1953,25 +2294,79 @@ static void nbl_res_adminq_get_private_stat_len(void *priv, u32 *len)
 	*len = ARRAY_SIZE(_eth_statistics);
 }
 
-static void nbl_res_adminq_get_private_stat_data(void *priv, u32 eth_id, u64 *data)
+static void nbl_res_adminq_get_private_stat_data(void *priv, u32 eth_id, u64 *data, u32 data_len)
 {
 	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
 	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
 	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
 	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
 	struct nbl_chan_send_info chan_send;
-	int data_length = sizeof(struct nbl_leonis_eth_stats);
 	int ret = 0;
 
 	NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS,
-		      &eth_id, sizeof(eth_id), data, data_length, 1);
+		      &eth_id, sizeof(eth_id), data, data_len, 1);
 	ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
 	if (ret)
 		dev_err(dev, "adminq get eth %d stats failed ret: %d\n",
 			eth_info->logic_eth_id[eth_id], ret);
 }
 
+static int nbl_res_adminq_get_eth_ctrl_stats(void *priv, u32 eth_id,
+					     struct nbl_eth_ctrl_stats *eth_ctrl_stats)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_chan_send_info chan_send;
+	struct nbl_leonis_eth_stats eth_stats = {{0}};
+	int data_length = sizeof(struct nbl_leonis_eth_stats);
+	int ret = 0;
+
+	NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID,
+		      NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS,
+		      &eth_id, sizeof(eth_id), &eth_stats, data_length, 1);
+	ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+	if (ret) {
+		dev_err(dev, "adminq get eth %d ctrl stats failed ret: %d\n",
+			eth_info->logic_eth_id[eth_id], ret);
+		return ret;
+	}
+	eth_ctrl_stats->macctrl_frames_txd_ok = eth_stats.tx_stats.macctrl_frames_txd_ok;
+	eth_ctrl_stats->macctrl_frames_rxd = eth_stats.rx_stats.macctrl_frames_rxd;
+	eth_ctrl_stats->unsupported_opcodes_rx = eth_stats.rx_stats.unsupported_opcodes_rx;
+
+	return ret;
+}
+
+static int nbl_res_adminq_get_pause_stats(void *priv, u32 eth_id,
+					  struct nbl_pause_stats *pause_stats)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_chan_send_info chan_send;
+	struct nbl_leonis_eth_stats eth_stats;
+	int data_length = sizeof(struct nbl_leonis_eth_stats);
+	int ret = 0;
+
+	NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID,
+		      NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS,
+		      &eth_id, sizeof(eth_id), (void *)&eth_stats, data_length, 1);
+	ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+	if (ret) {
+		dev_err(dev, "adminq get eth %d pause stats failed ret: %d\n",
+			eth_info->logic_eth_id[eth_id], ret);
+		return ret;
+	}
+	pause_stats->rx_pause_frames = eth_stats.rx_stats.pause_macctrl_frames_rxd;
+	pause_stats->tx_pause_frames = eth_stats.tx_stats.pause_macctrl_frames_txd;
+
+	return ret;
+}
+
 static void nbl_res_adminq_fill_private_stat_strings(void *priv, u8 *strings)
 {
 	int i;
@@ -1982,37 +2377,70 @@ static void nbl_res_adminq_fill_private_stat_strings(void *priv, u8 *strings)
 	}
 }
 
-static u32 nbl_convert_temp_type_eeprom_offset(enum nbl_module_temp_type type)
+static int
+nbl_res_adminq_get_eth_abnormal_stats(void *priv, u32 eth_id,
+				      struct nbl_eth_abnormal_stats *eth_abnormal_stats)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_chan_send_info chan_send;
+	struct nbl_leonis_eth_stats eth_stats = {{ 0 }};
+	int data_length = sizeof(struct nbl_leonis_eth_stats);
+	int ret = 0;
+
+	NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID,
+		      NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS,
+		      &eth_id, sizeof(eth_id), (u64 *)&eth_stats, data_length, 1);
+	ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+	if (ret) {
+		dev_err(dev, "adminq get eth %d stats failed ret: %d\n",
+			eth_info->logic_eth_id[eth_id], ret);
+		return ret;
+	}
+
+	eth_abnormal_stats->rx_crc_errors = eth_stats.rx_stats.frames_rxd_badfcs;
+	eth_abnormal_stats->rx_frame_errors = eth_stats.rx_stats.frames_rxd_misc_error;
+	eth_abnormal_stats->rx_length_errors = eth_stats.rx_stats.undersize_frames_rxd_goodfcs +
+					       eth_stats.rx_stats.oversize_frames_rxd_goodfcs;
+
+	return 0;
+}
+
+static u32 nbl_convert_temp_type_eeprom_offset(enum nbl_hwmon_type type)
 {
 	switch (type) {
-	case NBL_MODULE_TEMP:
+	case NBL_HWMON_TEMP_INPUT:
 		return SFF_8636_TEMP;
-	case NBL_MODULE_TEMP_MAX:
+	case NBL_HWMON_TEMP_MAX:
 		return SFF_8636_TEMP_MAX;
-	case NBL_MODULE_TEMP_CRIT:
+	case NBL_HWMON_TEMP_CRIT:
 		return SFF_8636_TEMP_CIRT;
 	default:
 		return SFF_8636_TEMP;
 	}
 }
 
-static u32 nbl_convert_temp_type_qsfp28_eeprom_offset(enum nbl_module_temp_type type)
+static u32 nbl_convert_temp_type_qsfp28_eeprom_offset(enum nbl_hwmon_type type)
 {
 	switch (type) {
-	case NBL_MODULE_TEMP:
+	case NBL_HWMON_TEMP_INPUT:
 		return SFF_8636_QSFP28_TEMP;
-	case NBL_MODULE_TEMP_MAX:
+	case NBL_HWMON_TEMP_MAX:
 		return SFF_8636_QSFP28_TEMP_MAX;
-	case NBL_MODULE_TEMP_CRIT:
+	case NBL_HWMON_TEMP_CRIT:
 		return SFF_8636_QSFP28_TEMP_CIRT;
 	default:
 		return SFF_8636_QSFP28_TEMP;
 	}
 }
 
-static int nbl_res_adminq_get_module_temp_common(struct nbl_resource_mgt *res_mgt, u8 eth_id,
-						 enum nbl_module_temp_type type)
+/* return value is in millidegree Celsius (1/1000 degree) */
+static int nbl_res_adminq_get_module_temp_common(void *priv, u8 eth_id,
+						 enum nbl_hwmon_type type)
 {
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
 	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
 	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
 	struct ethtool_modinfo info = {0};
@@ -2040,11 +2468,99 @@ static int nbl_res_adminq_get_module_temp_common(struct nbl_resource_mg
 		return 0;
 	}
 
-	return temp;
+	return temp * 1000;
+}
+
+static int nbl_res_adminq_get_eth_mac_stats(void *priv, u32 eth_id,
+					    struct nbl_eth_mac_stats *eth_mac_stats)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_chan_send_info chan_send;
+	struct nbl_leonis_eth_stats eth_stats;
+	int data_length = sizeof(struct nbl_leonis_eth_stats);
+	int ret = 0;
+
+	NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID,
+		      NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS,
+		      &eth_id, sizeof(eth_id), (void *)&eth_stats, data_length, 1);
+	ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+	if (ret) {
+		dev_err(dev, "adminq get eth %d stats failed ret: %d\n",
+			eth_info->logic_eth_id[eth_id], ret);
+		return ret;
+	}
+	eth_mac_stats->frames_txd_ok = eth_stats.tx_stats.frames_txd_ok;
+	eth_mac_stats->frames_rxd_ok = eth_stats.rx_stats.frames_rxd_ok;
+	eth_mac_stats->octets_txd_ok = eth_stats.tx_stats.octets_txd_ok;
+	eth_mac_stats->octets_rxd_ok = eth_stats.rx_stats.octets_rxd_ok;
+	eth_mac_stats->multicast_frames_txd_ok = eth_stats.tx_stats.multicast_frames_txd_ok;
+	eth_mac_stats->broadcast_frames_txd_ok = eth_stats.tx_stats.broadcast_frames_txd_ok;
+	eth_mac_stats->multicast_frames_rxd_ok = eth_stats.rx_stats.multicast_frames_rxd_ok;
+	eth_mac_stats->broadcast_frames_rxd_ok = eth_stats.rx_stats.broadcast_frames_rxd_ok;
+
+	return ret;
+}
+
+static int nbl_res_adminq_get_rmon_stats(void *priv, u32 eth_id,
+					 struct nbl_rmon_stats *rmon_stats)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_chan_send_info chan_send;
+	struct nbl_leonis_eth_stats eth_stats = {{0}};
+	int data_length = sizeof(struct nbl_leonis_eth_stats);
+	u64 *rx = rmon_stats->rmon_rx_range;
+	u64 *tx = rmon_stats->rmon_tx_range;
+	int ret = 0;
+
+	NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID,
+		      NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS,
+		      &eth_id, sizeof(eth_id), (void *)&eth_stats, data_length, 1);
+	ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+	if (ret) {
+		dev_err(dev, "adminq get eth %d rmon stats failed ret: %d\n",
+			eth_info->logic_eth_id[eth_id], ret);
+		return ret;
+	}
+	rmon_stats->undersize_frames_rxd_goodfcs =
+		eth_stats.rx_stats.undersize_frames_rxd_goodfcs;
+	rmon_stats->oversize_frames_rxd_goodfcs =
+		eth_stats.rx_stats.oversize_frames_rxd_goodfcs;
+	rmon_stats->undersize_frames_rxd_badfcs =
+		eth_stats.rx_stats.undersize_frames_rxd_badfcs;
+	rmon_stats->oversize_frames_rxd_badfcs =
+		eth_stats.rx_stats.oversize_frames_rxd_badfcs;
+
+	rx[ETHER_STATS_PKTS_64_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange0;
+	rx[ETHER_STATS_PKTS_65_TO_127_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange1;
+	rx[ETHER_STATS_PKTS_128_TO_255_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange2;
+	rx[ETHER_STATS_PKTS_256_TO_511_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange3;
+	rx[ETHER_STATS_PKTS_512_TO_1023_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange4;
+	rx[ETHER_STATS_PKTS_1024_TO_1518_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange5;
+	rx[ETHER_STATS_PKTS_1519_TO_2047_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange6;
+	rx[ETHER_STATS_PKTS_2048_TO_MAX_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange7;
+
+	tx[ETHER_STATS_PKTS_64_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange0;
+	tx[ETHER_STATS_PKTS_65_TO_127_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange1;
+	tx[ETHER_STATS_PKTS_128_TO_255_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange2;
+	tx[ETHER_STATS_PKTS_256_TO_511_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange3;
+	tx[ETHER_STATS_PKTS_512_TO_1023_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange4;
+	tx[ETHER_STATS_PKTS_1024_TO_1518_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange5;
+	tx[ETHER_STATS_PKTS_1519_TO_2047_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange6;
+	tx[ETHER_STATS_PKTS_2048_TO_MAX_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange7;
+
+	return ret;
}
 
+/* return value is in millidegree Celsius (1/1000 degree) */
 static
int nbl_res_adminq_get_module_temp_special(struct nbl_resource_mgt *res_mgt, u8 eth_id, - enum nbl_module_temp_type type) + enum nbl_hwmon_type type) { struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); @@ -2065,18 +2581,18 @@ static int nbl_res_adminq_get_module_temp_special(struct nbl_resource_mgt *res_m } ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, - 0, 0, offset, 1, (u8 *)&temp); + turn_page, 0, offset, 1, (u8 *)&temp); if (ret) { dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", eth_info->logic_eth_id[eth_id], ret); return 0; } - return temp; + return temp * 1000; } static int nbl_res_adminq_get_module_temperature(void *priv, u8 eth_id, - enum nbl_module_temp_type type) + enum nbl_hwmon_type type) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); @@ -2090,7 +2606,7 @@ static int nbl_res_adminq_get_module_temperature(void *priv, u8 eth_id, return nbl_res_adminq_get_module_temp_common(res_mgt, eth_id, type); } -static int nbl_res_adminq_load_p4(void *priv, struct nbl_load_p4_param *p4_param) +static __maybe_unused int nbl_res_adminq_load_p4(void *priv, struct nbl_load_p4_param *p4_param) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); @@ -2123,7 +2639,7 @@ static int nbl_res_adminq_load_p4(void *priv, struct nbl_load_p4_param *p4_param return ret; } -static int nbl_res_adminq_load_p4_default(void *priv) +static __maybe_unused int nbl_res_adminq_load_p4_default(void *priv) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); @@ -2141,6 +2657,23 @@ static int nbl_res_adminq_load_p4_default(void *priv) return ret; } +static void nbl_res_adminq_cfg_eth_bond_event(void *priv, bool enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_event_callback event_callback = {0}; + + event_callback.callback_data = res_mgt; + event_callback.callback = nbl_res_adminq_handle_link_state_update; + + if (enable) + nbl_event_register(NBL_EVENT_LINK_STATE_UPDATE, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + else + nbl_event_unregister(NBL_EVENT_LINK_STATE_UPDATE, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); +} + /* NBL_ADMINQ_SET_OPS(ops_name, func) * * Use X Macros to reduce setup and remove codes. 
@@ -2159,7 +2692,11 @@ do { \
	NBL_ADMINQ_SET_OPS(check_fw_reset, nbl_res_adminq_check_fw_reset); \
	NBL_ADMINQ_SET_OPS(get_port_attributes, nbl_res_adminq_get_port_attributes); \
	NBL_ADMINQ_SET_OPS(update_ring_num, nbl_res_adminq_update_ring_num); \
+	NBL_ADMINQ_SET_OPS(update_rdma_cap, nbl_res_adminq_update_rdma_cap); \
+	NBL_ADMINQ_SET_OPS(update_rdma_mem_type, nbl_res_adminq_update_rdma_mem_type); \
+	NBL_ADMINQ_SET_OPS(get_rdma_cap_num, nbl_res_adminq_get_rdma_cap_num); \
	NBL_ADMINQ_SET_OPS(set_ring_num, nbl_res_adminq_set_ring_num); \
+	NBL_ADMINQ_SET_OPS(init_port, nbl_res_adminq_init_port); \
	NBL_ADMINQ_SET_OPS(enable_port, nbl_res_adminq_enable_port); \
	NBL_ADMINQ_SET_OPS(recv_port_notify, nbl_res_adminq_recv_port_notify); \
	NBL_ADMINQ_SET_OPS(set_port_advertising, nbl_res_adminq_set_port_advertising); \
@@ -2167,16 +2704,26 @@ do { \
	NBL_ADMINQ_SET_OPS(get_module_info, nbl_res_adminq_get_module_info); \
	NBL_ADMINQ_SET_OPS(get_module_eeprom, nbl_res_adminq_get_module_eeprom); \
	NBL_ADMINQ_SET_OPS(get_link_state, nbl_res_adminq_get_link_state); \
+	NBL_ADMINQ_SET_OPS(get_link_down_count, nbl_res_adminq_get_link_down_count); \
+	NBL_ADMINQ_SET_OPS(get_link_status_opcode, nbl_res_adminq_get_link_status_opcode); \
	NBL_ADMINQ_SET_OPS(set_eth_mac_addr, nbl_res_adminq_set_eth_mac_addr); \
+	NBL_ADMINQ_SET_OPS(get_eth_ctrl_stats, nbl_res_adminq_get_eth_ctrl_stats); \
	NBL_ADMINQ_SET_OPS(ctrl_port_led, nbl_res_adminq_ctrl_port_led); \
+	NBL_ADMINQ_SET_OPS(set_wol, nbl_res_adminq_set_wol); \
	NBL_ADMINQ_SET_OPS(nway_reset, nbl_res_adminq_nway_reset); \
+	NBL_ADMINQ_SET_OPS(set_eth_pfc, nbl_res_adminq_set_eth_pfc); \
	NBL_ADMINQ_SET_OPS(passthrough_fw_cmd, nbl_res_adminq_passthrough); \
	NBL_ADMINQ_SET_OPS(get_private_stat_len, nbl_res_adminq_get_private_stat_len); \
	NBL_ADMINQ_SET_OPS(get_private_stat_data, nbl_res_adminq_get_private_stat_data); \
+	NBL_ADMINQ_SET_OPS(get_pause_stats, nbl_res_adminq_get_pause_stats); \
+	NBL_ADMINQ_SET_OPS(get_eth_mac_stats, nbl_res_adminq_get_eth_mac_stats); \
	NBL_ADMINQ_SET_OPS(fill_private_stat_strings, nbl_res_adminq_fill_private_stat_strings);\
	NBL_ADMINQ_SET_OPS(get_module_temperature, nbl_res_adminq_get_module_temperature); \
-	NBL_ADMINQ_SET_OPS(load_p4, nbl_res_adminq_load_p4); \
	NBL_ADMINQ_SET_OPS(load_p4_default, nbl_res_adminq_load_p4_default); \
+	NBL_ADMINQ_SET_OPS(cfg_eth_bond_event, nbl_res_adminq_cfg_eth_bond_event); \
+	NBL_ADMINQ_SET_OPS(get_eth_abnormal_stats, nbl_res_adminq_get_eth_abnormal_stats); \
+	NBL_ADMINQ_SET_OPS(get_fec_stats, nbl_res_adminq_get_fec_stats); \
+	NBL_ADMINQ_SET_OPS(get_rmon_stats, nbl_res_adminq_get_rmon_stats); \
 } while (0)
 
 /* Structure starts here, adding an op should not modify anything below */
@@ -2212,14 +2759,32 @@ static int nbl_res_adminq_chan_notify_link_state_req(struct nbl_resource_mgt *re
	return chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
 }
 
+static int nbl_res_adminq_notify_eth_rep_link_req(struct nbl_resource_mgt *res_mgt,
+						  u16 fid, u8 eth_id, u8 link_state)
+{
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct nbl_chan_param_eth_rep_notify_link_state param = {0};
+	struct nbl_chan_send_info chan_send;
+
+	param.eth_id = eth_id;
+	param.link_state = link_state;
+	NBL_CHAN_SEND(chan_send, fid, NBL_CHAN_MSG_NOTIFY_ETH_REP_LINK_STATE, &param, sizeof(param),
+		      NULL, 0, 0);
+	return chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+}
+
 static void nbl_res_adminq_notify_link_state(struct nbl_resource_mgt *res_mgt, u8 eth_id,
					      u8 link_state)
 {
	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_resource_mgt_leonis *res_mgt_leonis = (struct nbl_resource_mgt_leonis *)res_mgt;
+	struct nbl_pmd_status *pmd_status = &res_mgt_leonis->pmd_status;
	struct nbl_sriov_info *sriov_info;
	struct nbl_queue_info *queue_info;
-	u16 pf_fid = 0, vf_fid = 0, link_speed = 0;
+	u16 pf_fid = 0, vf_fid = 0, bond_link_state = 0, link_speed = 0;
	int i = 0, j = 0;
 
	for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) {
@@ -2238,6 +2803,11 @@ static void nbl_res_adminq_notify_link_state(struct nbl_resource_mgt *res_mgt, u
						  link_state,
						  eth_info->link_speed[eth_id]);
 
+		/* Use bond_link_state for vfs.
+		 * If there is no bond, then it will equal link_state.
+		 */
+		bond_link_state = nbl_res_adminq_get_bond_link_state(res_mgt, eth_id);
+
		/* send eth's link state to pf's all vf */
		for (j = 0; j < sriov_info->num_vfs; j++) {
			vf_fid = sriov_info->start_vf_func_id + j;
@@ -2245,11 +2815,17 @@ static void nbl_res_adminq_notify_link_state(struct nbl_resource_mgt *res_mgt, u
			if (queue_info->num_txrx_queues) {
				link_speed = eth_info->link_speed[eth_id];
				nbl_res_adminq_chan_notify_link_state_req(res_mgt, vf_fid,
-									  link_state,
+									  bond_link_state,
									  link_speed);
			}
		}
	}
+
+	if (pmd_status->upcall_port_info.upcall_port_active) {
+		nbl_res_adminq_notify_eth_rep_link_req(res_mgt,
+						       pmd_status->upcall_port_info.func_id,
+						       eth_id, link_state);
+	}
 }
 
 static void nbl_res_adminq_eth_task(struct work_struct *work)
@@ -2260,59 +2836,21 @@
	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
	u8 eth_id = 0;
	u8 port_max_rate = 0;
-	u32 port_max_speed = 0;
-	u8 active_fec = 0;
-	u8 autoneg = 0;
 
	for (eth_id = 0 ; eth_id < NBL_MAX_ETHERNET; eth_id++) {
		if (adminq_mgt->module_inplace_changed[eth_id]) {
			/* module not-inplace, transitions to inplace status */
-			/* read module register and set speed, */
-			/* set fec mode: 10G default OFF, 25G default RS */
+			/* read module register */
			port_max_rate = nbl_res_adminq_get_module_bitrate(res_mgt, eth_id);
-			switch (port_max_rate) {
-			case NBL_PORT_MAX_RATE_1G:
-				port_max_speed = SPEED_1000;
-				active_fec = NBL_ETH_1G_DEFAULT_FEC_MODE;
-				break;
-			case NBL_PORT_MAX_RATE_10G:
-				port_max_speed = SPEED_10000;
-				active_fec = NBL_ETH_10G_DEFAULT_FEC_MODE;
-				break;
-			case NBL_PORT_MAX_RATE_25G:
-				port_max_speed = SPEED_25000;
-				active_fec = NBL_ETH_25G_DEFAULT_FEC_MODE;
-				break;
-			case NBL_PORT_MAX_RATE_100G:
-			case NBL_PORT_MAX_RATE_100G_PAM4:
-				port_max_speed = SPEED_100000;
-				active_fec = NBL_ETH_100G_DEFAULT_FEC_MODE;
-				break;
-			default:
-				/* default set 25G */
-				port_max_speed = SPEED_25000;
-				active_fec = NBL_ETH_25G_DEFAULT_FEC_MODE;
-				break;
-			}
 
			eth_info->port_max_rate[eth_id] = port_max_rate;
			eth_info->port_type[eth_id] = nbl_res_adminq_get_port_type(res_mgt, eth_id);
+			eth_info->module_repluged[eth_id] = 1;
 
			/* cooper support auto-negotiation */
-			if (eth_info->port_type[eth_id] == NBL_PORT_TYPE_COPPER) {
-				if (port_max_speed >= SPEED_25000)
-					autoneg = 1;
-				else
-					autoneg = 0; /* disable autoneg when 10G module pluged */
-
+			if (eth_info->port_type[eth_id] == NBL_PORT_TYPE_COPPER)
				eth_info->port_caps[eth_id] |= BIT(NBL_PORT_CAP_AUTONEG);
-			} else {
-				autoneg = 0;
+			else
				eth_info->port_caps[eth_id] &= ~BIT_MASK(NBL_PORT_CAP_AUTONEG);
-			}
-
-			/* when optical module plug in, we must set default fec */
-
nbl_res_adminq_set_eth_speed(res_mgt, eth_id, port_max_speed, - active_fec, autoneg); adminq_mgt->module_inplace_changed[eth_id] = 0; } @@ -2348,10 +2886,9 @@ static int nbl_res_adminq_setup_cmd_filter(struct nbl_resource_mgt *res_mgt) static void nbl_res_adminq_remove_cmd_filter(struct nbl_resource_mgt *res_mgt) { struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); - struct nbl_hash_tbl_del_key del_key = {0}; if (adminq_mgt->cmd_filter) - nbl_common_remove_hash_table(adminq_mgt->cmd_filter, &del_key); + nbl_common_remove_hash_table(adminq_mgt->cmd_filter, NULL); adminq_mgt->cmd_filter = NULL; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h index 9cd868075827770420d21069867f16d3c6765aed..20e1291020595941aabbe084099d33b2962655ba 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h @@ -81,7 +81,8 @@ #define SFF_COPPER_8431_APPENDIX_E 1 #define SFF_COPPER_8431_LIMITING 4 #define SFF_8636_TURNPAGE_ADDR (127) -#define SFF_8638_PAGESIZE (128) +#define SFF_8638_PAGESIZE (128U) +#define SFF_8638_PAGE0_SIZE (256U) #define SFF_8636_TEMP (0x60) #define SFF_8636_TEMP_MAX (0x4) @@ -91,6 +92,8 @@ #define SFF_8636_QSFP28_TEMP_MAX (0x204) #define SFF_8636_QSFP28_TEMP_CIRT (0x200) +#define NBL_ADMINQ_ETH_WOL_REG_OFFSET (0x1604000 + 0x500) + /* Firmware version */ #define FIRMWARE_MAGIC "M181FWV0" #define BCD2BYTE(b) ({ typeof(b) _b = (b); \ @@ -100,10 +103,16 @@ (((_s) >> 8) & 0xF) * 100 + (((_s) >> 12) & 0xF) * 1000); }) /* VSI fixed number of queues*/ -#define NBL_VSI_PF_REAL_QUEUE_NUM(num) (((num) * 2) + NBL_DEFAULT_REP_HW_QUEUE_NUM) +#define NBL_VSI_PF_LEGAL_QUEUE_NUM(num) ((num) + NBL_DEFAULT_REP_HW_QUEUE_NUM) +#define NBL_VSI_PF_MAX_QUEUE_NUM(num) (((num) * 2) + NBL_DEFAULT_REP_HW_QUEUE_NUM) #define NBL_VSI_VF_REAL_QUEUE_NUM(num) (num) -#define NBL_ADMINQ_PFA_TLV_PFVF_RING_ID (0x5805) +#define NBL_ADMINQ_PFA_TLV_VF_NUM (0x5804) +#define NBL_ADMINQ_PFA_TLV_NET_RING_NUM (0x5805) +#define NBL_ADMINQ_PFA_TLV_REP_RING_NUM (0x5806) +#define NBL_ADMINQ_PFA_TLV_ECPU_RING_NUM (0x5807) +#define NBL_ADMINQ_PFA_TLV_RDMA_CAP (0x5808) +#define NBL_ADMINQ_PFA_TLV_RDMA_MEM_TYPE (0x5809) enum { NBL_FW_VERSION_BANK0 = 0, @@ -206,6 +215,7 @@ struct nbl_leonis_eth_rx_stats { u64 octets_rxd_ok; u64 octets_rxd_badfcs; u64 octets_rxd_dropped; + u64 unsupported_opcodes_rx; }; struct nbl_leonis_eth_stats { @@ -217,4 +227,36 @@ struct nbl_leonis_eth_stats_info { const char *descp; }; +struct nbl_port_key { + u32 id; /* port id */ + u32 subop; /* 1: read, 2: write */ + u64 data[]; /* [47:0]: data, [55:48]: rsvd, [63:56]: key */ +}; + +#define NBL_PORT_KEY_ILLEGAL 0x0 +#define NBL_PORT_KEY_CAPABILITIES 0x1 +#define NBL_PORT_KEY_ENABLE 0x2 /* BIT(0): NBL_PORT_FLAG_ENABLE_NOTIFY */ +#define NBL_PORT_KEY_DISABLE 0x3 +#define NBL_PORT_KEY_ADVERT 0x4 +#define NBL_PORT_KEY_LOOPBACK 0x5 /* 0: disable eth loopback, 1: enable eth loopback */ +#define NBL_PORT_KEY_MODULE_SWITCH 0x6 /* 0: sfp off, 1: sfp on */ +#define NBL_PORT_KEY_MAC_ADDRESS 0x7 +#define NBL_PORT_KEY_LED_BLINK 0x8 +#define NBL_PORT_KEY_RESTORE_DEFAULTE_CFG 11 +#define NBL_PORT_KEY_SET_PFC_CFG 12 +#define NBL_PORT_KEY_GET_LINK_STATUS_OPCODE 17 + +enum { + NBL_PORT_SUBOP_READ = 1, + NBL_PORT_SUBOP_WRITE = 2, +}; + +#define NBL_PORT_FLAG_ENABLE_NOTIFY BIT(0) +#define NBL_PORT_ENABLE_LOOPBACK 1 +#define NBL_PORT_DISABLE_LOOPBCK 0 +#define NBL_PORT_SFP_ON 1 +#define NBL_PORT_SFP_OFF 0 
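+/* Illustrative sketch only (not part of the firmware interface): callers
+ * build the port command word by packing the key into the top byte and the
+ * payload into the low 48 bits of struct nbl_port_key::data above, e.g.
+ *
+ *	u64 data = ((u64)NBL_PORT_KEY_SET_PFC_CFG << NBL_PORT_KEY_KEY_SHIFT) |
+ *		   (pfc_bits & NBL_PORT_KEY_DATA_MASK);
+ *
+ * where pfc_bits stands for a caller-built priority bitmap; see
+ * nbl_res_adminq_set_eth_pfc() for a concrete user of this pattern.
+ */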
+#define NBL_PORT_KEY_KEY_SHIFT 56
+#define NBL_PORT_KEY_DATA_MASK 0xFFFFFFFFFFFF
+
 #endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.c
new file mode 100644
index 0000000000000000000000000000000000000000..1d37723dbd062217d9c1aa4370e5c8fc53eb30eb
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author:
+ */
+
+#include "nbl_fc.h"
+
+static void nbl_fc_update_stats(struct flow_stats *flow_stats,
+				u64 bytes, u64 pkts,
+				u64 drops, u64 lastused)
+{
+	flow_stats_update(flow_stats, bytes, pkts, drops, lastused,
+			  FLOW_ACTION_HW_STATS_DELAYED);
+}
+
+static int nbl_fc_get_stats(void *priv, struct nbl_stats_param *param)
+{
+	int idx;
+	int i;
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_fc_mgt *mgt;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	u64 pkts = 0;
+	u64 bytes = 0;
+	struct nbl_flow_counter *counter = NULL;
+	unsigned long cookie = param->f->cookie;
+	struct nbl_index_key_extra extra_key;
+
+	if (phy_ops->get_hw_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)) == NBL_HW_FATAL_ERR)
+		return -EIO;
+
+	mgt = NBL_RES_MGT_TO_COUNTER_MGT(res_mgt);
+	if (!mgt) {
+		nbl_err(common, NBL_DEBUG_FLOW, "nbl flow fc not initialized.");
+		return -EPERM;
+	}
+
+	spin_lock(&mgt->counter_lock);
+	NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true);
+	for (i = 0; i < NBL_FC_TYPE_MAX; i++) {
+		idx = nbl_common_get_index_with_data(mgt->cls_cookie_tbl[i], &cookie, &extra_key,
+						     NULL, 0, (void **)&counter);
+		if (idx != U32_MAX)
+			break;
+	}
+
+	if (!counter || i >= NBL_FC_TYPE_MAX) {
+		spin_unlock(&mgt->counter_lock);
+		return -EINVAL;
+	}
+
+	if (i == NBL_FC_SPEC_TYPE)
+		mgt->fc_ops.get_spec_stats(counter, &pkts, &bytes);
+	else
+		mgt->fc_ops.get_flow_stats(counter, &pkts, &bytes);
+
+	counter->lastpackets = counter->cache.packets;
+	counter->lastbytes = counter->cache.bytes;
+
+	nbl_fc_update_stats(&param->f->stats, bytes, pkts, 0, counter->lastuse);
+
+	spin_unlock(&mgt->counter_lock);
+
+	return 0;
+}
+
+static void flow_counter_update(struct nbl_fc_mgt *mgt, enum nbl_pp_fc_type fc_type)
+{
+	u32 idx = 0;
+	u32 flow_num = 0;
+	u32 i = 0;
+	struct nbl_flow_counter *iter_counter = NULL;
+	struct nbl_flow_query_counter counter_array;
+	struct list_head *counter_list;
+
+	memset(&counter_array, 0, sizeof(counter_array));
+
+	if (fc_type == NBL_FC_COMMON_TYPE)
+		counter_list = &mgt->counter_hash_list;
+	else
+		counter_list = &mgt->counter_stat_hash_list;
+
+	spin_lock(&mgt->counter_lock);
+	list_for_each_entry(iter_counter, counter_list, entries) {
+		mgt->counter_update_list[idx].counter_id = iter_counter->counter_id;
+		mgt->counter_update_list[idx].cookie = iter_counter->cookie;
+		idx++;
+	}
+	spin_unlock(&mgt->counter_lock);
+	/* using command queue */
+	for (i = 0; i < idx; i++) {
+		counter_array.counter_id[flow_num] = mgt->counter_update_list[i].counter_id;
+		counter_array.cookie[flow_num] = mgt->counter_update_list[i].cookie;
+		++flow_num;
+
+		/* send a bulk of cmdqueue queries */
+		if (flow_num == NBL_FLOW_COUNT_NUM) {
+			mgt->fc_ops.update_stats(mgt, &counter_array, flow_num, 0, fc_type);
+			flow_num = 0;
+		}
+	}
+
+	if (flow_num) {
+		mgt->fc_ops.update_stats(mgt, &counter_array, flow_num, 0, fc_type);
+		flow_num = 0;
+	}
+
+	nbl_debug(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc start update counter type %d, all=%u",
+		  fc_type, idx);
+}
+
+static void nbl_fc_stats_work(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct nbl_fc_mgt *mgt = container_of(delayed_work,
+					      struct nbl_fc_mgt, counter_work);
+	unsigned long now = jiffies;
+
+	if (!list_empty(&mgt->counter_hash_list) || !list_empty(&mgt->counter_stat_hash_list))
+		queue_delayed_work(mgt->counter_wq, &mgt->counter_work, mgt->query_interval);
+
+	/* no need for too much overhead in counter updates */
+	if (time_before(now, mgt->next_query))
+		return;
+	flow_counter_update(mgt, NBL_FC_COMMON_TYPE);
+	flow_counter_update(mgt, NBL_FC_SPEC_TYPE);
+	mgt->next_query = now + mgt->query_interval;
+}
+
+static void nbl_fc_free_res(struct nbl_fc_mgt *mgt)
+{
+	int i;
+
+	kfree(mgt->counter_update_list);
+	mgt->counter_update_list = NULL;
+
+	for (i = 0; i < NBL_FC_TYPE_MAX; i++) {
+		nbl_common_remove_index_table(mgt->cls_cookie_tbl[i], NULL);
+		mgt->cls_cookie_tbl[i] = NULL;
+	}
+}
+
+static int nbl_fc_init_hash_map(struct nbl_fc_mgt *mgt)
+{
+	int i;
+	u32 idx_num[NBL_FC_TYPE_MAX] = {NBL_COUNTER_MAX_ID, NBL_COUNTER_MAX_STAT_ID};
+	struct nbl_index_tbl_key tbl_key;
+
+	mgt->counter_update_list = kcalloc(NBL_COUNTER_MAX_ID, sizeof(*mgt->counter_update_list),
+					   GFP_KERNEL);
+	if (!mgt->counter_update_list)
+		goto alloc_counter_list_failed;
+
+	for (i = 0; i < NBL_FC_TYPE_MAX; i++) {
+		NBL_INDEX_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(mgt->common), 0,
+				       idx_num[i], sizeof(unsigned long));
+		mgt->cls_cookie_tbl[i] = nbl_common_init_index_table(&tbl_key);
+		if (!mgt->cls_cookie_tbl[i])
+			goto alloc_index_tbl_failed;
+	}
+
+	return 0;
+
+alloc_index_tbl_failed:
+	while (--i >= 0) {
+		nbl_common_remove_index_table(mgt->cls_cookie_tbl[i], NULL);
+		mgt->cls_cookie_tbl[i] = NULL;
+	}
+	kfree(mgt->counter_update_list);
+	mgt->counter_update_list = NULL;
+alloc_counter_list_failed:
+	return -ENOMEM;
+}
+
+/* NBL_COUNTER_SET_OPS(ops_name, func)
+ *
+ * Use X Macros to reduce setup and remove codes.
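+ *
+ * For reference, each NBL_COUNTER_SET_OPS(name, func) entry below expands
+ * to "res_ops->NBL_NAME(name) = func;" (see nbl_fc_setup_ops at the bottom
+ * of this file), so the table wires query_tc_stats to nbl_fc_get_stats.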
+ */
+#define NBL_COUNTER_OPS_TBL					\
+do {								\
+	NBL_COUNTER_SET_OPS(query_tc_stats, nbl_fc_get_stats);	\
+} while (0)
+
+static void nbl_fc_remove_mgt(struct device *dev, struct nbl_fc_mgt **fc_mgt)
+{
+	devm_kfree(dev, *fc_mgt);
+	*fc_mgt = NULL;
+}
+
+int nbl_fc_set_stats(struct nbl_fc_mgt *mgt, void *data, unsigned long cookie)
+{
+	int ret = 0;
+	int i;
+	int idx;
+	struct nbl_stats_data *data_info = (struct nbl_stats_data *)data;
+	struct nbl_flow_counter *counter_node = NULL;
+	struct nbl_index_key_extra extra_key;
+
+	spin_lock(&mgt->counter_lock);
+	NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true);
+	for (i = 0; i < NBL_FC_TYPE_MAX; i++) {
+		idx = nbl_common_get_index_with_data(mgt->cls_cookie_tbl[i], &cookie, &extra_key,
+						     NULL, 0, (void **)&counter_node);
+		if (idx != U32_MAX)
+			break;
+	}
+
+	if (!counter_node) {
+		nbl_debug(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc cookie %lu does not exist now",
+			  cookie);
+		ret = -ENOKEY;
+		goto counter_rte_hash_lookup_err;
+	}
+
+	if (data_info->packets != counter_node->cache.packets) {
+		counter_node->cache.packets = data_info->packets;
+		counter_node->cache.bytes = data_info->bytes;
+		counter_node->lastuse = jiffies;
+	}
+
+	spin_unlock(&mgt->counter_lock);
+	return 0;
+
+counter_rte_hash_lookup_err:
+	spin_unlock(&mgt->counter_lock);
+	return ret;
+}
+
+int nbl_fc_setup_mgt(struct device *dev, struct nbl_fc_mgt **fc_mgt)
+{
+	struct nbl_fc_mgt *mgt;
+
+	*fc_mgt = devm_kzalloc(dev, sizeof(struct nbl_fc_mgt), GFP_KERNEL);
+	mgt = *fc_mgt;
+	if (!mgt)
+		return -ENOMEM;
+
+	spin_lock_init(&mgt->counter_lock);
+	INIT_LIST_HEAD(&mgt->counter_hash_list);
+	INIT_LIST_HEAD(&mgt->counter_stat_hash_list);
+	mgt->query_interval = NBL_COUNTER_PERIOD_INTERVAL;
+
+	return 0;
+}
+
+int nbl_fc_add_stats(void *priv, enum nbl_pp_fc_type fc_type, unsigned long cookie)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_fc_mgt *fc_mgt;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_flow_counter *counter_node;
+	struct list_head *counter_list;
+	int ret = 0;
+	int idx = 0;
+	struct nbl_flow_counter counter_data;
+	struct nbl_index_key_extra extra_key;
+
+	fc_mgt = NBL_RES_MGT_TO_COUNTER_MGT(res_mgt);
+	if (!fc_mgt) {
+		nbl_info(common, NBL_DEBUG_FLOW, "nbl flow fc add failed: counter not initialized.");
+		return -EINVAL;
+	}
+
+	spin_lock(&fc_mgt->counter_lock);
+	if (fc_type == NBL_FC_COMMON_TYPE)
+		counter_list = &fc_mgt->counter_hash_list;
+	else
+		counter_list = &fc_mgt->counter_stat_hash_list;
+
+	NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true);
+	idx = nbl_common_get_index_with_data(fc_mgt->cls_cookie_tbl[fc_type], &cookie, &extra_key,
+					     NULL, 0, (void **)&counter_node);
+	if (idx != U32_MAX) {
+		nbl_err(common, NBL_DEBUG_FLOW, "nbl flow fc add failed: cookie exist(%lu-%d)\n",
+			cookie, fc_type);
+		ret = -EEXIST;
+		goto add_counter_failed;
+	}
+
+	memset(&counter_data, 0, sizeof(counter_data));
+	counter_data.cookie = cookie;
+	idx = nbl_common_alloc_index(fc_mgt->cls_cookie_tbl[fc_type], &cookie, NULL, &counter_data,
+				     sizeof(counter_data), (void **)&counter_node);
+	if (idx == U32_MAX) {
+		ret = -ENOSPC;
+		goto add_counter_failed;
+	}
+
+	counter_node->counter_id = (u32)idx;
+	list_add(&counter_node->entries, counter_list);
+
+	/* wake up update worker */
+	mod_delayed_work(fc_mgt->counter_wq, &fc_mgt->counter_work, 0);
+	nbl_debug(common, NBL_DEBUG_FLOW, "nbl flow fc add counter(%u-%lu-%d) success\n",
+		  idx, cookie, fc_type);
+	ret = (int)idx;
+
+add_counter_failed:
+	spin_unlock(&fc_mgt->counter_lock);
+	return ret;
+}
+
+int nbl_fc_del_stats(void *priv, unsigned long cookie)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_fc_mgt *fc_mgt;
+	struct nbl_flow_counter *counter_node = NULL;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct list_head *counter_list;
+	int ret = 0;
+	int idx;
+	int i;
+	struct nbl_flow_query_counter counter_array;
+	struct nbl_index_key_extra extra_key;
+
+	fc_mgt = NBL_RES_MGT_TO_COUNTER_MGT(res_mgt);
+	if (!fc_mgt) {
+		nbl_info(common, NBL_DEBUG_FLOW, "nbl flow fc del failed: counter not initialized.");
+		return -EINVAL;
+	}
+
+	memset(&counter_array, 0, sizeof(counter_array));
+
+	spin_lock(&fc_mgt->counter_lock);
+	NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true);
+	for (i = 0; i < NBL_FC_TYPE_MAX; i++) {
+		idx = nbl_common_get_index_with_data(fc_mgt->cls_cookie_tbl[i], &cookie,
+						     &extra_key, NULL, 0, (void **)&counter_node);
+		if (idx != U32_MAX)
+			break;
+	}
+
+	if (!counter_node || i >= NBL_FC_TYPE_MAX) {
+		nbl_debug(common, NBL_DEBUG_FLOW, "nbl flow fc del key(%lu) does not exist", cookie);
+		ret = -ENOKEY;
+		goto del_counter_failed;
+	}
+
+	if (i == NBL_FC_COMMON_TYPE)
+		counter_list = &fc_mgt->counter_hash_list;
+	else
+		counter_list = &fc_mgt->counter_stat_hash_list;
+
+	counter_array.counter_id[0] = idx;
+	counter_array.cookie[0] = cookie;
+	fc_mgt->fc_ops.update_stats(fc_mgt, &counter_array, 1, 1, i);
+	list_del(&counter_node->entries);
+	nbl_common_free_index(fc_mgt->cls_cookie_tbl[i], &cookie);
+	nbl_debug(common, NBL_DEBUG_FLOW, "nbl flow fc del counter(%lu-%d) success\n", cookie, i);
+del_counter_failed:
+	spin_unlock(&fc_mgt->counter_lock);
+	return ret;
+}
+
+int nbl_fc_mgt_start(struct nbl_fc_mgt *mgt)
+{
+	int ret = -ENOMEM;
+
+	mgt->counter_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1, "nbl_fc_wq");
+	if (!mgt->counter_wq)
+		goto init_counter_fail;
+
+	ret = nbl_fc_init_hash_map(mgt);
+	if (ret)
+		goto init_hash_map_fail;
+
+	INIT_DELAYED_WORK(&mgt->counter_work, nbl_fc_stats_work);
+	queue_delayed_work(mgt->counter_wq, &mgt->counter_work, mgt->query_interval);
+
+	nbl_info(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc init success in tc mode");
+	return 0;
+
+init_hash_map_fail:
+	destroy_workqueue(mgt->counter_wq);
+init_counter_fail:
+	nbl_err(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc init failed in tc mode");
+	return ret;
+}
+
+void nbl_fc_mgt_stop(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev;
+	struct nbl_fc_mgt **fc_mgt;
+	struct nbl_fc_mgt *mgt;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	fc_mgt = &NBL_RES_MGT_TO_COUNTER_MGT(res_mgt);
+	mgt = (*fc_mgt);
+	if (!mgt)
+		return;
+
+	cancel_delayed_work_sync(&mgt->counter_work);
+	destroy_workqueue(mgt->counter_wq);
+	nbl_fc_free_res(mgt);
+	nbl_fc_remove_mgt(dev, fc_mgt);
+	nbl_info(common, NBL_DEBUG_FLOW, "nbl flow fc deinit success in tc mode");
+}
+
+int nbl_fc_setup_ops(struct nbl_resource_ops *res_ops)
+{
+#define NBL_COUNTER_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = func; } while (0)
+	NBL_COUNTER_OPS_TBL;
+#undef NBL_COUNTER_SET_OPS
+
+	return 0;
+}
+
+void nbl_fc_remove_ops(struct nbl_resource_ops *res_ops)
+{
+#define NBL_COUNTER_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = NULL; } while (0)
+	NBL_COUNTER_OPS_TBL;
+#undef NBL_COUNTER_SET_OPS
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.h
new file mode 100644
index 0000000000000000000000000000000000000000..ee63feb5e3377dc39ea212ca0ed13ac5cb5907d2
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author:
+ */
+
+#ifndef _NBL_FC_H_
+#define _NBL_FC_H_
+
+#include "nbl_resource.h"
+
+#define NBL_COUNTER_PERIOD_INTERVAL msecs_to_jiffies(3000)
+
+#define NBL_FLOW_STAT_CLR_OFT (3)
+#define NBL_FLOW_STAT_NUM_MASK (0x7)
+
+#define NBL_CMDQ_ACL_STAT_BASE_LEN 32
+
+struct nbl_stats_data {
+	u32 flow_id;
+	u64 bytes;
+	u64 packets;
+};
+
+int nbl_fc_add_stats(void *priv, enum nbl_pp_fc_type fc_type, unsigned long cookie);
+int nbl_fc_del_stats(void *priv, unsigned long cookie);
+int nbl_fc_setup_ops(struct nbl_resource_ops *res_ops);
+void nbl_fc_remove_ops(struct nbl_resource_ops *res_ops);
+int nbl_fc_mgt_start(struct nbl_fc_mgt *mgt);
+void nbl_fc_mgt_stop(struct nbl_resource_mgt *res_mgt);
+int nbl_fc_setup_mgt(struct device *dev, struct nbl_fc_mgt **fc_mgt);
+int nbl_fc_set_stats(struct nbl_fc_mgt *mgt, void *data, unsigned long cookie);
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.c
new file mode 100644
index 0000000000000000000000000000000000000000..91a5a65ac63bf1529a3d21cb141d20e0e3308a51
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.c
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author:
+ */
+
+#include "nbl_fd.h"
+#include "nbl_p4_actions.h"
+
+struct nbl_fd_get_tlv_udf {
+	u64 val;
+	u64 mask;
+	bool valid;
+};
+
+struct nbl_fd_tcam_default_entry {
+	union nbl_fd_tcam_default_data_u data;
+	union nbl_fd_tcam_default_data_u mask;
+	struct nbl_flow_direct_entry *entry;
+	u32 action;
+};
+
+static int nbl_fd_get_profile_id(enum nbl_chan_fdir_flow_type type, u8 mode)
+{
+	switch (type) {
+	case NBL_CHAN_FDIR_FLOW_TCP_IPv4:
+	case NBL_CHAN_FDIR_FLOW_UDP_IPv4:
+	case NBL_CHAN_FDIR_FLOW_IPv4:
+		return mode == NBL_FD_MODE_DEFAULT ? NBL_FD_PROFILE_DEFAULT : NBL_FD_PROFILE_IPV4;
+	case NBL_CHAN_FDIR_FLOW_TCP_IPv6:
+	case NBL_CHAN_FDIR_FLOW_UDP_IPv6:
+	case NBL_CHAN_FDIR_FLOW_IPv6:
+	case NBL_CHAN_FDIR_FLOW_ETHER:
+	case NBL_CHAN_FDIR_FLOW_FULL:
+		return mode == NBL_FD_MODE_DEFAULT ?
+			NBL_FD_PROFILE_DEFAULT : NBL_FD_PROFILE_L2_IPV6;
+	default:
+		break;
+	}
+
+	return -1;
+}
+
+static struct nbl_flow_direct_entry *nbl_fd_find_flow(struct nbl_flow_direct_info *info,
+						      enum nbl_chan_fdir_rule_type rule_type,
+						      u32 loc)
+{
+	struct nbl_flow_direct_entry *entry = NULL;
+
+	if (rule_type >= NBL_CHAN_FDIR_RULE_MAX)
+		return NULL;
+
+	list_for_each_entry(entry, &info->list[rule_type], node)
+		if (entry->param.location == loc)
+			return entry;
+
+	return NULL;
+}
+
+static int nbl_fd_get_udf(u16 type, u16 length, u8 *val, void *data)
+{
+	struct nbl_fd_get_tlv_udf *udf = (struct nbl_fd_get_tlv_udf *)data;
+
+	if (type != NBL_CHAN_FDIR_KEY_UDF)
+		return 0;
+
+	udf->valid = 1;
+	udf->val = *(u64 *)val;
+	udf->mask = *(u64 *)(val + 8);
+
+	return 1;
+}
+
+static u16 nbl_fd_get_flow_layer(enum nbl_chan_fdir_flow_type type)
+{
+	switch (type) {
+	case NBL_CHAN_FDIR_FLOW_ETHER:
+		return 0;
+	case NBL_CHAN_FDIR_FLOW_IPv4:
+	case NBL_CHAN_FDIR_FLOW_IPv6:
+		return 1;
+	case NBL_CHAN_FDIR_FLOW_TCP_IPv4:
+	case NBL_CHAN_FDIR_FLOW_TCP_IPv6:
+	case NBL_CHAN_FDIR_FLOW_UDP_IPv4:
+	case NBL_CHAN_FDIR_FLOW_UDP_IPv6:
+	case NBL_CHAN_FDIR_FLOW_FULL:
+	default:
+		return 2;
+	}
+}
+
+static int nbl_fd_validate_rule(struct nbl_flow_direct_mgt *fd_mgt,
+				struct nbl_chan_param_fdir_replace *param,
+				struct nbl_flow_direct_entry *entry)
+{
+	struct nbl_fd_get_tlv_udf udf = {0};
+	int pid = -1;
+	u16 udf_offset;
+	u16 udf_layer;
+	bool rule_udf = false;
+	u8 *tlv;
+
+	if (param->rule_type >= NBL_CHAN_FDIR_RULE_MAX)
+		return -EINVAL;
+
+	tlv = (u8 *)param + param->base_length;
+	nbl_flow_direct_parse_tlv_data(tlv, param->tlv_length, nbl_fd_get_udf, &udf);
+	if (udf.valid) {
+		udf_offset = (udf.val & NBL_FD_UDF_FLEX_OFFS_M) >> NBL_FD_UDF_FLEX_OFFS_S;
+		udf_layer = nbl_fd_get_flow_layer(param->flow_type);
+
+		if (entry)
+			rule_udf = entry->udf;
+
+		/* Offset must be the same for all rules */
+		if (fd_mgt->udf_cnt > 0 &&
+		    (fd_mgt->udf_offset != udf_offset || fd_mgt->udf_layer != udf_layer) &&
+		    (fd_mgt->udf_cnt != 1 || !rule_udf))
+			return -EINVAL;
+
+		if (udf_offset > 52)
+			return -EINVAL;
+
+		/* For offset, we don't support mask */
+		if (((udf.mask & NBL_FD_UDF_FLEX_OFFS_M) >> NBL_FD_UDF_FLEX_OFFS_S) != 0xFFFFFFFF)
+			return -EINVAL;
+	}
+
+	/* a replaced rule does not check the count for now, always keep full mode */
+	if (entry)
+		return 0;
+
+	pid = nbl_fd_get_profile_id(param->flow_type, fd_mgt->mode);
+	switch (pid) {
+	case NBL_FD_PROFILE_DEFAULT:
+		if (fd_mgt->cnt[NBL_FD_PROFILE_DEFAULT] >= NBL_FD_RULE_MAX_512)
+			return -EINVAL;
+		break;
+	case NBL_FD_PROFILE_IPV4:
+		if (fd_mgt->mode == NBL_FD_MODE_LITE &&
+		    fd_mgt->cnt[NBL_FD_PROFILE_IPV4] >= NBL_FD_RULE_MAX_1536)
+			return -EINVAL;
+		if (fd_mgt->mode == NBL_FD_MODE_FULL &&
+		    fd_mgt->cnt[NBL_FD_PROFILE_IPV4] >= NBL_FD_RULE_MAX_512 &&
+		    fd_mgt->cnt[NBL_FD_PROFILE_L2_IPV6] > 0)
+			return -EINVAL;
+		break;
+	case NBL_FD_PROFILE_L2_IPV6:
+		/* We will always try to change the mode to FULL, so if we are in LITE now,
+		 * then don't support any IPV6 rules whatsoever.
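+		 *
+		 * Capacity summary of the checks in this function: DEFAULT
+		 * takes up to 512 rules; IPv4 up to 1536 in LITE mode, and
+		 * past 512 in FULL mode only while no IPv6 rules exist;
+		 * L2/IPv6 up to 512 rules, and only in FULL mode.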
+		 */
+		if (fd_mgt->mode == NBL_FD_MODE_LITE ||
+		    fd_mgt->cnt[NBL_FD_PROFILE_L2_IPV6] >= NBL_FD_RULE_MAX_512)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct nbl_flow_direct_entry *nbl_fd_add_flow(struct nbl_flow_direct_mgt *fd_mgt,
+						     struct nbl_flow_direct_info *info,
+						     struct nbl_chan_param_fdir_replace *param)
+{
+	struct nbl_flow_direct_entry *entry = NULL, *next = NULL;
+	struct nbl_fd_get_tlv_udf udf = {0};
+	u8 pid;
+
+	pid = nbl_fd_get_profile_id(param->flow_type, fd_mgt->mode);
+	if (pid > NBL_FD_PROFILE_MAX)
+		return NULL;
+
+	entry = kzalloc(sizeof(*entry) + param->tlv_length, GFP_KERNEL);
+	if (!entry)
+		return NULL;
+
+	entry->pid = pid;
+	memcpy(&entry->param, param, min_t(u32, sizeof(entry->param), param->base_length));
+	memcpy(entry->param.tlv, ((u8 *)param + param->base_length), param->tlv_length);
+	entry->param.base_length = sizeof(entry->param);
+
+	/* Maintain order */
+	if (param->order) {
+		list_for_each_entry(next, &info->list[param->rule_type], node)
+			if (next->param.location >= entry->param.location)
+				break;
+
+		if (nbl_list_entry_is_head(next, &info->list[param->rule_type], node))
+			list_add(&entry->node, &info->list[param->rule_type]);
+		else
+			list_add(&entry->node, &list_prev_entry(next, node)->node);
+	} else {
+		list_add_tail(&entry->node, &info->list[param->rule_type]);
+	}
+
+	info->cnt[param->rule_type]++;
+	fd_mgt->cnt[entry->pid]++;
+
+	/* We have judged the capacity in validation, so we shouldn't have any trouble now. */
+	if (fd_mgt->mode == NBL_FD_MODE_FULL &&
+	    fd_mgt->cnt[NBL_FD_PROFILE_IPV4] > NBL_FD_RULE_MAX_512)
+		fd_mgt->mode = NBL_FD_MODE_LITE;
+
+	nbl_flow_direct_parse_tlv_data(param->tlv, param->tlv_length, nbl_fd_get_udf, &udf);
+	if (udf.valid) {
+		entry->udf = 1;
+		fd_mgt->udf_offset = (udf.val & NBL_FD_UDF_FLEX_OFFS_M) >> NBL_FD_UDF_FLEX_OFFS_S;
+		fd_mgt->udf_cnt++;
+		fd_mgt->udf_layer = nbl_fd_get_flow_layer(param->flow_type);
+	}
+
+	return entry;
+}
+
+static void nbl_fd_del_flow(struct nbl_flow_direct_mgt *fd_mgt,
+			    struct nbl_flow_direct_info *info,
+			    struct nbl_flow_direct_entry *entry)
+{
+	info->cnt[entry->param.rule_type]--;
+	fd_mgt->cnt[entry->pid]--;
+
+	if (entry->udf)
+		fd_mgt->udf_cnt--;
+
+	if (fd_mgt->mode == NBL_FD_MODE_LITE &&
+	    fd_mgt->cnt[NBL_FD_PROFILE_IPV4] <= NBL_FD_RULE_MAX_512)
+		fd_mgt->mode = NBL_FD_MODE_FULL;
+
+	list_del(&entry->node);
+	kfree(entry);
+}
+
+static int nbl_fd_find_and_del_flow(struct nbl_flow_direct_mgt *fd_mgt,
+				    struct nbl_flow_direct_info *info,
+				    enum nbl_chan_fdir_rule_type rule_type,
+				    u32 loc)
+{
+	struct nbl_flow_direct_entry *entry = nbl_fd_find_flow(info, rule_type, loc);
+
+	if (!entry)
+		return -ENOENT;
+
+	nbl_fd_del_flow(fd_mgt, info, entry);
+
+	return 0;
+}
+
+static void nbl_fd_del_flow_all(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_flow_direct_entry *entry = NULL, *entry_safe = NULL;
+	int i = 0, j;
+
+	for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++)
+		for (j = 0; j < NBL_CHAN_FDIR_RULE_MAX; j++)
+			list_for_each_entry_safe(entry, entry_safe, &fd_mgt->info[i].list[j], node)
+				nbl_fd_del_flow(fd_mgt, &fd_mgt->info[i], entry);
+}
+
+static int nbl_fd_setup_tcam_cfg(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	switch (fd_mgt->mode) {
+	case NBL_FD_MODE_DEFAULT:
+		phy_ops->set_fd_tcam_cfg_default(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+		break;
+	case NBL_FD_MODE_FULL:
+		phy_ops->set_fd_tcam_cfg_full(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+		break;
+	case NBL_FD_MODE_LITE:
+		phy_ops->set_fd_tcam_cfg_lite(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (fd_mgt->udf_cnt)
+		phy_ops->set_fd_udf(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+				    (u8)fd_mgt->udf_layer,
+				    (u8)fd_mgt->udf_offset);
+	else
+		phy_ops->clear_fd_udf(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+
+	return 0;
+}
+
+static int nbl_fd_config_default_key_action(u16 type, u16 length, u8 *val, void *data)
+{
+	struct nbl_fd_tcam_default_entry *tcam_entry = (struct nbl_fd_tcam_default_entry *)data;
+	union nbl_fd_tcam_default_data_u *tcam_data = &tcam_entry->data;
+	union nbl_fd_tcam_default_data_u *tcam_mask = &tcam_entry->mask;
+	struct nbl_flow_direct_entry *entry = tcam_entry->entry;
+	union nbl_action_data action = {{0}};
+	u8 reverse_mac[ETH_ALEN];
+	u64 temp, mask;
+	u32 offset, udf_data, udf_mask;
+
+	switch (type) {
+	case NBL_CHAN_FDIR_KEY_SRC_MAC:
+		nbl_convert_mac(val, reverse_mac);
+		ether_addr_copy((u8 *)&temp, reverse_mac);
+		tcam_data->info.src_mac = temp;
+		nbl_convert_mac(val + ETH_ALEN, reverse_mac);
+		ether_addr_copy((u8 *)&temp, reverse_mac);
+		tcam_mask->info.src_mac = temp;
+		break;
+	case NBL_CHAN_FDIR_KEY_DST_MAC:
+		nbl_convert_mac(val, reverse_mac);
+		ether_addr_copy((u8 *)&temp, reverse_mac);
+		tcam_data->info.dst_mac = temp;
+		nbl_convert_mac(val + ETH_ALEN, reverse_mac);
+		ether_addr_copy((u8 *)&temp, reverse_mac);
+		tcam_mask->info.dst_mac = temp;
+		break;
+	case NBL_CHAN_FDIR_KEY_PROTO:
+		tcam_data->info.ethertype = be16_to_cpu(*(u16 *)val);
+		tcam_mask->info.ethertype = be16_to_cpu(*(u16 *)(val + 2));
+		break;
+	case NBL_CHAN_FDIR_KEY_SRC_IPv4:
+		tcam_data->info.sip_l = be32_to_cpu(*(u32 *)val);
+		tcam_mask->info.sip_l = be32_to_cpu(*(u32 *)(val + 4));
+		break;
+	case NBL_CHAN_FDIR_KEY_DST_IPv4:
+		tcam_data->info.dip_l = be32_to_cpu(*(u32 *)val);
+		tcam_mask->info.dip_l = be32_to_cpu(*(u32 *)(val + 4));
+		break;
+	case NBL_CHAN_FDIR_KEY_L4PROTO:
+		tcam_data->info.l4_proto = *val;
+		tcam_mask->info.l4_proto = *(val + 1);
+		break;
+	case NBL_CHAN_FDIR_KEY_SRC_IPv6:
+		tcam_data->info.sip_l = be64_to_cpu(*((u64 *)val + 1));
+		tcam_mask->info.sip_l = be64_to_cpu(*((u64 *)val + 3));
+		tcam_data->info.sip_h = be64_to_cpu(*(u64 *)val);
+		tcam_mask->info.sip_h = be64_to_cpu(*((u64 *)val + 2));
+		break;
+	case NBL_CHAN_FDIR_KEY_DST_IPv6:
+		tcam_data->info.dip_l = be64_to_cpu(*((u64 *)val + 1));
+		tcam_mask->info.dip_l = be64_to_cpu(*((u64 *)val + 3));
+		tcam_data->info.dip_h = be64_to_cpu(*(u64 *)val);
+		tcam_mask->info.dip_h = be64_to_cpu(*((u64 *)val + 2));
+		break;
+	case NBL_CHAN_FDIR_KEY_SPORT:
+		/* hw generated key is little endian */
+		tcam_data->info.l4_sport = be16_to_cpu(*(u16 *)val);
+		tcam_mask->info.l4_sport = be16_to_cpu(*(u16 *)(val + 2));
+		break;
+	case NBL_CHAN_FDIR_KEY_DPORT:
+		tcam_data->info.l4_dport = be16_to_cpu(*(u16 *)val);
+		tcam_mask->info.l4_dport = be16_to_cpu(*(u16 *)(val + 2));
+		break;
+	case NBL_CHAN_FDIR_KEY_UDF:
+		temp = *(u64 *)val;
+		mask = *(u64 *)(val + 8);
+		offset = (temp & NBL_FD_UDF_FLEX_OFFS_M) >> NBL_FD_UDF_FLEX_OFFS_S;
+		udf_data = temp & NBL_FD_UDF_FLEX_WORD_M;
+		udf_mask = mask & NBL_FD_UDF_FLEX_WORD_M;
+
+		/* data: high addr means payload first bytes.
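+		 * Rotate the 32-bit flex word (and its mask) when the user offset
+		 * is not 4-byte aligned, so each byte lines up with where the
+		 * hardware-generated key carries it.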
+		 */
+		if (offset % 4 == 1) {
+			udf_data = (u8)udf_data << 24 | udf_data >> 8;
+			udf_mask = (u8)udf_mask << 24 | udf_mask >> 8;
+		} else if (offset % 4 == 3) {
+			udf_data = udf_data >> 24 | udf_data << 8;
+			udf_mask = udf_mask >> 24 | udf_mask << 8;
+		}
+
+		tcam_data->info.udf = udf_data;
+		tcam_mask->info.udf = udf_mask;
+		break;
+	case NBL_CHAN_FDIR_ACTION_QUEUE:
+		if (entry->param.global_queue_id != 0xFFFF) {
+			action.dqueue.que_id = entry->param.global_queue_id;
+			tcam_entry->action = action.data + (NBL_ACT_SET_QUE_IDX << 16);
+		} else {
+			action.data = 0xFFF;
+			tcam_entry->action = action.data + (NBL_ACT_SET_DPORT << 16);
+		}
+		break;
+	case NBL_CHAN_FDIR_ACTION_VSI:
+		if (entry->param.dport != 0xFFFF) {
+			action.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST;
+			action.dport.up.port_id = entry->param.dport;
+			action.dport.up.upcall_flag = AUX_KEEP_FWD_TYPE;
+			action.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO;
+		} else {
+			action.data = 0xFFF;
+		}
+		tcam_entry->action = action.data + (NBL_ACT_SET_DPORT << 16);
+
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int nbl_fd_config_key(struct nbl_flow_direct_entry *entry, struct nbl_acl_tcam_param *data,
+			     struct nbl_acl_tcam_param *mask, u32 *action, u16 vsi_id)
+{
+	struct nbl_fd_tcam_default_entry tcam_default_entry;
+
+	memset(&tcam_default_entry, 0, sizeof(tcam_default_entry));
+	tcam_default_entry.entry = entry;
+
+	switch (entry->pid) {
+	case NBL_FD_PROFILE_DEFAULT:
+		nbl_flow_direct_parse_tlv_data(entry->param.tlv, entry->param.tlv_length,
+					       nbl_fd_config_default_key_action,
+					       &tcam_default_entry);
+
+		tcam_default_entry.data.info.dport = (0x2 << 10) + vsi_id;
+		tcam_default_entry.mask.info.dport = 0xFFFF;
+		tcam_default_entry.data.info.pid = NBL_FD_PROFILE_DEFAULT;
+		tcam_default_entry.mask.info.pid = 0xE;
+
+		memcpy(&data->info.data, &tcam_default_entry.data, sizeof(tcam_default_entry.data));
+		memcpy(&mask->info.data, &tcam_default_entry.mask, sizeof(tcam_default_entry.mask));
+		data->len = sizeof(tcam_default_entry.data);
+		mask->len = sizeof(tcam_default_entry.mask);
+		*action = tcam_default_entry.action;
+
+		break;
+	case NBL_FD_PROFILE_IPV4:
+	case NBL_FD_PROFILE_L2_IPV6:
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nbl_fd_get_tcam_index(struct nbl_fd_tcam_index_info *info, u8 pid,
+				 u16 *ram_index, u16 *depth_index, int mode)
+{
+	switch (pid) {
+	case NBL_FD_PROFILE_DEFAULT:
+		if (info->default_index[0].depth_index >= NBL_FD_TCAM_DEPTH)
+			return -EINVAL;
+
+		*ram_index = 0;
+		*depth_index = info->default_index[0].depth_index++;
+
+		break;
+	case NBL_FD_PROFILE_IPV4:
+		if (mode != NBL_FD_MODE_LITE &&
+		    (info->v4_cnt > 1 || info->v4[0].depth_index >= NBL_FD_TCAM_DEPTH))
+			return -EINVAL;
+
+		if (info->v4[info->v4_cnt].depth_index < NBL_FD_TCAM_DEPTH) {
+			*ram_index = info->v4_cnt;
+			*depth_index = info->v4[info->v4_cnt].depth_index++;
+		} else {
+			*ram_index = ++info->v4_cnt;
+			*depth_index = info->v4[info->v4_cnt].depth_index++;
+		}
+
+		break;
+	case NBL_FD_PROFILE_L2_IPV6:
+		if (mode == NBL_FD_MODE_LITE || info->v6[0].depth_index >= NBL_FD_TCAM_DEPTH)
+			return -EINVAL;
+
+		*ram_index = NBL_FD_IPV4_TCAM_WIDTH;
+		*depth_index = info->v6[0].depth_index++;
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static u16 nbl_fd_get_action_index(u16 ram_index)
+{
+	/* This is a bit tricky...
+	 *
+	 * For DEFAULT mode, ram_index is always 0, so we always use action_ram 0.
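+	 * (Either way the mapping is one action_ram per NBL_FD_IPV4_TCAM_WIDTH
+	 * tcam rams, hence the division below.)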
+	 *
+	 * For FULL mode, IPV4 rules always have ram_index 0, so they use action_ram 0, and
+	 * IPV6 rules always have ram_index equal to NBL_FD_IPV4_TCAM_WIDTH, so they use
+	 * action_ram 1.
+	 *
+	 * For LITE mode, every 512 IPV4 rules use one action_ram.
+	 */
+	return ram_index / NBL_FD_IPV4_TCAM_WIDTH;
+}
+
+static int nbl_fd_setup_tcam_for_list(struct nbl_resource_mgt *res_mgt,
+				      struct nbl_fd_tcam_index_info *index_info,
+				      struct list_head *head, u16 vsi_id)
+{
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_acl_tcam_param data, mask;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_flow_direct_entry *entry = NULL;
+	u16 ram_index = 0, depth_index = 0, action_index = 0;
+	u32 action = 0;
+	int ret;
+
+	memset(&data, 0, sizeof(data));
+	memset(&mask, 0, sizeof(mask));
+	list_for_each_entry(entry, head, node) {
+		ret = nbl_fd_get_tcam_index(index_info, entry->pid, &ram_index,
+					    &depth_index, fd_mgt->mode);
+		if (ret)
+			return ret;
+
+		ret = nbl_fd_config_key(entry, &data, &mask, &action, vsi_id);
+		if (ret)
+			return ret;
+
+		action_index = nbl_fd_get_action_index(ram_index);
+
+		entry->action_index = action_index;
+		entry->depth_index = depth_index;
+		ret = phy_ops->set_fd_action_ram(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+						 action, action_index, depth_index);
+		if (ret)
+			return ret;
+
+		ret = phy_ops->set_fd_tcam_ram(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					       &data, &mask, ram_index, depth_index);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int nbl_fd_setup_tcam(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_fd_tcam_index_info index_info;
+	u16 vsi_id = 0;
+	int i = 0, j, ret = 0;
+
+	memset(&index_info, 0, sizeof(index_info));
+
+	for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) {
+		vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, i, -1, NBL_VSI_DATA);
+		for (j = 0; j < NBL_CHAN_FDIR_RULE_MAX; j++) {
+			ret = nbl_fd_setup_tcam_for_list(res_mgt, &index_info,
+							 &fd_mgt->info[i].list[j], vsi_id);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int nbl_fd_setup_flow(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	int ret = 0;
+
+	if (fd_mgt->state != NBL_FD_STATE_ON)
+		return 0;
+
+	phy_ops->clear_acl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+
+	ret = nbl_fd_setup_tcam_cfg(res_mgt);
+	if (ret)
+		goto fail;
+
+	ret = nbl_fd_setup_tcam(res_mgt);
+	if (ret)
+		goto fail;
+
+	return 0;
+
+fail:
+	phy_ops->clear_acl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+	return ret;
+}
+
+static void nbl_fd_remove_flow(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->clear_acl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+}
+
+static int nbl_fd_handle_queue_update(u16 type, void *event_data, void *callback_data)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)callback_data;
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_event_queue_update_data *data =
+		(struct nbl_event_queue_update_data *)event_data;
+	struct nbl_flow_direct_entry *entry = NULL;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	union nbl_action_data action = {{0}};
+	int pf_id, vf_id;
+	u32 action_data;
+	u16 func_id = data->func_id;
+
+	nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pf_id, &vf_id);
+
+	if (pf_id < 0 || pf_id >= NBL_MAX_PF)
+		return 0;
+
+	vf_id = vf_id + 1;
+	list_for_each_entry(entry,
+			    &fd_mgt->info[pf_id].list[NBL_CHAN_FDIR_RULE_NORMAL], node) {
+		if (entry->param.vf != vf_id)
+			continue;
+
+		if (entry->param.ring < data->ring_num) {
+			entry->param.global_queue_id = data->map[entry->param.ring];
+			action.dqueue.que_id = entry->param.global_queue_id;
+			action_data = action.data + (NBL_ACT_SET_QUE_IDX << 16);
+		} else {
+			entry->param.global_queue_id = 0xFFFF;
+			action.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST;
+			action.dport.up.port_id = 0x3FF;
+			action.dport.up.upcall_flag = AUX_KEEP_FWD_TYPE;
+			action.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO;
+			action_data = action.data + (NBL_ACT_SET_DPORT << 16);
+		}
+
+		phy_ops->set_fd_action_ram(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					   action_data, entry->action_index, entry->depth_index);
+	}
+
+	return 0;
+}
+
+static int nbl_fd_handle_state_update(u16 type, void *event_data, void *callback_data)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)callback_data;
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_event_acl_state_update_data *data =
+		(struct nbl_event_acl_state_update_data *)event_data;
+
+	if (fd_mgt->state == NBL_FD_STATE_OFF && !data->is_offload) {
+		fd_mgt->state = NBL_FD_STATE_ON;
+		nbl_fd_setup_flow(res_mgt);
+	} else if (fd_mgt->state == NBL_FD_STATE_ON && data->is_offload) {
+		nbl_fd_remove_flow(res_mgt);
+		fd_mgt->state = NBL_FD_STATE_OFF;
+	}
+
+	return 0;
+}
+
+/* --------- Res-layer ops Functions --------- */
+
+static int nbl_fd_get_fd_flow_cnt(void *priv, enum nbl_chan_fdir_rule_type rule_type, u16 vsi_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	int pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id);
+
+	if (pf_id < 0 || pf_id >= NBL_MAX_PF)
+		return -EINVAL;
+
+	if (rule_type >= NBL_CHAN_FDIR_RULE_MAX)
+		return -EINVAL;
+
+	return fd_mgt->info[pf_id].cnt[rule_type];
+}
+
+static int nbl_fd_get_fd_flow_all(void *priv, struct nbl_chan_param_get_fd_flow_all *param,
+				  u32 *rule_locs)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_flow_direct_entry *entry = NULL;
+	int pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, param->vsi_id), index = 0;
+
+	if (pf_id < 0 || pf_id >= NBL_MAX_PF)
+		return -EINVAL;
+
+	if (param->rule_type >= NBL_CHAN_FDIR_RULE_MAX)
+		return -EINVAL;
+
+	list_for_each_entry(entry, &fd_mgt->info[pf_id].list[param->rule_type], node) {
+		if (index < param->start) {
+			index++;
+			continue;
+		}
+
+		if (index >= param->start + param->num)
+			break;
+
+		rule_locs[index++] = entry->param.location;
+	}
+
+	return 0;
+}
+
+static int nbl_fd_get_fd_flow_max(void *priv)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+
+	return fd_mgt->max_spec;
+}
+
+static int nbl_fd_config_fd_flow_state(void *priv, enum nbl_chan_fdir_rule_type rule_type,
+				       u16 vsi_id, u16 state)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_flow_direct_entry *entry = NULL, *entry_safe = NULL;
+	int pf_id;
+
+	pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id);
+	if (pf_id < 0 || pf_id >= NBL_MAX_PF)
+		return -EINVAL;
+
+	if (rule_type >= NBL_CHAN_FDIR_RULE_MAX)
+		return -EINVAL;
+
+	if (state == NBL_FD_STATE_OFF || state == NBL_FD_STATE_FLUSH) {
+		list_for_each_entry_safe(entry, entry_safe,
+
&fd_mgt->info[pf_id].list[rule_type], node) + nbl_fd_del_flow(fd_mgt, &fd_mgt->info[pf_id], entry); + nbl_fd_setup_flow(res_mgt); + } + if (state != NBL_FD_STATE_FLUSH) + fd_mgt->info[pf_id].state[rule_type] = state; + + return 0; +} + +static int nbl_fd_get_fd_flow(void *priv, u16 vsi_id, u32 location, + enum nbl_chan_fdir_rule_type rule_type, + struct nbl_chan_param_fdir_replace *cmd) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_flow_direct_entry *entry = NULL; + int pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + + if (location >= fd_mgt->max_spec || pf_id < 0 || pf_id >= NBL_MAX_PF) + return -EINVAL; + + entry = nbl_fd_find_flow(&fd_mgt->info[pf_id], rule_type, location); + if (!entry) + return -ENOENT; + + memcpy(cmd, &entry->param, sizeof(*cmd) + entry->param.tlv_length); + return 0; +} + +static int nbl_fd_replace_fd_flow(void *priv, struct nbl_chan_param_fdir_replace *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_flow_direct_info *info = NULL; + struct nbl_flow_direct_entry *entry = NULL; + int pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, param->vsi), ret = 0; + + if (pf_id < 0 || pf_id >= NBL_MAX_PF || param->location >= fd_mgt->max_spec) + return -EINVAL; + + if (param->rule_type == NBL_CHAN_FDIR_RULE_NORMAL && + fd_mgt->info[pf_id].state[param->rule_type] == NBL_FD_STATE_OFF) + return -EINVAL; + + info = &fd_mgt->info[pf_id]; + entry = nbl_fd_find_flow(info, param->rule_type, param->location); + ret = nbl_fd_validate_rule(fd_mgt, param, entry); + if (ret) + return ret; + + if (entry) + nbl_fd_del_flow(fd_mgt, info, entry); + + entry = nbl_fd_add_flow(fd_mgt, info, param); + if (!entry) + goto add_entry_fail; + + ret = nbl_fd_setup_flow(res_mgt); + if (ret) + goto setup_flow_fail; + + return 0; + +setup_flow_fail: + nbl_fd_find_and_del_flow(fd_mgt, info, param->rule_type, param->location); +add_entry_fail: + return ret; +} + +static int nbl_fd_remove_fd_flow(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 loc, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_flow_direct_info *info = NULL; + int pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + int ret; + + if (pf_id < 0 || pf_id >= NBL_MAX_PF || loc >= fd_mgt->max_spec) + return -EINVAL; + + info = &fd_mgt->info[pf_id]; + ret = nbl_fd_find_and_del_flow(fd_mgt, info, rule_type, loc); + if (ret) + return ret; + + return nbl_fd_setup_flow(res_mgt); +} + +static void nbl_fd_cfg_update_event(void *priv, bool enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_event_callback event_callback = {0}; + + event_callback.callback_data = res_mgt; + + if (enable) { + event_callback.callback = nbl_fd_handle_state_update; + nbl_event_register(NBL_EVENT_ACL_STATE_UPDATE, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + event_callback.callback = nbl_fd_handle_queue_update; + nbl_event_register(NBL_EVENT_QUEUE_ALLOC, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } else { + event_callback.callback = nbl_fd_handle_state_update; + nbl_event_unregister(NBL_EVENT_ACL_STATE_UPDATE, &event_callback, + 
NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + event_callback.callback = nbl_fd_handle_queue_update; + nbl_event_unregister(NBL_EVENT_QUEUE_ALLOC, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } +} + +char flow_type_name[NBL_CHAN_FDIR_FLOW_MAX_TYPE][16] = { + "Full/Isolate", + "ETHER", + "IPV4", + "IPV6", + "TCP_V4", + "TCP_V6", + "UDP_V4", + "UDP_V6", +}; + +char mode_name[NBL_FD_MODE_MAX][16] = { + "DEFAULT", + "FULL", + "LITE", +}; + +static int nbl_fd_dump_entry_tlv(u16 type, u16 length, u8 *val, void *data) +{ + struct seq_file *m = (struct seq_file *)(data); + + switch (type) { + case NBL_CHAN_FDIR_KEY_SRC_MAC: + seq_printf(m, "\tCompo [ SRC-MAC ]: data %02x-%02x-%02x-%02x-%02x-%02x, mask %02x-%02x-%02x-%02x-%02x-%02x\n", + val[0], val[1], val[2], val[3], val[4], val[5], + val[6], val[7], val[8], val[9], val[10], val[11]); + break; + case NBL_CHAN_FDIR_KEY_DST_MAC: + seq_printf(m, "\tCompo [ DST-MAC ]: data %02x-%02x-%02x-%02x-%02x-%02x, mask %02x-%02x-%02x-%02x-%02x-%02x\n", + val[0], val[1], val[2], val[3], val[4], val[5], + val[6], val[7], val[8], val[9], val[10], val[11]); + break; + case NBL_CHAN_FDIR_KEY_PROTO: + seq_printf(m, "\tCompo [ ETHERTYPE ]: data 0x%04x, mask 0x%04x\n", + *(u16 *)val, *(u16 *)(val + 2)); + break; + case NBL_CHAN_FDIR_KEY_SRC_IPv4: + seq_printf(m, "\tCompo [ SRC-IPV4 ]: data %pI4, mask %pI4\n", + (u32 *)val, (u32 *)(val + 4)); + break; + case NBL_CHAN_FDIR_KEY_DST_IPv4: + seq_printf(m, "\tCompo [ DST-IPV4 ]: data %pI4, mask %pI4\n", + (u32 *)val, (u32 *)(val + 4)); + break; + case NBL_CHAN_FDIR_KEY_L4PROTO: + seq_printf(m, "\tCompo [ IPPROTO ]: data 0x%x, mask 0x%x\n", + *(u8 *)val, *(u8 *)(val + 1)); + break; + case NBL_CHAN_FDIR_KEY_SRC_IPv6: + seq_printf(m, "\tCompo [SRC-IPV6 ]: data %pI6, mask %pI6\n", + val, val + 12); + break; + case NBL_CHAN_FDIR_KEY_DST_IPv6: + seq_printf(m, "\tCompo [DST-IPV6 ]: data %pI6, mask %pI6\n", + val, val + 12); + break; + case NBL_CHAN_FDIR_KEY_SPORT: + seq_printf(m, "\tCompo [ L4-SPORT ]: data 0x%x, mask 0x%x\n", + *(u16 *)val, *(u16 *)(val + 2)); + break; + case NBL_CHAN_FDIR_KEY_DPORT: + seq_printf(m, "\tCompo [ L4-DPORT ]: data 0x%x, mask 0x%x\n", + *(u16 *)val, *(u16 *)(val + 2)); + break; + case NBL_CHAN_FDIR_KEY_UDF: + seq_printf(m, "\tCompo [ USER-DEF ]: data 0x%llx, mask 0x%llx\n", + *(u64 *)val, *(u64 *)(val + 8)); + break; + case NBL_CHAN_FDIR_ACTION_QUEUE: + seq_printf(m, "\tCompo [ GLOBAL-QUE ]: data 0x%llx\n", *(u64 *)val); + break; + case NBL_CHAN_FDIR_ACTION_VSI: + seq_printf(m, "\tCompo [ VSI ]: vsi 0x%llx\n", *(u64 *)val); + break; + default: + break; + } + + return 0; +} + +static void nbl_fd_dump_entry(struct seq_file *m, struct nbl_flow_direct_entry *entry) +{ + struct nbl_chan_param_fdir_replace *param = &entry->param; + + seq_printf(m, "\n[ %-10s]: pid %d, location %4d, global queue id %4u\n", + flow_type_name[param->flow_type], entry->pid, + param->location, param->global_queue_id); + + nbl_flow_direct_parse_tlv_data(entry->param.tlv, entry->param.tlv_length, + nbl_fd_dump_entry_tlv, m); +} + +static void nbl_fd_dump_flow(void *priv, struct seq_file *m) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_flow_direct_entry *entry = NULL; + int i, j; + + seq_puts(m, "\n/* ----------------------- Flow Direct ----------------------- */\n\n"); + + seq_printf(m, "[STATE\t\t %-4s\t]\n[MODE\t\t %-4s\t]\n[DEFAULT_CNT\t %-4d]\n[IPV4_CNT\t 
%-4d\t]\n[L2&IPV6_CNT\t %-4d\t]\n[UDF cnt/layer/offset:\t %-4d %-4d %-4d\t]\n", + fd_mgt->state == NBL_FD_STATE_OFF ? "OFF" : "ON", mode_name[fd_mgt->mode], + fd_mgt->cnt[NBL_FD_PROFILE_DEFAULT], fd_mgt->cnt[NBL_FD_PROFILE_IPV4], + fd_mgt->cnt[NBL_FD_PROFILE_L2_IPV6], fd_mgt->udf_cnt, fd_mgt->udf_layer, + fd_mgt->udf_offset); + + for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) { + for (j = 0; j < NBL_CHAN_FDIR_RULE_MAX; j++) { + seq_printf(m, "\nPF %d/%d: %d flows state %-4s -------------------\n", + i, j, fd_mgt->info[i].cnt[j], + fd_mgt->info[i].state[j] == NBL_FD_STATE_OFF ? "OFF" : "ON"); + + list_for_each_entry(entry, &fd_mgt->info[i].list[j], node) + nbl_fd_dump_entry(m, entry); + } + } + + seq_puts(m, "\n"); +} + +/* NBL_FD_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. + */ +#define NBL_FD_OPS_TBL \ +do { \ + NBL_FD_SET_OPS(get_fd_flow, nbl_fd_get_fd_flow); \ + NBL_FD_SET_OPS(get_fd_flow_cnt, nbl_fd_get_fd_flow_cnt); \ + NBL_FD_SET_OPS(get_fd_flow_all, nbl_fd_get_fd_flow_all); \ + NBL_FD_SET_OPS(get_fd_flow_max, nbl_fd_get_fd_flow_max); \ + NBL_FD_SET_OPS(config_fd_flow_state, nbl_fd_config_fd_flow_state); \ + NBL_FD_SET_OPS(replace_fd_flow, nbl_fd_replace_fd_flow); \ + NBL_FD_SET_OPS(remove_fd_flow, nbl_fd_remove_fd_flow); \ + NBL_FD_SET_OPS(cfg_fd_update_event, nbl_fd_cfg_update_event); \ + NBL_FD_SET_OPS(dump_fd_flow, nbl_fd_dump_flow); \ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_fd_setup_mgt(struct device *dev, struct nbl_flow_direct_mgt **fd_mgt) +{ + int i, j; + + *fd_mgt = devm_kzalloc(dev, sizeof(struct nbl_flow_direct_mgt), GFP_KERNEL); + if (!*fd_mgt) + return -ENOMEM; + + for (i = 0; i < NBL_MAX_PF; i++) { + for (j = 0; j < NBL_CHAN_FDIR_RULE_MAX; j++) { + INIT_LIST_HEAD(&(*fd_mgt)->info[i].list[j]); + (*fd_mgt)->info[i].state[j] = NBL_FD_STATE_OFF; + } + } + + (*fd_mgt)->udf_cnt = 0; + (*fd_mgt)->udf_layer = 0; + + (*fd_mgt)->mode = NBL_FD_MODE_DEFAULT; + (*fd_mgt)->max_spec = NBL_FD_RULE_MAX_DEFAULT; + (*fd_mgt)->state = NBL_FD_STATE_ON; + + return 0; +} + +static void nbl_fd_remove_mgt(struct device *dev, struct nbl_flow_direct_mgt **fd_mgt) +{ + devm_kfree(dev, *fd_mgt); + *fd_mgt = NULL; +} + +int nbl_fd_mgt_start(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_flow_direct_mgt **fd_mgt = &NBL_RES_MGT_TO_FD_MGT(res_mgt); + int ret = 0; + + ret = nbl_fd_setup_mgt(dev, fd_mgt); + + return ret; +} + +void nbl_fd_mgt_stop(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_flow_direct_mgt **fd_mgt = &NBL_RES_MGT_TO_FD_MGT(res_mgt); + + if (!(*fd_mgt)) + return; + + nbl_fd_remove_flow(res_mgt); + nbl_fd_del_flow_all(res_mgt); + nbl_fd_remove_mgt(dev, fd_mgt); +} + +int nbl_fd_setup_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_FD_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_FD_OPS_TBL; +#undef NBL_FD_SET_OPS + + return 0; +} + +void nbl_fd_remove_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_FD_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_FD_OPS_TBL; +#undef NBL_FD_SET_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.h new file mode 100644 index 0000000000000000000000000000000000000000..b61c6c94eee7474757cea4c91b2c434d08ee88d3 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.h @@ -0,0 
+1,104 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_FD_H_ +#define _NBL_FD_H_ + +#include "nbl_resource.h" + +#define NBL_FD_RULE_MAX_512 (512) +#define NBL_FD_RULE_MAX_1024 (1024) +#define NBL_FD_RULE_MAX_1536 (1536) +#define NBL_FD_RULE_MAX_DEFAULT (NBL_FD_RULE_MAX_512) +#define NBL_FD_RULE_MAX (NBL_FD_RULE_MAX_1536) + +#define NBL_FD_TCAM_DEPTH (512) + +#define NBL_FD_IPV4_TCAM_WIDTH (5) +#define NBL_FD_L2_IPV6_TCAM_WIDTH (10) +#define NBL_FD_DEFAULT_MODE_DEPTH (1) +#define NBL_FD_LITE_MODE_DEPTH (4) +#define NBL_FD_FULL_MODE_DEPTH (1) + +#define NBL_FD_UDF_FLEX_WORD_M GENMASK_ULL(31, 0) +#define NBL_FD_UDF_FLEX_OFFS_S 32 +#define NBL_FD_UDF_FLEX_OFFS_M GENMASK_ULL(63, NBL_FD_UDF_FLEX_OFFS_S) +#define NBL_FD_UDF_FLEX_FLTR_M GENMASK_ULL(63, 0) + +union nbl_fd_tcam_default_data_u { + struct nbl_fd_tcam_default_data { + u64 rsv1:12; + u64 dport:16; + u64 padding:8; + u64 l4_proto:8; + u64 l4_dport:16; + u64 l4_sport:16; + u64 ethertype:16; + u64 src_mac:48; + u64 dst_mac:48; + u64 udf:32; + u64 dip_l:64; + u64 dip_h:64; + u64 sip_l:64; + u64 sip_h:64; + u64 pid:4; + } __packed info; +#define NBL_FD_TCAM_DEFAULT_DATA_TAB_WIDTH (sizeof(struct nbl_fd_tcam_default_data) / sizeof(u32)) + u32 data[NBL_FD_TCAM_DEFAULT_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_fd_tcam_default_data)]; +}; + +union nbl_fd_tcam_ipv4_data_u { + struct nbl_fd_tcam_ipv4_data { + u64 rsv1:28; + u64 dport:16; + u64 padding:8; + u64 l4_proto:8; + u64 l4_dport:16; + u64 l4_sport:16; + u64 udf:32; + u64 dip:32; + u64 sip:32; + u64 pid:4; + } __packed info; +#define NBL_FD_TCAM_IPV4_DATA_TAB_WIDTH (sizeof(struct nbl_fd_tcam_ipv4_data) / sizeof(u32)) + u32 data[NBL_FD_TCAM_IPV4_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_fd_tcam_ipv4_data)]; +}; + +union nbl_fd_tcam_l2_ipv6_data_u { + struct nbl_fd_tcam_l2_ipv6_data { + u64 rsv:28; + u64 dport:16; + u64 padding:8; + u64 l4_proto:8; + u64 l4_dport:16; + u64 l4_sport:16; + u64 ehtertype:16; + u64 udf:32; + u32 dip[NBL_IPV6_U32LEN]; + u32 sip[NBL_IPV6_U32LEN]; + u64 pid:4; + } __packed info; +#define NBL_FD_TCAM_L2_IPV6_DATA_TAB_WIDTH (sizeof(struct nbl_fd_tcam_l2_ipv6_data) / sizeof(u32)) + u32 data[NBL_FD_TCAM_L2_IPV6_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_fd_tcam_l2_ipv6_data)]; +}; + +struct nbl_fd_tcam_index { + u16 depth_index; +}; + +struct nbl_fd_tcam_index_info { + struct nbl_fd_tcam_index default_index[NBL_FD_DEFAULT_MODE_DEPTH]; + struct nbl_fd_tcam_index v4[NBL_FD_LITE_MODE_DEPTH]; + struct nbl_fd_tcam_index v6[NBL_FD_FULL_MODE_DEPTH]; + u8 default_cnt; + u8 v4_cnt; + u8 v6_cnt; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h index acb68f2bac4ba2bb74e508da8e1f39de4ddb244a..6ab5638b9fd2cd11a3d1f5946ea36c2055539b7b 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h @@ -18,19 +18,19 @@ #define NBL_KT_BYTE_LEN 40 #define NBL_KT_BYTE_HALF_LEN 20 -#define NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2 0 -#define NBL_EM0_PT_PHY_UP_LLDP_LACP 1 -#define NBL_EM0_PT_PHY_UP_UNICAST_L2 2 -#define NBL_EM0_PT_PHY_DOWN_UNICAST_L2 3 -#define NBL_EM0_PT_PHY_UP_MULTICAST_L2 4 -#define NBL_EM0_PT_PHY_DOWN_MULTICAST_L2 5 -#define NBL_EM0_PT_PHY_UP_MULTICAST_L3 6 -#define NBL_EM0_PT_PHY_DOWN_MULTICAST_L3 7 -#define NBL_EM0_PT_PHY_DPRBAC_IPV4 8 -#define NBL_EM0_PT_PHY_DPRBAC_IPV6 9 -#define NBL_EM0_PT_PHY_UL4S_IPV4 10 -#define 
NBL_EM0_PT_PHY_UL4S_IPV6 11 -#define NBL_EM0_PT_PMD_ND_UPCALL 12 +#define NBL_EM0_PT_PHY_UP_TUNNEL_L2 0 +#define NBL_EM0_PT_PHY_UP_L2 1 +#define NBL_EM0_PT_PHY_DOWN_L2 2 +#define NBL_EM0_PT_PHY_UP_LLDP_LACP 3 +#define NBL_EM0_PT_PMD_ND_UPCALL 4 +#define NBL_EM0_PT_PHY_L2_UP_MULTI_MCAST 5 +#define NBL_EM0_PT_PHY_L3_UP_MULTI_MCAST 6 +#define NBL_EM0_PT_PHY_L2_DOWN_MULTI_MCAST 7 +#define NBL_EM0_PT_PHY_L3_DOWN_MULTI_MCAST 8 +#define NBL_EM0_PT_PHY_DPRBAC_IPV4 9 +#define NBL_EM0_PT_PHY_DPRBAC_IPV6 10 +#define NBL_EM0_PT_PHY_UL4S_IPV4 11 +#define NBL_EM0_PT_PHY_UL4S_IPV6 12 #define NBL_PP0_PROFILE_ID_MIN (0) #define NBL_PP0_PROFILE_ID_MAX (15) @@ -41,9 +41,16 @@ #define NBL_PP_PROFILE_NUM (16) #define NBL_QID_MAP_TABLE_ENTRIES (4096) +#define NBL_EPRO_PF_RSS_RET_TBL_DEPTH (4096) #define NBL_EPRO_RSS_RET_TBL_DEPTH (8192 * 2) #define NBL_EPRO_RSS_ENTRY_SIZE_UNIT (16) +#define NBL_EPRO_PF_RSS_RET_TBL_COUNT (512) +#define NBL_EPRO_PF_RSS_ENTRY_SIZE (5) + +#define NBL_EPRO_RSS_ENTRY_MAX_COUNT (512) +#define NBL_EPRO_RSS_ENTRY_MAX_SIZE (4) + #define NBL_EPRO_RSS_SK_SIZE 40 #define NBL_EPRO_RSS_PER_KEY_SIZE 8 #define NBL_EPRO_RSS_KEY_NUM (NBL_EPRO_RSS_SK_SIZE / NBL_EPRO_RSS_PER_KEY_SIZE) @@ -59,6 +66,26 @@ enum { NBL_KT_FULL_MODE, }; +enum nbl_pp_type { + NBL_PP_TYPE_0, + NBL_PP_TYPE_1, + NBL_PP_TYPE_2, + NBL_PP_TYPE_MAX, +}; + +enum nbl_pp_at_type { + NBL_AT_TYPE_0, + NBL_AT_TYPE_1, + NBL_AT_TYPE_2, + NBL_AT_TYPE_MAX, +}; + +enum nbl_pp_fc_type { + NBL_FC_COMMON_TYPE, + NBL_FC_SPEC_TYPE, + NBL_FC_TYPE_MAX, +}; + #pragma pack(1) union nbl_action_data { struct clear_flag_act { @@ -275,7 +302,6 @@ union nbl_action_data { u16 data; }; - #pragma pack() enum nbl_chan_flow_rule_type { @@ -318,6 +344,8 @@ enum nbl_chan_flow_rule_type { NBL_FLOW_UCAR_CAR_CTRL_ADDR, NBL_FLOW_UCAR_GREEN_CELL_ADDR, NBL_FLOW_UCAR_GREEN_PKT_ADDR, + NBL_FLOW_UPED_VSI_TYPE_REG, + NBL_FLOW_DPED_VSI_TYPE_REG, }; enum nbl_chan_flow_mode { @@ -345,4 +373,18 @@ enum nbl_chan_flow_mode { #define SFF8636_TRANSMIT_COPPER_NEAR_END (0xe) #define SFF8636_TRANSMIT_COPPER_LINEAR_ACTIVE (0xf) +#define NBL_SPORT_ETH_OFFSET 8 + +enum { + NBL_FD_PROFILE_IPV4 = 2, + NBL_FD_PROFILE_L2_IPV6 = 4, + NBL_FD_PROFILE_DEFAULT = 6, + NBL_FD_PROFILE_MAX, +}; + +struct nbl_event_link_status_update_data { + u8 num; + u8 eth_id[NBL_MAX_ETHERNET]; +}; + #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath.h new file mode 100644 index 0000000000000000000000000000000000000000..c011422fb777b810c9f61a12e9bb3c0bf2251dc7 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath.h @@ -0,0 +1,9 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#include "nbl_datapath_upa.h" +#include "nbl_datapath_dpa.h" +#include "nbl_datapath_ucar.h" +#include "nbl_datapath_uped.h" +#include "nbl_datapath_dped.h" +#include "nbl_datapath_dstore.h" diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpa.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpa.h new file mode 100644 index 0000000000000000000000000000000000000000..a5e4301fd572c17396f229d6247eb7f6b623b2c0 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpa.h @@ -0,0 +1,765 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DPA_H +#define NBL_DPA_H 1 + +#include + +#define NBL_DPA_BASE (0x0085C000) + +#define NBL_DPA_INT_STATUS_ADDR (0x85c000) +#define NBL_DPA_INT_STATUS_DEPTH (1) +#define NBL_DPA_INT_STATUS_WIDTH (32) +#define NBL_DPA_INT_STATUS_DWLEN (1) +union dpa_int_status_u { + struct dpa_int_status { + u32 fatal_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_underflow:1; /* [1] Default:0x0 RWC */ + u32 fifo_overflow:1; /* [2] Default:0x0 RWC */ + u32 fsm_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 rsv1:1; /* [5] Default:0x0 RO */ + u32 cfg_err:1; /* [6] Default:0x0 RWC */ + u32 ucor_err:1; /* [7] Default:0x0 RWC */ + u32 cor_err:1; /* [8] Default:0x0 RWC */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DPA_INT_MASK_ADDR (0x85c004) +#define NBL_DPA_INT_MASK_DEPTH (1) +#define NBL_DPA_INT_MASK_WIDTH (32) +#define NBL_DPA_INT_MASK_DWLEN (1) +union dpa_int_mask_u { + struct dpa_int_mask { + u32 fatal_err:1; /* [0] Default:0x0 RW */ + u32 fifo_underflow:1; /* [1] Default:0x0 RW */ + u32 fifo_overflow:1; /* [2] Default:0x0 RW */ + u32 fsm_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 rsv1:1; /* [5] Default:0x0 RO */ + u32 cfg_err:1; /* [6] Default:0x0 RW */ + u32 ucor_err:1; /* [7] Default:0x0 RW */ + u32 cor_err:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DPA_INT_SET_ADDR (0x85c008) +#define NBL_DPA_INT_SET_DEPTH (1) +#define NBL_DPA_INT_SET_WIDTH (32) +#define NBL_DPA_INT_SET_DWLEN (1) +union dpa_int_set_u { + struct dpa_int_set { + u32 fatal_err:1; /* [0] Default:0x0 WO */ + u32 fifo_underflow:1; /* [1] Default:0x0 WO */ + u32 fifo_overflow:1; /* [2] Default:0x0 WO */ + u32 fsm_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 rsv1:1; /* [5] Default:0x0 RO */ + u32 cfg_err:1; /* [6] Default:0x0 WO */ + u32 ucor_err:1; /* [7] Default:0x0 WO */ + u32 cor_err:1; /* [8] Default:0x0 WO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_INT_SET_DWLEN]; +} __packed; + +#define NBL_DPA_INIT_DONE_ADDR (0x85c00c) +#define NBL_DPA_INIT_DONE_DEPTH (1) +#define NBL_DPA_INIT_DONE_WIDTH (32) +#define NBL_DPA_INIT_DONE_DWLEN (1) +union dpa_init_done_u { + struct dpa_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DPA_CIF_ERR_INFO_ADDR (0x85c040) +#define NBL_DPA_CIF_ERR_INFO_DEPTH (1) +#define NBL_DPA_CIF_ERR_INFO_WIDTH (32) +#define NBL_DPA_CIF_ERR_INFO_DWLEN (1) +union dpa_cif_err_info_u { + struct dpa_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPA_CFG_ERR_INFO_ADDR (0x85c050) +#define NBL_DPA_CFG_ERR_INFO_DEPTH (1) +#define NBL_DPA_CFG_ERR_INFO_WIDTH (32) +#define NBL_DPA_CFG_ERR_INFO_DWLEN (1) +union dpa_cfg_err_info_u { + struct dpa_cfg_err_info { + u32 id0:2; /* [1:0] Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPA_CAR_CTRL_ADDR (0x85c100) +#define 
NBL_DPA_CAR_CTRL_DEPTH (1) +#define NBL_DPA_CAR_CTRL_WIDTH (32) +#define NBL_DPA_CAR_CTRL_DWLEN (1) +union dpa_car_ctrl_u { + struct dpa_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DPA_INIT_START_ADDR (0x85c180) +#define NBL_DPA_INIT_START_DEPTH (1) +#define NBL_DPA_INIT_START_WIDTH (32) +#define NBL_DPA_INIT_START_DWLEN (1) +union dpa_init_start_u { + struct dpa_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_INIT_START_DWLEN]; +} __packed; + +#define NBL_DPA_LAYO_CKSUM0_CTRL_ADDR (0x85c1b0) +#define NBL_DPA_LAYO_CKSUM0_CTRL_DEPTH (4) +#define NBL_DPA_LAYO_CKSUM0_CTRL_WIDTH (32) +#define NBL_DPA_LAYO_CKSUM0_CTRL_DWLEN (1) +union dpa_layo_cksum0_ctrl_u { + struct dpa_layo_cksum0_ctrl { + u32 data:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_DPA_LAYO_CKSUM0_CTRL_DWLEN]; +} __packed; +#define NBL_DPA_LAYO_CKSUM0_CTRL_REG(r) (NBL_DPA_LAYO_CKSUM0_CTRL_ADDR + \ + (NBL_DPA_LAYO_CKSUM0_CTRL_DWLEN * 4) * (r)) + +#define NBL_DPA_FWD_TYPE_STAGE_0_ADDR (0x85c1d0) +#define NBL_DPA_FWD_TYPE_STAGE_0_DEPTH (1) +#define NBL_DPA_FWD_TYPE_STAGE_0_WIDTH (32) +#define NBL_DPA_FWD_TYPE_STAGE_0_DWLEN (1) +union dpa_fwd_type_stage_0_u { + struct dpa_fwd_type_stage_0 { + u32 tbl:32; /* [31:0] Default:0xF3FFFFC2 RW */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_STAGE_0_DWLEN]; +} __packed; + +#define NBL_DPA_FWD_TYPE_STAGE_1_ADDR (0x85c1d4) +#define NBL_DPA_FWD_TYPE_STAGE_1_DEPTH (1) +#define NBL_DPA_FWD_TYPE_STAGE_1_WIDTH (32) +#define NBL_DPA_FWD_TYPE_STAGE_1_DWLEN (1) +union dpa_fwd_type_stage_1_u { + struct dpa_fwd_type_stage_1 { + u32 tbl:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_STAGE_1_DWLEN]; +} __packed; + +#define NBL_DPA_FWD_TYPE_STAGE_2_ADDR (0x85c1d8) +#define NBL_DPA_FWD_TYPE_STAGE_2_DEPTH (1) +#define NBL_DPA_FWD_TYPE_STAGE_2_WIDTH (32) +#define NBL_DPA_FWD_TYPE_STAGE_2_DWLEN (1) +union dpa_fwd_type_stage_2_u { + struct dpa_fwd_type_stage_2 { + u32 tbl:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_STAGE_2_DWLEN]; +} __packed; + +#define NBL_DPA_FWD_TYPE_BYPASS_0_ADDR (0x85c1e0) +#define NBL_DPA_FWD_TYPE_BYPASS_0_DEPTH (1) +#define NBL_DPA_FWD_TYPE_BYPASS_0_WIDTH (32) +#define NBL_DPA_FWD_TYPE_BYPASS_0_DWLEN (1) +union dpa_fwd_type_bypass_0_u { + struct dpa_fwd_type_bypass_0 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_BYPASS_0_DWLEN]; +} __packed; + +#define NBL_DPA_FWD_TYPE_BYPASS_1_ADDR (0x85c1e4) +#define NBL_DPA_FWD_TYPE_BYPASS_1_DEPTH (1) +#define NBL_DPA_FWD_TYPE_BYPASS_1_WIDTH (32) +#define NBL_DPA_FWD_TYPE_BYPASS_1_DWLEN (1) +union dpa_fwd_type_bypass_1_u { + struct dpa_fwd_type_bypass_1 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_BYPASS_1_DWLEN]; +} __packed; + +#define NBL_DPA_FWD_TYPE_BYPASS_2_ADDR (0x85c1e8) +#define NBL_DPA_FWD_TYPE_BYPASS_2_DEPTH (1) +#define NBL_DPA_FWD_TYPE_BYPASS_2_WIDTH (32) +#define NBL_DPA_FWD_TYPE_BYPASS_2_DWLEN (1) +union dpa_fwd_type_bypass_2_u { + struct dpa_fwd_type_bypass_2 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; 
/* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_BYPASS_2_DWLEN]; +} __packed; + +#define NBL_DPA_DPORT_EXTRACT_ADDR (0x85c1ec) +#define NBL_DPA_DPORT_EXTRACT_DEPTH (1) +#define NBL_DPA_DPORT_EXTRACT_WIDTH (32) +#define NBL_DPA_DPORT_EXTRACT_DWLEN (1) +union dpa_dport_extract_u { + struct dpa_dport_extract { + u32 id:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_DPORT_EXTRACT_DWLEN]; +} __packed; + +#define NBL_DPA_LAYO_PHV_ADDR (0x85c1f0) +#define NBL_DPA_LAYO_PHV_DEPTH (1) +#define NBL_DPA_LAYO_PHV_WIDTH (32) +#define NBL_DPA_LAYO_PHV_DWLEN (1) +union dpa_layo_phv_u { + struct dpa_layo_phv { + u32 len:7; /* [6:0] Default:0x5A RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_LAYO_PHV_DWLEN]; +} __packed; + +#define NBL_DPA_L4S_PAD_ADDR (0x85c1f4) +#define NBL_DPA_L4S_PAD_DEPTH (1) +#define NBL_DPA_L4S_PAD_WIDTH (32) +#define NBL_DPA_L4S_PAD_DWLEN (1) +union dpa_l4s_pad_u { + struct dpa_l4s_pad { + u32 p_length:7; /* [6:0] Default:0x3C RW */ + u32 en:1; /* [7] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_L4S_PAD_DWLEN]; +} __packed; + +#define NBL_DPA_IP_EXT_PROTOCOL_ADDR (0x85c1fc) +#define NBL_DPA_IP_EXT_PROTOCOL_DEPTH (1) +#define NBL_DPA_IP_EXT_PROTOCOL_WIDTH (32) +#define NBL_DPA_IP_EXT_PROTOCOL_DWLEN (1) +union dpa_ip_ext_protocol_u { + struct dpa_ip_ext_protocol { + u32 tcp:8; /* [7:0] Default:0x6 RW */ + u32 udp:8; /* [15:8] Default:0x11 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_IP_EXT_PROTOCOL_DWLEN]; +} __packed; + +#define NBL_DPA_L3V6_ML_DA_ADDR (0x85c204) +#define NBL_DPA_L3V6_ML_DA_DEPTH (1) +#define NBL_DPA_L3V6_ML_DA_WIDTH (32) +#define NBL_DPA_L3V6_ML_DA_DWLEN (1) +union dpa_l3v6_ml_da_u { + struct dpa_l3v6_ml_da { + u32 ml_da:16; /* [15:0] Default:0x3333 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_L3V6_ML_DA_DWLEN]; +} __packed; + +#define NBL_DPA_NEXT_KEY_ADDR (0x85c208) +#define NBL_DPA_NEXT_KEY_DEPTH (1) +#define NBL_DPA_NEXT_KEY_WIDTH (32) +#define NBL_DPA_NEXT_KEY_DWLEN (1) +union dpa_next_key_u { + struct dpa_next_key { + u32 key_b:8; /* [7:0] Default:0x10 RW */ + u32 key_a:8; /* [15:8] Default:0x0C RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_NEXT_KEY_DWLEN]; +} __packed; + +#define NBL_DPA_L3_ML_DA_ADDR (0x85c20c) +#define NBL_DPA_L3_ML_DA_DEPTH (1) +#define NBL_DPA_L3_ML_DA_WIDTH (32) +#define NBL_DPA_L3_ML_DA_DWLEN (1) +union dpa_l3_ml_da_u { + struct dpa_l3_ml_da { + u32 ml_da_0:16; /* [15:0] Default:0x5e00 RW */ + u32 ml_da_1:16; /* [31:16] Default:0x0100 RW */ + } __packed info; + u32 data[NBL_DPA_L3_ML_DA_DWLEN]; +} __packed; + +#define NBL_DPA_CK_CTRL_ADDR (0x85c210) +#define NBL_DPA_CK_CTRL_DEPTH (1) +#define NBL_DPA_CK_CTRL_WIDTH (32) +#define NBL_DPA_CK_CTRL_DWLEN (1) +union dpa_ck_ctrl_u { + struct dpa_ck_ctrl { + u32 tcp_csum_en:1; /* [0] Default:0x1 RW */ + u32 udp_csum_en:1; /* [1] Default:0x1 RW */ + u32 sctp_crc32c_en:1; /* [2] Default:0x1 RW */ + u32 ipv4_ck_en:1; /* [3] Default:0x1 RW */ + u32 ipv6_ck_en:1; /* [4] Default:0x1 RW */ + u32 DA_ck_en:1; /* [5] Default:0x1 RW */ + u32 ipv6_ext_en:1; /* [6] Default:0x0 RW */ + u32 vlan_error_en:1; /* [7] Default:0x1 RW */ + u32 ctrl_p_en:1; /* [8] Default:0x0 RW */ + u32 ip_tlen_ck_en:1; /* [9] Default:0x0 RW */ + u32 not_uc_p_plck_aux_en:1; /* [10] Default:0x0 RW */ + u32 sctp_crc_plck_aux_en:1; 
/* [11] Default:0x1 RW */ + u32 tcp_csum_offset_id:2; /* [13:12] Default:0x2 RW */ + u32 udp_csum_offset_id:2; /* [15:14] Default:0x2 RW */ + u32 sctp_crc32c_offset_id:2; /* [17:16] Default:0x2 RW */ + u32 ipv4_ck_offset_id:2; /* [19:18] Default:0x1 RW */ + u32 ipv6_ck_offset_id:2; /* [21:20] Default:0x1 RW */ + u32 DA_ck_offset_id:2; /* [23:22] Default:0x0 RW */ + u32 plck_offset_id:2; /* [25:24] Default:0x3 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_CK_CTRL_DWLEN]; +} __packed; + +#define NBL_DPA_MC_INDEX_ADDR (0x85c214) +#define NBL_DPA_MC_INDEX_DEPTH (1) +#define NBL_DPA_MC_INDEX_WIDTH (32) +#define NBL_DPA_MC_INDEX_DWLEN (1) +union dpa_mc_index_u { + struct dpa_mc_index { + u32 l2_mc_index:5; /* [4:0] Default:0x8 RW */ + u32 rsv2:3; /* [7:5] Default:0x00 RO */ + u32 l3_mc_index:5; /* [12:8] Default:0x9 RW */ + u32 rsv1:3; /* [15:13] Default:0x00 RO */ + u32 ctrl_p_index:5; /* [20:16] Default:0xF RW */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_MC_INDEX_DWLEN]; +} __packed; + +#define NBL_DPA_CTRL_P_DA_ADDR (0x85c218) +#define NBL_DPA_CTRL_P_DA_DEPTH (1) +#define NBL_DPA_CTRL_P_DA_WIDTH (32) +#define NBL_DPA_CTRL_P_DA_DWLEN (1) +union dpa_ctrl_p_da_u { + struct dpa_ctrl_p_da { + u32 ctrl_da_0:16; /* [15:0] Default:0xC200 RW */ + u32 ctrl_da_1:16; /* [31:16] Default:0x0180 RW */ + } __packed info; + u32 data[NBL_DPA_CTRL_P_DA_DWLEN]; +} __packed; + +#define NBL_DPA_VLAN_INDEX_ADDR (0x85c220) +#define NBL_DPA_VLAN_INDEX_DEPTH (1) +#define NBL_DPA_VLAN_INDEX_WIDTH (32) +#define NBL_DPA_VLAN_INDEX_DWLEN (1) +union dpa_vlan_index_u { + struct dpa_vlan_index { + u32 o_vlan2_index:5; /* [4:0] Default:0x11 RW */ + u32 rsv1:3; /* [7:5] Default:0x0 RO */ + u32 o_vlan1_index:5; /* [12:8] Default:0x10 RW */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_VLAN_INDEX_DWLEN]; +} __packed; + +#define NBL_DPA_PRI_VLAN_INDEX_ADDR (0x85c224) +#define NBL_DPA_PRI_VLAN_INDEX_DEPTH (1) +#define NBL_DPA_PRI_VLAN_INDEX_WIDTH (32) +#define NBL_DPA_PRI_VLAN_INDEX_DWLEN (1) +union dpa_pri_vlan_index_u { + struct dpa_pri_vlan_index { + u32 ext_vlan2:7; /* [6:0] Default:0x30 RW */ + u32 rsv1:1; /* [7] Default:0x0 RO */ + u32 ext_vlan1:7; /* [14:8] Default:0x2E RW */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PRI_VLAN_INDEX_DWLEN]; +} __packed; + +#define NBL_DPA_PRI_DSCP_INDEX_ADDR (0x85c228) +#define NBL_DPA_PRI_DSCP_INDEX_DEPTH (1) +#define NBL_DPA_PRI_DSCP_INDEX_WIDTH (32) +#define NBL_DPA_PRI_DSCP_INDEX_DWLEN (1) +union dpa_pri_dscp_index_u { + struct dpa_pri_dscp_index { + u32 ext_dscp:7; /* [6:0] Default:0x32 RW */ + u32 rsv2:9; /* [15:7] Default:0x0 RO */ + u32 ipv4_flag:5; /* [20:16] Default:0x1 RW */ + u32 rsv1:3; /* [23:21] Default:0x0 RO */ + u32 ipv6_flag:5; /* [28:24] Default:0x2 RW */ + u32 rsv:3; /* [31:29] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PRI_DSCP_INDEX_DWLEN]; +} __packed; + +#define NBL_DPA_RDMA_INDEX_ADDR (0x85c22c) +#define NBL_DPA_RDMA_INDEX_DEPTH (1) +#define NBL_DPA_RDMA_INDEX_WIDTH (32) +#define NBL_DPA_RDMA_INDEX_DWLEN (1) +union dpa_rdma_index_u { + struct dpa_rdma_index { + u32 rdma_index:5; /* [4:0] Default:0xA RW */ + u32 rsv:27; /* [31:5] Default:0x00 RO */ + } __packed info; + u32 data[NBL_DPA_RDMA_INDEX_DWLEN]; +} __packed; + +#define NBL_DPA_PRI_SEL_CONF_ADDR (0x85c230) +#define NBL_DPA_PRI_SEL_CONF_DEPTH (6) +#define NBL_DPA_PRI_SEL_CONF_WIDTH (32) +#define NBL_DPA_PRI_SEL_CONF_DWLEN (1) +union 
dpa_pri_sel_conf_u { + struct dpa_pri_sel_conf { + u32 pri_sel:5; /* [4:0] Default:0x0 RW */ + u32 pri_default:3; /* [7:5] Default:0x0 RW */ + u32 pri_disen:1; /* [8] Default:0x1 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PRI_SEL_CONF_DWLEN]; +} __packed; +#define NBL_DPA_PRI_SEL_CONF_REG(r) (NBL_DPA_PRI_SEL_CONF_ADDR + \ + (NBL_DPA_PRI_SEL_CONF_DWLEN * 4) * (r)) + +#define NBL_DPA_ERROR_DROP_ADDR (0x85c248) +#define NBL_DPA_ERROR_DROP_DEPTH (1) +#define NBL_DPA_ERROR_DROP_WIDTH (32) +#define NBL_DPA_ERROR_DROP_DWLEN (1) +union dpa_error_drop_u { + struct dpa_error_drop { + u32 en:7; /* [6:0] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_ERROR_DROP_DWLEN]; +} __packed; + +#define NBL_DPA_ERROR_CODE_ADDR (0x85c24c) +#define NBL_DPA_ERROR_CODE_DEPTH (1) +#define NBL_DPA_ERROR_CODE_WIDTH (32) +#define NBL_DPA_ERROR_CODE_DWLEN (1) +union dpa_error_code_u { + struct dpa_error_code { + u32 no:32; /* [31:0] Default:0x09123456 RW */ + } __packed info; + u32 data[NBL_DPA_ERROR_CODE_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_SCAN_ADDR (0x85c250) +#define NBL_DPA_PTYPE_SCAN_DEPTH (1) +#define NBL_DPA_PTYPE_SCAN_WIDTH (32) +#define NBL_DPA_PTYPE_SCAN_DWLEN (1) +union dpa_ptype_scan_u { + struct dpa_ptype_scan { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PTYPE_SCAN_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_SCAN_TH_ADDR (0x85c254) +#define NBL_DPA_PTYPE_SCAN_TH_DEPTH (1) +#define NBL_DPA_PTYPE_SCAN_TH_WIDTH (32) +#define NBL_DPA_PTYPE_SCAN_TH_DWLEN (1) +union dpa_ptype_scan_th_u { + struct dpa_ptype_scan_th { + u32 th:32; /* [31:00] Default:0x40 RW */ + } __packed info; + u32 data[NBL_DPA_PTYPE_SCAN_TH_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_SCAN_MASK_ADDR (0x85c258) +#define NBL_DPA_PTYPE_SCAN_MASK_DEPTH (1) +#define NBL_DPA_PTYPE_SCAN_MASK_WIDTH (32) +#define NBL_DPA_PTYPE_SCAN_MASK_DWLEN (1) +union dpa_ptype_scan_mask_u { + struct dpa_ptype_scan_mask { + u32 addr:8; /* [7:0] Default:0x0 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PTYPE_SCAN_MASK_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_INSERT_SEARCH_ADDR (0x85c25c) +#define NBL_DPA_PTYPE_INSERT_SEARCH_DEPTH (1) +#define NBL_DPA_PTYPE_INSERT_SEARCH_WIDTH (32) +#define NBL_DPA_PTYPE_INSERT_SEARCH_DWLEN (1) +union dpa_ptype_insert_search_u { + struct dpa_ptype_insert_search { + u32 ctrl:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PTYPE_INSERT_SEARCH_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_INSERT_SEARCH_0_ADDR (0x85c260) +#define NBL_DPA_PTYPE_INSERT_SEARCH_0_DEPTH (1) +#define NBL_DPA_PTYPE_INSERT_SEARCH_0_WIDTH (32) +#define NBL_DPA_PTYPE_INSERT_SEARCH_0_DWLEN (1) +union dpa_ptype_insert_search_0_u { + struct dpa_ptype_insert_search_0 { + u32 key0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPA_PTYPE_INSERT_SEARCH_0_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ADDR (0x85c268) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_DEPTH (1) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_WIDTH (32) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_DWLEN (1) +union dpa_ptype_insert_search_result_u { + struct dpa_ptype_insert_search_result { + u32 result:8; /* [7:0] Default:0x0 RO */ + u32 hit:1; /* [8] Default:0x0 RO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ACK_ADDR (0x85c270) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DEPTH (1) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ACK_WIDTH (32) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DWLEN (1) +union dpa_ptype_insert_search_result_ack_u { + struct dpa_ptype_insert_search_result_ack { + u32 vld:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DWLEN]; +} __packed; + +#define NBL_DPA_CFG_TEST_ADDR (0x85c80c) +#define NBL_DPA_CFG_TEST_DEPTH (1) +#define NBL_DPA_CFG_TEST_WIDTH (32) +#define NBL_DPA_CFG_TEST_DWLEN (1) +union dpa_cfg_test_u { + struct dpa_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPA_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_DPA_BP_STATE_ADDR (0x85cb00) +#define NBL_DPA_BP_STATE_DEPTH (1) +#define NBL_DPA_BP_STATE_WIDTH (32) +#define NBL_DPA_BP_STATE_DWLEN (1) +union dpa_bp_state_u { + struct dpa_bp_state { + u32 pa_rmux_data_bp:1; /* [0] Default:0x0 RO */ + u32 pa_rmux_info_bp:1; /* [1] Default:0x0 RO */ + u32 store_pa_data_bp:1; /* [2] Default:0x0 RO */ + u32 store_pa_info_bp:1; /* [3] Default:0x0 RO */ + u32 rx_data_fifo_afull:1; /* [4] Default:0x0 RO */ + u32 rx_info_fifo_afull:1; /* [5] Default:0x0 RO */ + u32 rx_ctrl_fifo_afull:1; /* [6] Default:0x0 RO */ + u32 cinf1_fifo_afull:1; /* [7] Default:0x0 RO */ + u32 ctrl_cinf1_fifo_afull:1; /* [8] Default:0x0 RO */ + u32 layo_info_fifo_afull:1; /* [9] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_BP_STATE_DWLEN]; +} __packed; + +#define NBL_DPA_BP_HISTORY_ADDR (0x85cb04) +#define NBL_DPA_BP_HISTORY_DEPTH (1) +#define NBL_DPA_BP_HISTORY_WIDTH (32) +#define NBL_DPA_BP_HISTORY_DWLEN (1) +union dpa_bp_history_u { + struct dpa_bp_history { + u32 pa_rmux_data_bp:1; /* [0] Default:0x0 RC */ + u32 pa_rmux_info_bp:1; /* [1] Default:0x0 RC */ + u32 store_pa_data_bp:1; /* [2] Default:0x0 RC */ + u32 store_pa_info_bp:1; /* [3] Default:0x0 RC */ + u32 rx_data_fifo_afull:1; /* [4] Default:0x0 RC */ + u32 rx_info_fifo_afull:1; /* [5] Default:0x0 RC */ + u32 rx_ctrl_fifo_afull:1; /* [6] Default:0x0 RC */ + u32 cinf1_fifo_afull:1; /* [7] Default:0x0 RC */ + u32 ctrl_cinf1_fifo_afull:1; /* [8] Default:0x0 RC */ + u32 layo_info_fifo_afull:1; /* [9] Default:0x0 RC */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_DPA_PRI_CONF_TABLE_ADDR (0x85e000) +#define NBL_DPA_PRI_CONF_TABLE_DEPTH (48) +#define NBL_DPA_PRI_CONF_TABLE_WIDTH (32) +#define NBL_DPA_PRI_CONF_TABLE_DWLEN (1) +union dpa_pri_conf_table_u { + struct dpa_pri_conf_table { + u32 pri0:4; /* [3:0] Default:0x0 RW */ + u32 pri1:4; /* [7:4] Default:0x0 RW */ + u32 pri2:4; /* [11:8] Default:0x0 RW */ + u32 pri3:4; /* [15:12] Default:0x0 RW */ + u32 pri4:4; /* [19:16] Default:0x0 RW */ + u32 pri5:4; /* [23:20] Default:0x0 RW */ + u32 pri6:4; /* [27:24] Default:0x0 RW */ + u32 pri7:4; /* [31:28] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPA_PRI_CONF_TABLE_DWLEN]; +} __packed; +#define NBL_DPA_PRI_CONF_TABLE_REG(r) (NBL_DPA_PRI_CONF_TABLE_ADDR + \ + (NBL_DPA_PRI_CONF_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPA_KEY_TCAM_ADDR (0x85f000) +#define NBL_DPA_KEY_TCAM_DEPTH (128) +#define NBL_DPA_KEY_TCAM_WIDTH (64) +#define NBL_DPA_KEY_TCAM_DWLEN (2) +union dpa_key_tcam_u { + struct dpa_key_tcam { + u32 key_b:16; /* 
[15:0] Default:0x0 RW */ + u32 key_a:16; /* [31:16] Default:0x0 RW */ + u32 key_valid:1; /* [32] Default:0x0 RW */ + u32 rsv:31; /* [63:33] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_KEY_TCAM_DWLEN]; +} __packed; +#define NBL_DPA_KEY_TCAM_REG(r) (NBL_DPA_KEY_TCAM_ADDR + \ + (NBL_DPA_KEY_TCAM_DWLEN * 4) * (r)) + +#define NBL_DPA_MASK_TCAM_ADDR (0x85f800) +#define NBL_DPA_MASK_TCAM_DEPTH (128) +#define NBL_DPA_MASK_TCAM_WIDTH (32) +#define NBL_DPA_MASK_TCAM_DWLEN (1) +union dpa_mask_tcam_u { + struct dpa_mask_tcam { + u32 mask_b:16; /* [15:0] Default:0x0 RW */ + u32 mask_a:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPA_MASK_TCAM_DWLEN]; +} __packed; +#define NBL_DPA_MASK_TCAM_REG(r) (NBL_DPA_MASK_TCAM_ADDR + \ + (NBL_DPA_MASK_TCAM_DWLEN * 4) * (r)) + +#define NBL_DPA_ACT_TABLE_ADDR (0x860000) +#define NBL_DPA_ACT_TABLE_DEPTH (128) +#define NBL_DPA_ACT_TABLE_WIDTH (128) +#define NBL_DPA_ACT_TABLE_DWLEN (4) +union dpa_act_table_u { + struct dpa_act_table { + u32 flag_control_0:8; /* [7:0] Default:0x0 RW */ + u32 flag_control_1:8; /* [15:8] Default:0x0 RW */ + u32 flag_control_2:8; /* [23:16] Default:0x0 RW */ + u32 legality_check:8; /* [31:24] Default:0x0 RW */ + u32 nxt_off_B:8; /* [39:32] Default:0x0 RW */ + u32 nxt_off_A:8; /* [47:40] Default:0x0 RW */ + u32 protocol_header_off:8; /* [55:48] Default:0x0 RW */ + u32 payload_length:8; /* [63:56] Default:0x0 RW */ + u32 mask:8; /* [71:64] Default:0x0 RW */ + u32 nxt_stg:4; /* [75:72] Default:0x0 RW */ + u32 rsv_l:32; /* [127:76] Default:0x0 RO */ + u32 rsv_h:20; /* [127:76] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_ACT_TABLE_DWLEN]; +} __packed; +#define NBL_DPA_ACT_TABLE_REG(r) (NBL_DPA_ACT_TABLE_ADDR + \ + (NBL_DPA_ACT_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPA_EXT_CONF_TABLE_ADDR (0x861000) +#define NBL_DPA_EXT_CONF_TABLE_DEPTH (512) +#define NBL_DPA_EXT_CONF_TABLE_WIDTH (32) +#define NBL_DPA_EXT_CONF_TABLE_DWLEN (1) +union dpa_ext_conf_table_u { + struct dpa_ext_conf_table { + u32 dst_offset:8; /* [7:0] Default:0x0 RW */ + u32 source_offset:6; /* [13:8] Default:0x0 RW */ + u32 mode_start_off:2; /* [15:14] Default:0x0 RW */ + u32 lx_sel:2; /* [17:16] Default:0x0 RW */ + u32 mode_sel:1; /* [18] Default:0x0 RW */ + u32 op_en:1; /* [19] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_EXT_CONF_TABLE_DWLEN]; +} __packed; +#define NBL_DPA_EXT_CONF_TABLE_REG(r) (NBL_DPA_EXT_CONF_TABLE_ADDR + \ + (NBL_DPA_EXT_CONF_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPA_EXT_INDEX_TCAM_ADDR (0x862000) +#define NBL_DPA_EXT_INDEX_TCAM_DEPTH (32) +#define NBL_DPA_EXT_INDEX_TCAM_WIDTH (64) +#define NBL_DPA_EXT_INDEX_TCAM_DWLEN (2) +union dpa_ext_index_tcam_u { + struct dpa_ext_index_tcam { + u32 type_index:32; /* [31:0] Default:0x0 RW */ + u32 type_valid:1; /* [32] Default:0x0 RW */ + u32 rsv:31; /* [63:33] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_EXT_INDEX_TCAM_DWLEN]; +} __packed; +#define NBL_DPA_EXT_INDEX_TCAM_REG(r) (NBL_DPA_EXT_INDEX_TCAM_ADDR + \ + (NBL_DPA_EXT_INDEX_TCAM_DWLEN * 4) * (r)) + +#define NBL_DPA_EXT_INDEX_TCAM_MASK_ADDR (0x862200) +#define NBL_DPA_EXT_INDEX_TCAM_MASK_DEPTH (32) +#define NBL_DPA_EXT_INDEX_TCAM_MASK_WIDTH (32) +#define NBL_DPA_EXT_INDEX_TCAM_MASK_DWLEN (1) +union dpa_ext_index_tcam_mask_u { + struct dpa_ext_index_tcam_mask { + u32 mask:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPA_EXT_INDEX_TCAM_MASK_DWLEN]; +} __packed; +#define NBL_DPA_EXT_INDEX_TCAM_MASK_REG(r) 
(NBL_DPA_EXT_INDEX_TCAM_MASK_ADDR + \ + (NBL_DPA_EXT_INDEX_TCAM_MASK_DWLEN * 4) * (r)) + +#define NBL_DPA_EXT_INDEX_TABLE_ADDR (0x862300) +#define NBL_DPA_EXT_INDEX_TABLE_DEPTH (32) +#define NBL_DPA_EXT_INDEX_TABLE_WIDTH (32) +#define NBL_DPA_EXT_INDEX_TABLE_DWLEN (1) +union dpa_ext_index_table_u { + struct dpa_ext_index_table { + u32 p_index:3; /* [2:0] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_EXT_INDEX_TABLE_DWLEN]; +} __packed; +#define NBL_DPA_EXT_INDEX_TABLE_REG(r) (NBL_DPA_EXT_INDEX_TABLE_ADDR + \ + (NBL_DPA_EXT_INDEX_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPA_TYPE_INDEX_TCAM_ADDR (0x864000) +#define NBL_DPA_TYPE_INDEX_TCAM_DEPTH (256) +#define NBL_DPA_TYPE_INDEX_TCAM_WIDTH (128) +#define NBL_DPA_TYPE_INDEX_TCAM_DWLEN (4) +union dpa_type_index_tcam_u { + struct dpa_type_index_tcam { + u32 layo_x:32; /* [31:0] Default:0xFFFFFFFF RW */ + u32 layo_y:32; /* [63:32] Default:0xFFFFFFFF RW */ + u32 type_valid:1; /* [64] Default:0x0 RW */ + u32 rsv_l:32; /* [127:65] Default:0x0 RO */ + u32 rsv_h:31; /* [127:65] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_TYPE_INDEX_TCAM_DWLEN]; +} __packed; +#define NBL_DPA_TYPE_INDEX_TCAM_REG(r) (NBL_DPA_TYPE_INDEX_TCAM_ADDR + \ + (NBL_DPA_TYPE_INDEX_TCAM_DWLEN * 4) * (r)) + +#define NBL_DPA_PACKET_TYPE_TABLE_ADDR (0x866000) +#define NBL_DPA_PACKET_TYPE_TABLE_DEPTH (256) +#define NBL_DPA_PACKET_TYPE_TABLE_WIDTH (32) +#define NBL_DPA_PACKET_TYPE_TABLE_DWLEN (1) +union dpa_packet_type_table_u { + struct dpa_packet_type_table { + u32 p_type:8; /* [7:0] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PACKET_TYPE_TABLE_DWLEN]; +} __packed; +#define NBL_DPA_PACKET_TYPE_TABLE_REG(r) (NBL_DPA_PACKET_TYPE_TABLE_ADDR + \ + (NBL_DPA_PACKET_TYPE_TABLE_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h new file mode 100644 index 0000000000000000000000000000000000000000..313fe6c4bb6b3b34e23a831dc60b3046d757a4f4 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h @@ -0,0 +1,2152 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + // Code generated by interstellar. DO NOT EDIT. 
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h new file mode 100644 index 0000000000000000000000000000000000000000..313fe6c4bb6b3b34e23a831dc60b3046d757a4f4 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h @@ -0,0 +1,2152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + // Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DPED_H +#define NBL_DPED_H 1 + +#include <linux/types.h> + +#define NBL_DPED_BASE (0x0075C000) + +#define NBL_DPED_INT_STATUS_ADDR (0x75c000) +#define NBL_DPED_INT_STATUS_DEPTH (1) +#define NBL_DPED_INT_STATUS_WIDTH (32) +#define NBL_DPED_INT_STATUS_DWLEN (1) +union dped_int_status_u { + struct dped_int_status { + u32 pkt_length_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RWC */ + u32 fsm_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 input_err:1; /* [5] Default:0x0 RWC */ + u32 cfg_err:1; /* [6] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [7] Default:0x0 RWC */ + u32 inmeta_ucor_err:1; /* [8] Default:0x0 RWC */ + u32 meta_ucor_err:1; /* [9] Default:0x0 RWC */ + u32 meta_cor_ecc_err:1; /* [10] Default:0x0 RWC */ + u32 fwd_atid_nomat_err:1; /* [11] Default:0x0 RWC */ + u32 meta_value_err:1; /* [12] Default:0x0 RWC */ + u32 edit_atnum_err:1; /* [13] Default:0x0 RWC */ + u32 header_oft_ovf:1; /* [14] Default:0x0 RWC */ + u32 edit_pos_err:1; /* [15] Default:0x0 RWC */ + u32 da_oft_len_ovf:1; /* [16] Default:0x0 RWC */ + u32 lxoffset_ovf:1; /* [17] Default:0x0 RWC */ + u32 add_head_ovf:1; /* [18] Default:0x0 RWC */ + u32 rsv:13; /* [31:19] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DPED_INT_MASK_ADDR (0x75c004) +#define NBL_DPED_INT_MASK_DEPTH (1) +#define NBL_DPED_INT_MASK_WIDTH (32) +#define NBL_DPED_INT_MASK_DWLEN (1) +union dped_int_mask_u { + struct dped_int_mask { + u32 pkt_length_err:1; /* [0] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RW */ + u32 fsm_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 input_err:1; /* [5] Default:0x0 RW */ + u32 cfg_err:1; /* [6] Default:0x0 RW */ + u32 data_ucor_err:1; /* [7] Default:0x0 RW */ + u32 inmeta_ucor_err:1; /* [8] Default:0x0 RW */ + u32 meta_ucor_err:1; /* [9] Default:0x0 RW */ + u32 meta_cor_ecc_err:1; /* [10] Default:0x0 RW */ + u32 fwd_atid_nomat_err:1; /* [11] Default:0x1 RW */ + u32 meta_value_err:1; /* [12] Default:0x0 RW */ + u32 edit_atnum_err:1; /* [13] Default:0x0 RW */ + u32 header_oft_ovf:1; /* [14] Default:0x0 RW */ + u32 edit_pos_err:1; /* [15] Default:0x0 RW */ + u32 da_oft_len_ovf:1; /* [16] Default:0x0 RW */ + u32 lxoffset_ovf:1; /* [17] Default:0x0 RW */ + u32 add_head_ovf:1; /* [18] Default:0x0 RW */ + u32 rsv:13; /* [31:19] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DPED_INT_SET_ADDR (0x75c008) +#define NBL_DPED_INT_SET_DEPTH (1) +#define NBL_DPED_INT_SET_WIDTH (32) +#define NBL_DPED_INT_SET_DWLEN (1) +union dped_int_set_u { + struct dped_int_set { + u32 pkt_length_err:1; /* [0] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 WO */ + u32 fsm_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 input_err:1; /* [5] Default:0x0 WO */ + u32 cfg_err:1; /* [6] Default:0x0 WO */ + u32 data_ucor_err:1; /* [7] Default:0x0 WO */ + u32 inmeta_ucor_err:1; /* [8] Default:0x0 WO */ + u32 meta_ucor_err:1; /* [9] Default:0x0 WO */ + u32 meta_cor_ecc_err:1; /* [10] Default:0x0 WO */ + u32 fwd_atid_nomat_err:1; /* [11] Default:0x0 WO */ + u32 meta_value_err:1; /* [12] Default:0x0 WO */ + u32 edit_atnum_err:1; /* [13] Default:0x0 WO */ + u32 header_oft_ovf:1; /* [14] Default:0x0 WO */ + u32
edit_pos_err:1; /* [15] Default:0x0 WO */ + u32 da_oft_len_ovf:1; /* [16] Default:0x0 WO */ + u32 lxoffset_ovf:1; /* [17] Default:0x0 WO */ + u32 add_head_ovf:1; /* [18] Default:0x0 WO */ + u32 rsv:13; /* [31:19] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INT_SET_DWLEN]; +} __packed; + +#define NBL_DPED_INIT_DONE_ADDR (0x75c00c) +#define NBL_DPED_INIT_DONE_DEPTH (1) +#define NBL_DPED_INIT_DONE_WIDTH (32) +#define NBL_DPED_INIT_DONE_DWLEN (1) +union dped_init_done_u { + struct dped_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DPED_PKT_LENGTH_ERR_INFO_ADDR (0x75c020) +#define NBL_DPED_PKT_LENGTH_ERR_INFO_DEPTH (1) +#define NBL_DPED_PKT_LENGTH_ERR_INFO_WIDTH (32) +#define NBL_DPED_PKT_LENGTH_ERR_INFO_DWLEN (1) +union dped_pkt_length_err_info_u { + struct dped_pkt_length_err_info { + u32 ptr_eop:1; /* [0] Default:0x0 RC */ + u32 pkt_eop:1; /* [1] Default:0x0 RC */ + u32 pkt_mod:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_PKT_LENGTH_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_CIF_ERR_INFO_ADDR (0x75c040) +#define NBL_DPED_CIF_ERR_INFO_DEPTH (1) +#define NBL_DPED_CIF_ERR_INFO_WIDTH (32) +#define NBL_DPED_CIF_ERR_INFO_DWLEN (1) +union dped_cif_err_info_u { + struct dped_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_INPUT_ERR_INFO_ADDR (0x75c048) +#define NBL_DPED_INPUT_ERR_INFO_DEPTH (1) +#define NBL_DPED_INPUT_ERR_INFO_WIDTH (32) +#define NBL_DPED_INPUT_ERR_INFO_DWLEN (1) +union dped_input_err_info_u { + struct dped_input_err_info { + u32 eoc_miss:1; /* [0] Default:0x0 RC */ + u32 soc_miss:1; /* [1] Default:0x0 RC */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INPUT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_CFG_ERR_INFO_ADDR (0x75c050) +#define NBL_DPED_CFG_ERR_INFO_DEPTH (1) +#define NBL_DPED_CFG_ERR_INFO_WIDTH (32) +#define NBL_DPED_CFG_ERR_INFO_DWLEN (1) +union dped_cfg_err_info_u { + struct dped_cfg_err_info { + u32 length:1; /* [0] Default:0x0 RC */ + u32 rd_conflict:1; /* [1] Default:0x0 RC */ + u32 rd_addr:8; /* [9:2] Default:0x0 RC */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_ATID_NOMAT_ERR_INFO_ADDR (0x75c06c) +#define NBL_DPED_FWD_ATID_NOMAT_ERR_INFO_DEPTH (1) +#define NBL_DPED_FWD_ATID_NOMAT_ERR_INFO_WIDTH (32) +#define NBL_DPED_FWD_ATID_NOMAT_ERR_INFO_DWLEN (1) +union dped_fwd_atid_nomat_err_info_u { + struct dped_fwd_atid_nomat_err_info { + u32 dport:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_ATID_NOMAT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_META_VALUE_ERR_INFO_ADDR (0x75c070) +#define NBL_DPED_META_VALUE_ERR_INFO_DEPTH (1) +#define NBL_DPED_META_VALUE_ERR_INFO_WIDTH (32) +#define NBL_DPED_META_VALUE_ERR_INFO_DWLEN (1) +union dped_meta_value_err_info_u { + struct dped_meta_value_err_info { + u32 sport:1; /* [0] Default:0x0 RC */ + u32 dport:1; /* [1] Default:0x0 RC */ + u32 dscp_ecn:1; /* [2] Default:0x0 RC */ + u32 tnl:1; /* [3] Default:0x0 RC */ + u32 vni:1; /* [4] Default:0x0 RC */ + u32 vni_one:1; /* [5] Default:0x0 RC */ + u32 rsv:26; /* [31:6] 
Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_META_VALUE_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_EDIT_ATNUM_ERR_INFO_ADDR (0x75c078) +#define NBL_DPED_EDIT_ATNUM_ERR_INFO_DEPTH (1) +#define NBL_DPED_EDIT_ATNUM_ERR_INFO_WIDTH (32) +#define NBL_DPED_EDIT_ATNUM_ERR_INFO_DWLEN (1) +union dped_edit_atnum_err_info_u { + struct dped_edit_atnum_err_info { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 del_add:1; /* [1] Default:0x0 RC */ + u32 ttl:1; /* [2] Default:0x0 RC */ + u32 dscp:1; /* [3] Default:0x0 RC */ + u32 tnl:1; /* [4] Default:0x0 RC */ + u32 sport:1; /* [5] Default:0x0 RC */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_EDIT_ATNUM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_HEADER_OFT_OVF_ADDR (0x75c080) +#define NBL_DPED_HEADER_OFT_OVF_DEPTH (1) +#define NBL_DPED_HEADER_OFT_OVF_WIDTH (32) +#define NBL_DPED_HEADER_OFT_OVF_DWLEN (1) +union dped_header_oft_ovf_u { + struct dped_header_oft_ovf { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 rsv2:7; /* [7:1] Default:0x0 RO */ + u32 add_del:6; /* [13:8] Default:0x0 RC */ + u32 dscp_ecn:1; /* [14] Default:0x0 RC */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 ttl:1; /* [16] Default:0x0 RC */ + u32 sctp:1; /* [17] Default:0x0 RC */ + u32 dscp:1; /* [18] Default:0x0 RC */ + u32 pri:1; /* [19] Default:0x0 RC */ + u32 len0:1; /* [20] Default:0x0 RC */ + u32 len1:1; /* [21] Default:0x0 RC */ + u32 ck0:1; /* [22] Default:0x0 RC */ + u32 ck1:1; /* [23] Default:0x0 RC */ + u32 ck_start0_0:1; /* [24] Default:0x0 RC */ + u32 ck_start0_1:1; /* [25] Default:0x0 RC */ + u32 ck_start1_0:1; /* [26] Default:0x0 RC */ + u32 ck_start1_1:1; /* [27] Default:0x0 RC */ + u32 head:1; /* [28] Default:0x0 RC */ + u32 ck_len0:1; /* [29] Default:0x0 RC */ + u32 ck_len1:1; /* [30] Default:0x0 RC */ + u32 rsv:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HEADER_OFT_OVF_DWLEN]; +} __packed; + +#define NBL_DPED_EDIT_POS_ERR_ADDR (0x75c088) +#define NBL_DPED_EDIT_POS_ERR_DEPTH (1) +#define NBL_DPED_EDIT_POS_ERR_WIDTH (32) +#define NBL_DPED_EDIT_POS_ERR_DWLEN (1) +union dped_edit_pos_err_u { + struct dped_edit_pos_err { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 cross_level:6; /* [6:1] Default:0x0 RC */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 add_del:6; /* [13:8] Default:0x0 RC */ + u32 dscp_ecn:1; /* [14] Default:0x0 RC */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 ttl:1; /* [16] Default:0x0 RC */ + u32 sctp:1; /* [17] Default:0x0 RC */ + u32 dscp:1; /* [18] Default:0x0 RC */ + u32 pri:1; /* [19] Default:0x0 RC */ + u32 len0:1; /* [20] Default:0x0 RC */ + u32 len1:1; /* [21] Default:0x0 RC */ + u32 ck0:1; /* [22] Default:0x0 RC */ + u32 ck1:1; /* [23] Default:0x0 RC */ + u32 ck_start0_0:1; /* [24] Default:0x0 RC */ + u32 ck_start0_1:1; /* [25] Default:0x0 RC */ + u32 ck_start1_0:1; /* [26] Default:0x0 RC */ + u32 ck_start1_1:1; /* [27] Default:0x0 RC */ + u32 ck_len0:1; /* [28] Default:0x0 RC */ + u32 ck_len1:1; /* [29] Default:0x0 RC */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_EDIT_POS_ERR_DWLEN]; +} __packed; + +#define NBL_DPED_DA_OFT_LEN_OVF_ADDR (0x75c090) +#define NBL_DPED_DA_OFT_LEN_OVF_DEPTH (1) +#define NBL_DPED_DA_OFT_LEN_OVF_WIDTH (32) +#define NBL_DPED_DA_OFT_LEN_OVF_DWLEN (1) +union dped_da_oft_len_ovf_u { + struct dped_da_oft_len_ovf { + u32 at0:5; /* [4:0] Default:0x0 RC */ + u32 at1:5; /* [9:5] Default:0x0 RC */ + u32 at2:5; /* [14:10] Default:0x0 RC */ + u32 at3:5; /* [19:15] Default:0x0 RC */ + u32 at4:5; /* 
[24:20] Default:0x0 RC */ + u32 at5:5; /* [29:25] Default:0x0 RC */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_DA_OFT_LEN_OVF_DWLEN]; +} __packed; + +#define NBL_DPED_LXOFFSET_OVF_ADDR (0x75c098) +#define NBL_DPED_LXOFFSET_OVF_DEPTH (1) +#define NBL_DPED_LXOFFSET_OVF_WIDTH (32) +#define NBL_DPED_LXOFFSET_OVF_DWLEN (1) +union dped_lxoffset_ovf_u { + struct dped_lxoffset_ovf { + u32 l2:1; /* [0] Default:0x0 RC */ + u32 l3:1; /* [1] Default:0x0 RC */ + u32 l4:1; /* [2] Default:0x0 RC */ + u32 pld:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_LXOFFSET_OVF_DWLEN]; +} __packed; + +#define NBL_DPED_ADD_HEAD_OVF_ADDR (0x75c0a0) +#define NBL_DPED_ADD_HEAD_OVF_DEPTH (1) +#define NBL_DPED_ADD_HEAD_OVF_WIDTH (32) +#define NBL_DPED_ADD_HEAD_OVF_DWLEN (1) +union dped_add_head_ovf_u { + struct dped_add_head_ovf { + u32 tnl_l2:1; /* [0] Default:0x0 RC */ + u32 tnl_pkt:1; /* [1] Default:0x0 RC */ + u32 rsv1:14; /* [15:2] Default:0x0 RO */ + u32 mir_l2:1; /* [16] Default:0x0 RC */ + u32 mir_pkt:1; /* [17] Default:0x0 RC */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_ADD_HEAD_OVF_DWLEN]; +} __packed; + +#define NBL_DPED_CAR_CTRL_ADDR (0x75c100) +#define NBL_DPED_CAR_CTRL_DEPTH (1) +#define NBL_DPED_CAR_CTRL_WIDTH (32) +#define NBL_DPED_CAR_CTRL_DWLEN (1) +union dped_car_ctrl_u { + struct dped_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DPED_INIT_START_ADDR (0x75c10c) +#define NBL_DPED_INIT_START_DEPTH (1) +#define NBL_DPED_INIT_START_WIDTH (32) +#define NBL_DPED_INIT_START_DWLEN (1) +union dped_init_start_u { + struct dped_init_start { + u32 start:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INIT_START_DWLEN]; +} __packed; + +#define NBL_DPED_TIMEOUT_CFG_ADDR (0x75c110) +#define NBL_DPED_TIMEOUT_CFG_DEPTH (1) +#define NBL_DPED_TIMEOUT_CFG_WIDTH (32) +#define NBL_DPED_TIMEOUT_CFG_DWLEN (1) +union dped_timeout_cfg_u { + struct dped_timeout_cfg { + u32 fsm_max_num:16; /* [15:00] Default:0xfff RW */ + u32 tab:8; /* [23:16] Default:0x40 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TIMEOUT_CFG_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_MAX_LENGTH_ADDR (0x75c154) +#define NBL_DPED_TNL_MAX_LENGTH_DEPTH (1) +#define NBL_DPED_TNL_MAX_LENGTH_WIDTH (32) +#define NBL_DPED_TNL_MAX_LENGTH_DWLEN (1) +union dped_tnl_max_length_u { + struct dped_tnl_max_length { + u32 th:7; /* [6:0] Default:0x5A RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TNL_MAX_LENGTH_DWLEN]; +} __packed; + +#define NBL_DPED_PKT_DROP_EN_ADDR (0x75c170) +#define NBL_DPED_PKT_DROP_EN_DEPTH (1) +#define NBL_DPED_PKT_DROP_EN_WIDTH (32) +#define NBL_DPED_PKT_DROP_EN_DWLEN (1) +union dped_pkt_drop_en_u { + struct dped_pkt_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_PKT_DROP_EN_DWLEN]; +} __packed; + +#define NBL_DPED_PKT_HERR_DROP_EN_ADDR (0x75c174) +#define NBL_DPED_PKT_HERR_DROP_EN_DEPTH (1) +#define NBL_DPED_PKT_HERR_DROP_EN_WIDTH (32) +#define NBL_DPED_PKT_HERR_DROP_EN_DWLEN (1) +union dped_pkt_herr_drop_en_u { 
+ struct dped_pkt_herr_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_PKT_HERR_DROP_EN_DWLEN]; +} __packed; + +#define NBL_DPED_PKT_PARITY_DROP_EN_ADDR (0x75c178) +#define NBL_DPED_PKT_PARITY_DROP_EN_DEPTH (1) +#define NBL_DPED_PKT_PARITY_DROP_EN_WIDTH (32) +#define NBL_DPED_PKT_PARITY_DROP_EN_DWLEN (1) +union dped_pkt_parity_drop_en_u { + struct dped_pkt_parity_drop_en { + u32 en0:1; /* [0] Default:0x1 RW */ + u32 en1:1; /* [1] Default:0x1 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_PKT_PARITY_DROP_EN_DWLEN]; +} __packed; + +#define NBL_DPED_TTL_DROP_EN_ADDR (0x75c17c) +#define NBL_DPED_TTL_DROP_EN_DEPTH (1) +#define NBL_DPED_TTL_DROP_EN_WIDTH (32) +#define NBL_DPED_TTL_DROP_EN_DWLEN (1) +union dped_ttl_drop_en_u { + struct dped_ttl_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TTL_DROP_EN_DWLEN]; +} __packed; + +#define NBL_DPED_TTL_ERROR_CODE_ADDR (0x75c188) +#define NBL_DPED_TTL_ERROR_CODE_DEPTH (1) +#define NBL_DPED_TTL_ERROR_CODE_WIDTH (32) +#define NBL_DPED_TTL_ERROR_CODE_DWLEN (1) +union dped_ttl_error_code_u { + struct dped_ttl_error_code { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv1:7; /* [7:1] Default:0x0 RO */ + u32 id:4; /* [11:8] Default:0x6 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TTL_ERROR_CODE_DWLEN]; +} __packed; + +#define NBL_DPED_HIGH_PRI_PKT_EN_ADDR (0x75c190) +#define NBL_DPED_HIGH_PRI_PKT_EN_DEPTH (1) +#define NBL_DPED_HIGH_PRI_PKT_EN_WIDTH (32) +#define NBL_DPED_HIGH_PRI_PKT_EN_DWLEN (1) +union dped_high_pri_pkt_en_u { + struct dped_high_pri_pkt_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HIGH_PRI_PKT_EN_DWLEN]; +} __packed; + +#define NBL_DPED_PADDING_CFG_ADDR (0x75c194) +#define NBL_DPED_PADDING_CFG_DEPTH (1) +#define NBL_DPED_PADDING_CFG_WIDTH (32) +#define NBL_DPED_PADDING_CFG_DWLEN (1) +union dped_padding_cfg_u { + struct dped_padding_cfg { + u32 th:6; /* [5:0] Default:0x3B RW */ + u32 rsv1:2; /* [7:6] Default:0x0 RO */ + u32 mode:2; /* [9:8] Default:0x0 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_PADDING_CFG_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDIT_FLAG_SEL0_ADDR (0x75c204) +#define NBL_DPED_HW_EDIT_FLAG_SEL0_DEPTH (1) +#define NBL_DPED_HW_EDIT_FLAG_SEL0_WIDTH (32) +#define NBL_DPED_HW_EDIT_FLAG_SEL0_DWLEN (1) +union dped_hw_edit_flag_sel0_u { + struct dped_hw_edit_flag_sel0 { + u32 oft:5; /* [4:0] Default:0x1 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDIT_FLAG_SEL0_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDIT_FLAG_SEL1_ADDR (0x75c208) +#define NBL_DPED_HW_EDIT_FLAG_SEL1_DEPTH (1) +#define NBL_DPED_HW_EDIT_FLAG_SEL1_WIDTH (32) +#define NBL_DPED_HW_EDIT_FLAG_SEL1_DWLEN (1) +union dped_hw_edit_flag_sel1_u { + struct dped_hw_edit_flag_sel1 { + u32 oft:5; /* [4:0] Default:0x2 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDIT_FLAG_SEL1_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDIT_FLAG_SEL2_ADDR (0x75c20c) +#define NBL_DPED_HW_EDIT_FLAG_SEL2_DEPTH (1) +#define NBL_DPED_HW_EDIT_FLAG_SEL2_WIDTH (32) +#define NBL_DPED_HW_EDIT_FLAG_SEL2_DWLEN (1) +union dped_hw_edit_flag_sel2_u { + struct dped_hw_edit_flag_sel2 { + u32 oft:5; /* [4:0] Default:0x3 RW */ + u32 rsv:27; /* [31:5] Default:0x0 
RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDIT_FLAG_SEL2_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDIT_FLAG_SEL3_ADDR (0x75c210) +#define NBL_DPED_HW_EDIT_FLAG_SEL3_DEPTH (1) +#define NBL_DPED_HW_EDIT_FLAG_SEL3_WIDTH (32) +#define NBL_DPED_HW_EDIT_FLAG_SEL3_DWLEN (1) +union dped_hw_edit_flag_sel3_u { + struct dped_hw_edit_flag_sel3 { + u32 oft:5; /* [4:0] Default:0x4 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDIT_FLAG_SEL3_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDIT_FLAG_SEL4_ADDR (0x75c214) +#define NBL_DPED_HW_EDIT_FLAG_SEL4_DEPTH (1) +#define NBL_DPED_HW_EDIT_FLAG_SEL4_WIDTH (32) +#define NBL_DPED_HW_EDIT_FLAG_SEL4_DWLEN (1) +union dped_hw_edit_flag_sel4_u { + struct dped_hw_edit_flag_sel4 { + u32 oft:5; /* [4:0] Default:0xe RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDIT_FLAG_SEL4_DWLEN]; +} __packed; + +#define NBL_DPED_RDMA_FLAG_ADDR (0x75c22c) +#define NBL_DPED_RDMA_FLAG_DEPTH (1) +#define NBL_DPED_RDMA_FLAG_WIDTH (32) +#define NBL_DPED_RDMA_FLAG_DWLEN (1) +union dped_rdma_flag_u { + struct dped_rdma_flag { + u32 oft:5; /* [4:0] Default:0xa RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_RDMA_FLAG_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_DPORT_ADDR (0x75c230) +#define NBL_DPED_FWD_DPORT_DEPTH (1) +#define NBL_DPED_FWD_DPORT_WIDTH (32) +#define NBL_DPED_FWD_DPORT_DWLEN (1) +union dped_fwd_dport_u { + struct dped_fwd_dport { + u32 id:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_DPORT_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_MIRID_ADDR (0x75c238) +#define NBL_DPED_FWD_MIRID_DEPTH (1) +#define NBL_DPED_FWD_MIRID_WIDTH (32) +#define NBL_DPED_FWD_MIRID_DWLEN (1) +union dped_fwd_mirid_u { + struct dped_fwd_mirid { + u32 id:6; /* [5:0] Default:0x8 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_MIRID_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_VNI0_ADDR (0x75c244) +#define NBL_DPED_FWD_VNI0_DEPTH (1) +#define NBL_DPED_FWD_VNI0_WIDTH (32) +#define NBL_DPED_FWD_VNI0_DWLEN (1) +union dped_fwd_vni0_u { + struct dped_fwd_vni0 { + u32 id:6; /* [5:0] Default:0xe RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_VNI0_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_VNI1_ADDR (0x75c248) +#define NBL_DPED_FWD_VNI1_DEPTH (1) +#define NBL_DPED_FWD_VNI1_WIDTH (32) +#define NBL_DPED_FWD_VNI1_DWLEN (1) +union dped_fwd_vni1_u { + struct dped_fwd_vni1 { + u32 id:6; /* [5:0] Default:0xf RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_VNI1_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_PRI_MDF_ADDR (0x75c250) +#define NBL_DPED_FWD_PRI_MDF_DEPTH (1) +#define NBL_DPED_FWD_PRI_MDF_WIDTH (32) +#define NBL_DPED_FWD_PRI_MDF_DWLEN (1) +union dped_fwd_pri_mdf_u { + struct dped_fwd_pri_mdf { + u32 id:6; /* [5:0] Default:0x15 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_PRI_MDF_DWLEN]; +} __packed; + +#define NBL_DPED_VLAN_TYPE0_ADDR (0x75c260) +#define NBL_DPED_VLAN_TYPE0_DEPTH (1) +#define NBL_DPED_VLAN_TYPE0_WIDTH (32) +#define NBL_DPED_VLAN_TYPE0_DWLEN (1) +union dped_vlan_type0_u { + struct dped_vlan_type0 { + u32 vau:16; /* [15:0] Default:0x8100 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_VLAN_TYPE0_DWLEN]; +} __packed; + +#define NBL_DPED_VLAN_TYPE1_ADDR (0x75c264) +#define 
NBL_DPED_VLAN_TYPE1_DEPTH (1) +#define NBL_DPED_VLAN_TYPE1_WIDTH (32) +#define NBL_DPED_VLAN_TYPE1_DWLEN (1) +union dped_vlan_type1_u { + struct dped_vlan_type1 { + u32 vau:16; /* [15:0] Default:0x88A8 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_VLAN_TYPE1_DWLEN]; +} __packed; + +#define NBL_DPED_VLAN_TYPE2_ADDR (0x75c268) +#define NBL_DPED_VLAN_TYPE2_DEPTH (1) +#define NBL_DPED_VLAN_TYPE2_WIDTH (32) +#define NBL_DPED_VLAN_TYPE2_DWLEN (1) +union dped_vlan_type2_u { + struct dped_vlan_type2 { + u32 vau:16; /* [15:0] Default:0x9100 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_VLAN_TYPE2_DWLEN]; +} __packed; + +#define NBL_DPED_VLAN_TYPE3_ADDR (0x75c26c) +#define NBL_DPED_VLAN_TYPE3_DEPTH (1) +#define NBL_DPED_VLAN_TYPE3_WIDTH (32) +#define NBL_DPED_VLAN_TYPE3_DWLEN (1) +union dped_vlan_type3_u { + struct dped_vlan_type3 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_VLAN_TYPE3_DWLEN]; +} __packed; + +#define NBL_DPED_L3_LEN_MDY_CMD_0_ADDR (0x75c300) +#define NBL_DPED_L3_LEN_MDY_CMD_0_DEPTH (1) +#define NBL_DPED_L3_LEN_MDY_CMD_0_WIDTH (32) +#define NBL_DPED_L3_LEN_MDY_CMD_0_DWLEN (1) +union dped_l3_len_mdy_cmd_0_u { + struct dped_l3_len_mdy_cmd_0 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0x2 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x2 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x2 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x0 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L3_LEN_MDY_CMD_0_DWLEN]; +} __packed; + +#define NBL_DPED_L3_LEN_MDY_CMD_1_ADDR (0x75c304) +#define NBL_DPED_L3_LEN_MDY_CMD_1_DEPTH (1) +#define NBL_DPED_L3_LEN_MDY_CMD_1_WIDTH (32) +#define NBL_DPED_L3_LEN_MDY_CMD_1_DWLEN (1) +union dped_l3_len_mdy_cmd_1_u { + struct dped_l3_len_mdy_cmd_1 { + u32 value:8; /* [7:0] Default:0x28 RW */ + u32 in_oft:7; /* [14:8] Default:0x4 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x2 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x1 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x0 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L3_LEN_MDY_CMD_1_DWLEN]; +} __packed; + +#define NBL_DPED_L4_LEN_MDY_CMD_0_ADDR (0x75c308) +#define NBL_DPED_L4_LEN_MDY_CMD_0_DEPTH (1) +#define NBL_DPED_L4_LEN_MDY_CMD_0_WIDTH (32) +#define NBL_DPED_L4_LEN_MDY_CMD_0_DWLEN (1) +union dped_l4_len_mdy_cmd_0_u { + struct dped_l4_len_mdy_cmd_0 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0xc RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x3 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x0 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x1 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_L4_LEN_MDY_CMD_0_DWLEN]; +} __packed; + +#define NBL_DPED_L4_LEN_MDY_CMD_1_ADDR (0x75c30c) +#define NBL_DPED_L4_LEN_MDY_CMD_1_DEPTH (1) +#define NBL_DPED_L4_LEN_MDY_CMD_1_WIDTH (32) +#define NBL_DPED_L4_LEN_MDY_CMD_1_DWLEN (1) +union dped_l4_len_mdy_cmd_1_u { + 
struct dped_l4_len_mdy_cmd_1 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0x4 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x3 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x0 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x1 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_LEN_MDY_CMD_1_DWLEN]; +} __packed; + +#define NBL_DPED_L3_CK_CMD_00_ADDR (0x75c310) +#define NBL_DPED_L3_CK_CMD_00_DEPTH (1) +#define NBL_DPED_L3_CK_CMD_00_WIDTH (32) +#define NBL_DPED_L3_CK_CMD_00_DWLEN (1) +union dped_l3_ck_cmd_00_u { + struct dped_l3_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0xa RW */ + u32 phid:2; /* [27:26] Default:0x2 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L3_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_DPED_L3_CK_CMD_01_ADDR (0x75c314) +#define NBL_DPED_L3_CK_CMD_01_DEPTH (1) +#define NBL_DPED_L3_CK_CMD_01_WIDTH (32) +#define NBL_DPED_L3_CK_CMD_01_DWLEN (1) +union dped_l3_ck_cmd_01_u { + struct dped_l3_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_L3_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_00_ADDR (0x75c318) +#define NBL_DPED_L4_CK_CMD_00_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_00_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_00_DWLEN (1) +union dped_l4_ck_cmd_00_u { + struct dped_l4_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x6 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x10 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_01_ADDR (0x75c31c) +#define NBL_DPED_L4_CK_CMD_01_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_01_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_01_DWLEN (1) +union dped_l4_ck_cmd_01_u { + struct dped_l4_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_10_ADDR (0x75c320) +#define NBL_DPED_L4_CK_CMD_10_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_10_WIDTH (32) +#define 
NBL_DPED_L4_CK_CMD_10_DWLEN (1) +union dped_l4_ck_cmd_10_u { + struct dped_l4_ck_cmd_10 { + u32 value:8; /* [7:0] Default:0x11 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_10_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_11_ADDR (0x75c324) +#define NBL_DPED_L4_CK_CMD_11_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_11_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_11_DWLEN (1) +union dped_l4_ck_cmd_11_u { + struct dped_l4_ck_cmd_11 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_11_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_20_ADDR (0x75c328) +#define NBL_DPED_L4_CK_CMD_20_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_20_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_20_DWLEN (1) +union dped_l4_ck_cmd_20_u { + struct dped_l4_ck_cmd_20 { + u32 value:8; /* [7:0] Default:0x2e RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x10 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_20_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_21_ADDR (0x75c32c) +#define NBL_DPED_L4_CK_CMD_21_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_21_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_21_DWLEN (1) +union dped_l4_ck_cmd_21_u { + struct dped_l4_ck_cmd_21 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_21_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_30_ADDR (0x75c330) +#define NBL_DPED_L4_CK_CMD_30_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_30_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_30_DWLEN (1) +union dped_l4_ck_cmd_30_u { + struct dped_l4_ck_cmd_30 { + u32 value:8; /* [7:0] Default:0x39 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_30_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_31_ADDR 
(0x75c334) +#define NBL_DPED_L4_CK_CMD_31_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_31_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_31_DWLEN (1) +union dped_l4_ck_cmd_31_u { + struct dped_l4_ck_cmd_31 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_31_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_40_ADDR (0x75c338) +#define NBL_DPED_L4_CK_CMD_40_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_40_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_40_DWLEN (1) +union dped_l4_ck_cmd_40_u { + struct dped_l4_ck_cmd_40 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x8 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x1 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_40_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_41_ADDR (0x75c33c) +#define NBL_DPED_L4_CK_CMD_41_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_41_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_41_DWLEN (1) +union dped_l4_ck_cmd_41_u { + struct dped_l4_ck_cmd_41 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x0 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x0 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_41_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_50_ADDR (0x75c340) +#define NBL_DPED_L4_CK_CMD_50_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_50_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_50_DWLEN (1) +union dped_l4_ck_cmd_50_u { + struct dped_l4_ck_cmd_50 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x2 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_50_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_51_ADDR (0x75c344) +#define NBL_DPED_L4_CK_CMD_51_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_51_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_51_DWLEN (1) +union dped_l4_ck_cmd_51_u { + struct dped_l4_ck_cmd_51 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x0 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_51_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_60_ADDR 
(0x75c348) +#define NBL_DPED_L4_CK_CMD_60_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_60_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_60_DWLEN (1) +union dped_l4_ck_cmd_60_u { + struct dped_l4_ck_cmd_60 { + u32 value:8; /* [7:0] Default:0x62 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x2 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_60_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_61_ADDR (0x75c34c) +#define NBL_DPED_L4_CK_CMD_61_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_61_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_61_DWLEN (1) +union dped_l4_ck_cmd_61_u { + struct dped_l4_ck_cmd_61 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x0 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x0 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_61_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L3_CK_CMD_00_ADDR (0x75c350) +#define NBL_DPED_TNL_L3_CK_CMD_00_DEPTH (1) +#define NBL_DPED_TNL_L3_CK_CMD_00_WIDTH (32) +#define NBL_DPED_TNL_L3_CK_CMD_00_DWLEN (1) +union dped_tnl_l3_ck_cmd_00_u { + struct dped_tnl_l3_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0xa RW */ + u32 phid:2; /* [27:26] Default:0x2 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L3_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L3_CK_CMD_01_ADDR (0x75c354) +#define NBL_DPED_TNL_L3_CK_CMD_01_DEPTH (1) +#define NBL_DPED_TNL_L3_CK_CMD_01_WIDTH (32) +#define NBL_DPED_TNL_L3_CK_CMD_01_DWLEN (1) +union dped_tnl_l3_ck_cmd_01_u { + struct dped_tnl_l3_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L3_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_00_ADDR (0x75c360) +#define NBL_DPED_TNL_L4_CK_CMD_00_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_00_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_00_DWLEN (1) +union dped_tnl_l4_ck_cmd_00_u { + struct dped_tnl_l4_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x11 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 
rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_01_ADDR (0x75c364) +#define NBL_DPED_TNL_L4_CK_CMD_01_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_01_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_01_DWLEN (1) +union dped_tnl_l4_ck_cmd_01_u { + struct dped_tnl_l4_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_10_ADDR (0x75c368) +#define NBL_DPED_TNL_L4_CK_CMD_10_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_10_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_10_DWLEN (1) +union dped_tnl_l4_ck_cmd_10_u { + struct dped_tnl_l4_ck_cmd_10 { + u32 value:8; /* [7:0] Default:0x39 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_10_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_11_ADDR (0x75c36c) +#define NBL_DPED_TNL_L4_CK_CMD_11_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_11_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_11_DWLEN (1) +union dped_tnl_l4_ck_cmd_11_u { + struct dped_tnl_l4_ck_cmd_11 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_11_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_20_ADDR (0x75c370) +#define NBL_DPED_TNL_L4_CK_CMD_20_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_20_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_20_DWLEN (1) +union dped_tnl_l4_ck_cmd_20_u { + struct dped_tnl_l4_ck_cmd_20 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x0 RW */ + u32 phid:2; /* [27:26] Default:0x0 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_20_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_21_ADDR (0x75c374) +#define NBL_DPED_TNL_L4_CK_CMD_21_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_21_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_21_DWLEN (1) +union dped_tnl_l4_ck_cmd_21_u { + struct dped_tnl_l4_ck_cmd_21 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* 
[15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x14 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_21_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_30_ADDR (0x75c378) +#define NBL_DPED_TNL_L4_CK_CMD_30_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_30_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_30_DWLEN (1) +union dped_tnl_l4_ck_cmd_30_u { + struct dped_tnl_l4_ck_cmd_30 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x0 RW */ + u32 phid:2; /* [27:26] Default:0x0 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_30_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_31_ADDR (0x75c37c) +#define NBL_DPED_TNL_L4_CK_CMD_31_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_31_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_31_DWLEN (1) +union dped_tnl_l4_ck_cmd_31_u { + struct dped_tnl_l4_ck_cmd_31 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x8 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_31_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_40_ADDR (0x75c380) +#define NBL_DPED_TNL_L4_CK_CMD_40_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_40_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_40_DWLEN (1) +union dped_tnl_l4_ck_cmd_40_u { + struct dped_tnl_l4_ck_cmd_40 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x0 RW */ + u32 phid:2; /* [27:26] Default:0x0 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_40_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_41_ADDR (0x75c384) +#define NBL_DPED_TNL_L4_CK_CMD_41_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_41_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_41_DWLEN (1) +union dped_tnl_l4_ck_cmd_41_u { + struct dped_tnl_l4_ck_cmd_41 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x8 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_41_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_50_ADDR (0x75c388) +#define NBL_DPED_TNL_L4_CK_CMD_50_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_50_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_50_DWLEN (1) +union dped_tnl_l4_ck_cmd_50_u { + struct dped_tnl_l4_ck_cmd_50 { + u32 value:8; /* 
[7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x0 RW */ + u32 phid:2; /* [27:26] Default:0x0 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_50_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_51_ADDR (0x75c38c) +#define NBL_DPED_TNL_L4_CK_CMD_51_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_51_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_51_DWLEN (1) +union dped_tnl_l4_ck_cmd_51_u { + struct dped_tnl_l4_ck_cmd_51 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x8 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_51_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_60_ADDR (0x75c390) +#define NBL_DPED_TNL_L4_CK_CMD_60_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_60_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_60_DWLEN (1) +union dped_tnl_l4_ck_cmd_60_u { + struct dped_tnl_l4_ck_cmd_60 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x0 RW */ + u32 phid:2; /* [27:26] Default:0x0 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_60_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_61_ADDR (0x75c394) +#define NBL_DPED_TNL_L4_CK_CMD_61_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_61_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_61_DWLEN (1) +union dped_tnl_l4_ck_cmd_61_u { + struct dped_tnl_l4_ck_cmd_61 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x8 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_61_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_00_ADDR (0x75c3a0) +#define NBL_DPED_MIR_CMD_00_DEPTH (1) +#define NBL_DPED_MIR_CMD_00_WIDTH (32) +#define NBL_DPED_MIR_CMD_00_DWLEN (1) +union dped_mir_cmd_00_u { + struct dped_mir_cmd_00 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_00_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_01_ADDR (0x75c3a4) +#define NBL_DPED_MIR_CMD_01_DEPTH (1) +#define NBL_DPED_MIR_CMD_01_WIDTH (32) +#define NBL_DPED_MIR_CMD_01_DWLEN (1) +union dped_mir_cmd_01_u { + struct dped_mir_cmd_01 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + 
u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_01_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_10_ADDR (0x75c3a8) +#define NBL_DPED_MIR_CMD_10_DEPTH (1) +#define NBL_DPED_MIR_CMD_10_WIDTH (32) +#define NBL_DPED_MIR_CMD_10_DWLEN (1) +union dped_mir_cmd_10_u { + struct dped_mir_cmd_10 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_10_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_11_ADDR (0x75c3ac) +#define NBL_DPED_MIR_CMD_11_DEPTH (1) +#define NBL_DPED_MIR_CMD_11_WIDTH (32) +#define NBL_DPED_MIR_CMD_11_DWLEN (1) +union dped_mir_cmd_11_u { + struct dped_mir_cmd_11 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_11_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_20_ADDR (0x75c3b0) +#define NBL_DPED_MIR_CMD_20_DEPTH (1) +#define NBL_DPED_MIR_CMD_20_WIDTH (32) +#define NBL_DPED_MIR_CMD_20_DWLEN (1) +union dped_mir_cmd_20_u { + struct dped_mir_cmd_20 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_20_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_21_ADDR (0x75c3b4) +#define NBL_DPED_MIR_CMD_21_DEPTH (1) +#define NBL_DPED_MIR_CMD_21_WIDTH (32) +#define NBL_DPED_MIR_CMD_21_DWLEN (1) +union dped_mir_cmd_21_u { + struct dped_mir_cmd_21 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_21_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_30_ADDR (0x75c3b8) +#define NBL_DPED_MIR_CMD_30_DEPTH (1) +#define NBL_DPED_MIR_CMD_30_WIDTH (32) +#define NBL_DPED_MIR_CMD_30_DWLEN (1) +union dped_mir_cmd_30_u { + struct dped_mir_cmd_30 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_30_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_31_ADDR (0x75c3bc) +#define NBL_DPED_MIR_CMD_31_DEPTH (1) +#define NBL_DPED_MIR_CMD_31_WIDTH (32) +#define NBL_DPED_MIR_CMD_31_DWLEN (1) +union dped_mir_cmd_31_u { + struct dped_mir_cmd_31 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_31_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_40_ADDR (0x75c3c0) +#define NBL_DPED_MIR_CMD_40_DEPTH (1) +#define NBL_DPED_MIR_CMD_40_WIDTH (32) +#define NBL_DPED_MIR_CMD_40_DWLEN (1) +union dped_mir_cmd_40_u { + struct dped_mir_cmd_40 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW 
*/ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_40_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_41_ADDR (0x75c3c4) +#define NBL_DPED_MIR_CMD_41_DEPTH (1) +#define NBL_DPED_MIR_CMD_41_WIDTH (32) +#define NBL_DPED_MIR_CMD_41_DWLEN (1) +union dped_mir_cmd_41_u { + struct dped_mir_cmd_41 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_41_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_50_ADDR (0x75c3c8) +#define NBL_DPED_MIR_CMD_50_DEPTH (1) +#define NBL_DPED_MIR_CMD_50_WIDTH (32) +#define NBL_DPED_MIR_CMD_50_DWLEN (1) +union dped_mir_cmd_50_u { + struct dped_mir_cmd_50 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_50_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_51_ADDR (0x75c3cc) +#define NBL_DPED_MIR_CMD_51_DEPTH (1) +#define NBL_DPED_MIR_CMD_51_WIDTH (32) +#define NBL_DPED_MIR_CMD_51_DWLEN (1) +union dped_mir_cmd_51_u { + struct dped_mir_cmd_51 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_51_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_60_ADDR (0x75c3d0) +#define NBL_DPED_MIR_CMD_60_DEPTH (1) +#define NBL_DPED_MIR_CMD_60_WIDTH (32) +#define NBL_DPED_MIR_CMD_60_DWLEN (1) +union dped_mir_cmd_60_u { + struct dped_mir_cmd_60 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_60_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_61_ADDR (0x75c3d4) +#define NBL_DPED_MIR_CMD_61_DEPTH (1) +#define NBL_DPED_MIR_CMD_61_WIDTH (32) +#define NBL_DPED_MIR_CMD_61_DWLEN (1) +union dped_mir_cmd_61_u { + struct dped_mir_cmd_61 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_61_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_70_ADDR (0x75c3d8) +#define NBL_DPED_MIR_CMD_70_DEPTH (1) +#define NBL_DPED_MIR_CMD_70_WIDTH (32) +#define NBL_DPED_MIR_CMD_70_DWLEN (1) +union dped_mir_cmd_70_u { + struct dped_mir_cmd_70 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_70_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_71_ADDR (0x75c3dc) +#define NBL_DPED_MIR_CMD_71_DEPTH (1) +#define NBL_DPED_MIR_CMD_71_WIDTH (32) +#define NBL_DPED_MIR_CMD_71_DWLEN (1) +union dped_mir_cmd_71_u { + struct dped_mir_cmd_71 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_71_DWLEN]; +} 
__packed; + +#define NBL_DPED_DSCP_CK_EN_ADDR (0x75c3e8) +#define NBL_DPED_DSCP_CK_EN_DEPTH (1) +#define NBL_DPED_DSCP_CK_EN_WIDTH (32) +#define NBL_DPED_DSCP_CK_EN_DWLEN (1) +union dped_dscp_ck_en_u { + struct dped_dscp_ck_en { + u32 l4_en:1; /* [0] Default:0x0 RW */ + u32 l3_en:1; /* [1] Default:0x1 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_DSCP_CK_EN_DWLEN]; +} __packed; + +#define NBL_DPED_RDMA_ECN_REMARK_ADDR (0x75c3f0) +#define NBL_DPED_RDMA_ECN_REMARK_DEPTH (1) +#define NBL_DPED_RDMA_ECN_REMARK_WIDTH (32) +#define NBL_DPED_RDMA_ECN_REMARK_DWLEN (1) +union dped_rdma_ecn_remark_u { + struct dped_rdma_ecn_remark { + u32 vau:2; /* [1:0] Default:0x1 RW */ + u32 rsv1:2; /* [3:2] Default:0x0 RO */ + u32 en:1; /* [4] Default:0x0 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_RDMA_ECN_REMARK_DWLEN]; +} __packed; + +#define NBL_DPED_VLAN_OFFSET_ADDR (0x75c3f4) +#define NBL_DPED_VLAN_OFFSET_DEPTH (1) +#define NBL_DPED_VLAN_OFFSET_WIDTH (32) +#define NBL_DPED_VLAN_OFFSET_DWLEN (1) +union dped_vlan_offset_u { + struct dped_vlan_offset { + u32 oft:8; /* [7:0] Default:0xC RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_VLAN_OFFSET_DWLEN]; +} __packed; + +#define NBL_DPED_DSCP_OFFSET_0_ADDR (0x75c3f8) +#define NBL_DPED_DSCP_OFFSET_0_DEPTH (1) +#define NBL_DPED_DSCP_OFFSET_0_WIDTH (32) +#define NBL_DPED_DSCP_OFFSET_0_DWLEN (1) +union dped_dscp_offset_0_u { + struct dped_dscp_offset_0 { + u32 oft:8; /* [7:0] Default:0x8 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_DSCP_OFFSET_0_DWLEN]; +} __packed; + +#define NBL_DPED_DSCP_OFFSET_1_ADDR (0x75c3fc) +#define NBL_DPED_DSCP_OFFSET_1_DEPTH (1) +#define NBL_DPED_DSCP_OFFSET_1_WIDTH (32) +#define NBL_DPED_DSCP_OFFSET_1_DWLEN (1) +union dped_dscp_offset_1_u { + struct dped_dscp_offset_1 { + u32 oft:8; /* [7:0] Default:0x4 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_DSCP_OFFSET_1_DWLEN]; +} __packed; + +#define NBL_DPED_CFG_TEST_ADDR (0x75c600) +#define NBL_DPED_CFG_TEST_DEPTH (1) +#define NBL_DPED_CFG_TEST_WIDTH (32) +#define NBL_DPED_CFG_TEST_DWLEN (1) +union dped_cfg_test_u { + struct dped_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_DPED_BP_STATE_ADDR (0x75c608) +#define NBL_DPED_BP_STATE_DEPTH (1) +#define NBL_DPED_BP_STATE_WIDTH (32) +#define NBL_DPED_BP_STATE_DWLEN (1) +union dped_bp_state_u { + struct dped_bp_state { + u32 bm_rtn_tout:1; /* [0] Default:0x0 RO */ + u32 bm_not_rdy:1; /* [1] Default:0x0 RO */ + u32 dprbac_fc:1; /* [2] Default:0x0 RO */ + u32 qm_fc:1; /* [3] Default:0x0 RO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_BP_STATE_DWLEN]; +} __packed; + +#define NBL_DPED_BP_HISTORY_ADDR (0x75c60c) +#define NBL_DPED_BP_HISTORY_DEPTH (1) +#define NBL_DPED_BP_HISTORY_WIDTH (32) +#define NBL_DPED_BP_HISTORY_DWLEN (1) +union dped_bp_history_u { + struct dped_bp_history { + u32 bm_rtn_tout:1; /* [0] Default:0x0 RC */ + u32 bm_not_rdy:1; /* [1] Default:0x0 RC */ + u32 dprbac_fc:1; /* [2] Default:0x0 RC */ + u32 qm_fc:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_DPED_MIRID_IND_ADDR (0x75c900) +#define NBL_DPED_MIRID_IND_DEPTH (1) +#define NBL_DPED_MIRID_IND_WIDTH (32) +#define NBL_DPED_MIRID_IND_DWLEN (1) 
+union dped_mirid_ind_u { + struct dped_mirid_ind { + u32 nomat:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIRID_IND_DWLEN]; +} __packed; + +#define NBL_DPED_MD_AUX_OFT_ADDR (0x75c904) +#define NBL_DPED_MD_AUX_OFT_DEPTH (1) +#define NBL_DPED_MD_AUX_OFT_WIDTH (32) +#define NBL_DPED_MD_AUX_OFT_DWLEN (1) +union dped_md_aux_oft_u { + struct dped_md_aux_oft { + u32 l2_oft:8; /* [7:0] Default:0x0 RO */ + u32 l3_oft:8; /* [15:8] Default:0x0 RO */ + u32 l4_oft:8; /* [23:16] Default:0x0 RO */ + u32 pld_oft:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_AUX_OFT_DWLEN]; +} __packed; + +#define NBL_DPED_MD_AUX_PKT_LEN_ADDR (0x75c908) +#define NBL_DPED_MD_AUX_PKT_LEN_DEPTH (1) +#define NBL_DPED_MD_AUX_PKT_LEN_WIDTH (32) +#define NBL_DPED_MD_AUX_PKT_LEN_DWLEN (1) +union dped_md_aux_pkt_len_u { + struct dped_md_aux_pkt_len { + u32 len:14; /* [13:0] Default:0x0 RO */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_AUX_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_DPED_MD_FWD_MIR_ADDR (0x75c90c) +#define NBL_DPED_MD_FWD_MIR_DEPTH (1) +#define NBL_DPED_MD_FWD_MIR_WIDTH (32) +#define NBL_DPED_MD_FWD_MIR_DWLEN (1) +union dped_md_fwd_mir_u { + struct dped_md_fwd_mir { + u32 id:4; /* [3:0] Default:0x0 RO */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_FWD_MIR_DWLEN]; +} __packed; + +#define NBL_DPED_MD_FWD_DPORT_ADDR (0x75c910) +#define NBL_DPED_MD_FWD_DPORT_DEPTH (1) +#define NBL_DPED_MD_FWD_DPORT_WIDTH (32) +#define NBL_DPED_MD_FWD_DPORT_DWLEN (1) +union dped_md_fwd_dport_u { + struct dped_md_fwd_dport { + u32 id:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_FWD_DPORT_DWLEN]; +} __packed; + +#define NBL_DPED_MD_AUX_PLD_CKSUM_ADDR (0x75c914) +#define NBL_DPED_MD_AUX_PLD_CKSUM_DEPTH (1) +#define NBL_DPED_MD_AUX_PLD_CKSUM_WIDTH (32) +#define NBL_DPED_MD_AUX_PLD_CKSUM_DWLEN (1) +union dped_md_aux_pld_cksum_u { + struct dped_md_aux_pld_cksum { + u32 ck:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_AUX_PLD_CKSUM_DWLEN]; +} __packed; + +#define NBL_DPED_INNER_PKT_CKSUM_ADDR (0x75c918) +#define NBL_DPED_INNER_PKT_CKSUM_DEPTH (1) +#define NBL_DPED_INNER_PKT_CKSUM_WIDTH (32) +#define NBL_DPED_INNER_PKT_CKSUM_DWLEN (1) +union dped_inner_pkt_cksum_u { + struct dped_inner_pkt_cksum { + u32 ck:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INNER_PKT_CKSUM_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_0_ADDR (0x75c920) +#define NBL_DPED_MD_EDIT_0_DEPTH (1) +#define NBL_DPED_MD_EDIT_0_WIDTH (32) +#define NBL_DPED_MD_EDIT_0_DWLEN (1) +union dped_md_edit_0_u { + struct dped_md_edit_0 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_0_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_1_ADDR (0x75c924) +#define NBL_DPED_MD_EDIT_1_DEPTH (1) +#define NBL_DPED_MD_EDIT_1_WIDTH (32) +#define NBL_DPED_MD_EDIT_1_DWLEN (1) +union dped_md_edit_1_u { + struct dped_md_edit_1 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_1_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_2_ADDR (0x75c928) +#define NBL_DPED_MD_EDIT_2_DEPTH (1) +#define NBL_DPED_MD_EDIT_2_WIDTH 
(32) +#define NBL_DPED_MD_EDIT_2_DWLEN (1) +union dped_md_edit_2_u { + struct dped_md_edit_2 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_2_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_3_ADDR (0x75c92c) +#define NBL_DPED_MD_EDIT_3_DEPTH (1) +#define NBL_DPED_MD_EDIT_3_WIDTH (32) +#define NBL_DPED_MD_EDIT_3_DWLEN (1) +union dped_md_edit_3_u { + struct dped_md_edit_3 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_3_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_4_ADDR (0x75c930) +#define NBL_DPED_MD_EDIT_4_DEPTH (1) +#define NBL_DPED_MD_EDIT_4_WIDTH (32) +#define NBL_DPED_MD_EDIT_4_DWLEN (1) +union dped_md_edit_4_u { + struct dped_md_edit_4 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_4_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_5_ADDR (0x75c934) +#define NBL_DPED_MD_EDIT_5_DEPTH (1) +#define NBL_DPED_MD_EDIT_5_WIDTH (32) +#define NBL_DPED_MD_EDIT_5_DWLEN (1) +union dped_md_edit_5_u { + struct dped_md_edit_5 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_5_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_6_ADDR (0x75c938) +#define NBL_DPED_MD_EDIT_6_DEPTH (1) +#define NBL_DPED_MD_EDIT_6_WIDTH (32) +#define NBL_DPED_MD_EDIT_6_DWLEN (1) +union dped_md_edit_6_u { + struct dped_md_edit_6 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_6_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_7_ADDR (0x75c93c) +#define NBL_DPED_MD_EDIT_7_DEPTH (1) +#define NBL_DPED_MD_EDIT_7_WIDTH (32) +#define NBL_DPED_MD_EDIT_7_DWLEN (1) +union dped_md_edit_7_u { + struct dped_md_edit_7 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_7_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_8_ADDR (0x75c940) +#define NBL_DPED_MD_EDIT_8_DEPTH (1) +#define NBL_DPED_MD_EDIT_8_WIDTH (32) +#define NBL_DPED_MD_EDIT_8_DWLEN (1) +union dped_md_edit_8_u { + struct dped_md_edit_8 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_8_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_9_ADDR (0x75c944) +#define NBL_DPED_MD_EDIT_9_DEPTH (1) +#define NBL_DPED_MD_EDIT_9_WIDTH (32) +#define NBL_DPED_MD_EDIT_9_DWLEN (1) +union dped_md_edit_9_u { + struct dped_md_edit_9 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_9_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_10_ADDR (0x75c948) +#define NBL_DPED_MD_EDIT_10_DEPTH (1) +#define NBL_DPED_MD_EDIT_10_WIDTH (32) +#define NBL_DPED_MD_EDIT_10_DWLEN (1) +union dped_md_edit_10_u { + struct dped_md_edit_10 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_DPED_MD_EDIT_10_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_11_ADDR (0x75c94c) +#define NBL_DPED_MD_EDIT_11_DEPTH (1) +#define NBL_DPED_MD_EDIT_11_WIDTH (32) +#define NBL_DPED_MD_EDIT_11_DWLEN (1) +union dped_md_edit_11_u { + struct dped_md_edit_11 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_11_DWLEN]; +} __packed; + +#define NBL_DPED_ADD_DEL_LEN_ADDR (0x75c950) +#define NBL_DPED_ADD_DEL_LEN_DEPTH (1) +#define NBL_DPED_ADD_DEL_LEN_WIDTH (32) +#define NBL_DPED_ADD_DEL_LEN_DWLEN (1) +union dped_add_del_len_u { + struct dped_add_del_len { + u32 len:9; /* [8:0] Default:0x0 RO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_ADD_DEL_LEN_DWLEN]; +} __packed; + +#define NBL_DPED_TTL_INFO_ADDR (0x75c970) +#define NBL_DPED_TTL_INFO_DEPTH (1) +#define NBL_DPED_TTL_INFO_WIDTH (32) +#define NBL_DPED_TTL_INFO_DWLEN (1) +union dped_ttl_info_u { + struct dped_ttl_info { + u32 old_ttl:8; /* [7:0] Default:0x0 RO */ + u32 new_ttl:8; /* [15:8] Default:0x0 RO */ + u32 ttl_val:1; /* [16] Default:0x0 RC */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TTL_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_LEN_INFO_VLD_ADDR (0x75c974) +#define NBL_DPED_LEN_INFO_VLD_DEPTH (1) +#define NBL_DPED_LEN_INFO_VLD_WIDTH (32) +#define NBL_DPED_LEN_INFO_VLD_DWLEN (1) +union dped_len_info_vld_u { + struct dped_len_info_vld { + u32 length0:1; /* [0] Default:0x0 RC */ + u32 length1:1; /* [1] Default:0x0 RC */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_LEN_INFO_VLD_DWLEN]; +} __packed; + +#define NBL_DPED_LEN0_INFO_ADDR (0x75c978) +#define NBL_DPED_LEN0_INFO_DEPTH (1) +#define NBL_DPED_LEN0_INFO_WIDTH (32) +#define NBL_DPED_LEN0_INFO_DWLEN (1) +union dped_len0_info_u { + struct dped_len0_info { + u32 old_len:16; /* [15:0] Default:0x0 RO */ + u32 new_len:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_LEN0_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_LEN1_INFO_ADDR (0x75c97c) +#define NBL_DPED_LEN1_INFO_DEPTH (1) +#define NBL_DPED_LEN1_INFO_WIDTH (32) +#define NBL_DPED_LEN1_INFO_DWLEN (1) +union dped_len1_info_u { + struct dped_len1_info { + u32 old_len:16; /* [15:0] Default:0x0 RO */ + u32 new_len:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_LEN1_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_EDIT_ATNUM_INFO_ADDR (0x75c980) +#define NBL_DPED_EDIT_ATNUM_INFO_DEPTH (1) +#define NBL_DPED_EDIT_ATNUM_INFO_WIDTH (32) +#define NBL_DPED_EDIT_ATNUM_INFO_DWLEN (1) +union dped_edit_atnum_info_u { + struct dped_edit_atnum_info { + u32 replace:4; /* [3:0] Default:0x0 RO */ + u32 del:4; /* [7:4] Default:0x0 RO */ + u32 add:4; /* [11:8] Default:0x0 RO */ + u32 ttl:4; /* [15:12] Default:0x0 RO */ + u32 dscp:4; /* [19:16] Default:0x0 RO */ + u32 tnl:4; /* [23:20] Default:0x0 RO */ + u32 sport:4; /* [27:24] Default:0x0 RO */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_EDIT_ATNUM_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_EDIT_NO_AT_INFO_ADDR (0x75c984) +#define NBL_DPED_EDIT_NO_AT_INFO_DEPTH (1) +#define NBL_DPED_EDIT_NO_AT_INFO_WIDTH (32) +#define NBL_DPED_EDIT_NO_AT_INFO_DWLEN (1) +union dped_edit_no_at_info_u { + struct dped_edit_no_at_info { + u32 l3_len:1; /* [0] Default:0x0 RC */ + u32 l4_len:1; /* [1] Default:0x0 RC */ + u32 l3_ck:1; /* [2] Default:0x0 RC */ + u32 l4_ck:1; /* [3] Default:0x0 RC */ + u32 
sctp_ck:1; /* [4] Default:0x0 RC */ + u32 padding:1; /* [5] Default:0x0 RC */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_EDIT_NO_AT_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDT_PROF_ADDR (0x75d000) +#define NBL_DPED_HW_EDT_PROF_DEPTH (32) +#define NBL_DPED_HW_EDT_PROF_WIDTH (32) +#define NBL_DPED_HW_EDT_PROF_DWLEN (1) +union dped_hw_edt_prof_u { + struct dped_hw_edt_prof { + u32 l4_len:2; /* [1:0] Default:0x2 RW */ + u32 l3_len:2; /* [3:2] Default:0x2 RW */ + u32 l4_ck:3; /* [6:4] Default:0x7 RW */ + u32 l3_ck:1; /* [7:7] Default:0x0 RW */ + u32 l4_ck_zero_free:1; /* [8:8] Default:0x1 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDT_PROF_DWLEN]; +} __packed; +#define NBL_DPED_HW_EDT_PROF_REG(r) (NBL_DPED_HW_EDT_PROF_ADDR + \ + (NBL_DPED_HW_EDT_PROF_DWLEN * 4) * (r)) + +#define NBL_DPED_OUT_MASK_ADDR (0x75e000) +#define NBL_DPED_OUT_MASK_DEPTH (24) +#define NBL_DPED_OUT_MASK_WIDTH (64) +#define NBL_DPED_OUT_MASK_DWLEN (2) +union dped_out_mask_u { + struct dped_out_mask { + u32 flag:32; /* [31:0] Default:0x0 RW */ + u32 fwd:30; /* [61:32] Default:0x0 RW */ + u32 rsv:2; /* [63:62] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_OUT_MASK_DWLEN]; +} __packed; +#define NBL_DPED_OUT_MASK_REG(r) (NBL_DPED_OUT_MASK_ADDR + \ + (NBL_DPED_OUT_MASK_DWLEN * 4) * (r)) + +#define NBL_DPED_TAB_EDIT_CMD_ADDR (0x75f000) +#define NBL_DPED_TAB_EDIT_CMD_DEPTH (32) +#define NBL_DPED_TAB_EDIT_CMD_WIDTH (32) +#define NBL_DPED_TAB_EDIT_CMD_DWLEN (1) +union dped_tab_edit_cmd_u { + struct dped_tab_edit_cmd { + u32 in_offset:8; /* [7:0] Default:0x0 RW */ + u32 phid:2; /* [9:8] Default:0x0 RW */ + u32 len:7; /* [16:10] Default:0x0 RW */ + u32 mode:4; /* [20:17] Default:0xf RW */ + u32 l4_ck_ofld_upt:1; /* [21] Default:0x1 RW */ + u32 l3_ck_ofld_upt:1; /* [22] Default:0x1 RW */ + u32 rsv:9; /* [31:23] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TAB_EDIT_CMD_DWLEN]; +} __packed; +#define NBL_DPED_TAB_EDIT_CMD_REG(r) (NBL_DPED_TAB_EDIT_CMD_ADDR + \ + (NBL_DPED_TAB_EDIT_CMD_DWLEN * 4) * (r)) + +#define NBL_DPED_TAB_MIR_ADDR (0x760000) +#define NBL_DPED_TAB_MIR_DEPTH (8) +#define NBL_DPED_TAB_MIR_WIDTH (1024) +#define NBL_DPED_TAB_MIR_DWLEN (32) +union dped_tab_mir_u { + struct dped_tab_mir { + u32 cfg_mir_data:16; /* [719:0] Default:0x0 RW */ + u32 cfg_mir_data_arr[22]; /* [719:0] Default:0x0 RW */ + u32 cfg_mir_info_l:32; /* [755:720] Default:0x0 RW */ + u32 cfg_mir_info_h:4; /* [755:720] Default:0x0 RW */ + u32 rsv:12; /* [1023:756] Default:0x0 RO */ + u32 rsv_arr[8]; /* [1023:756] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TAB_MIR_DWLEN]; +} __packed; +#define NBL_DPED_TAB_MIR_REG(r) (NBL_DPED_TAB_MIR_ADDR + \ + (NBL_DPED_TAB_MIR_DWLEN * 4) * (r)) + +#define NBL_DPED_TAB_VSI_TYPE_ADDR (0x761000) +#define NBL_DPED_TAB_VSI_TYPE_DEPTH (1031) +#define NBL_DPED_TAB_VSI_TYPE_WIDTH (32) +#define NBL_DPED_TAB_VSI_TYPE_DWLEN (1) +union dped_tab_vsi_type_u { + struct dped_tab_vsi_type { + u32 sel:4; /* [3:0] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TAB_VSI_TYPE_DWLEN]; +} __packed; +#define NBL_DPED_TAB_VSI_TYPE_REG(r) (NBL_DPED_TAB_VSI_TYPE_ADDR + \ + (NBL_DPED_TAB_VSI_TYPE_DWLEN * 4) * (r)) + +#define NBL_DPED_TAB_REPLACE_ADDR (0x763000) +#define NBL_DPED_TAB_REPLACE_DEPTH (2048) +#define NBL_DPED_TAB_REPLACE_WIDTH (64) +#define NBL_DPED_TAB_REPLACE_DWLEN (2) +union dped_tab_replace_u { + struct dped_tab_replace { + u32 vau_arr[2]; /* [63:0] 
Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TAB_REPLACE_DWLEN]; +} __packed; +#define NBL_DPED_TAB_REPLACE_REG(r) (NBL_DPED_TAB_REPLACE_ADDR + \ + (NBL_DPED_TAB_REPLACE_DWLEN * 4) * (r)) + +#define NBL_DPED_TAB_TNL_ADDR (0x7dc000) +#define NBL_DPED_TAB_TNL_DEPTH (4096) +#define NBL_DPED_TAB_TNL_WIDTH (1024) +#define NBL_DPED_TAB_TNL_DWLEN (32) +union dped_tab_tnl_u { + struct dped_tab_tnl { + u32 cfg_tnl_data:16; /* [719:0] Default:0x0 RW */ + u32 cfg_tnl_data_arr[22]; /* [719:0] Default:0x0 RW */ + u32 cfg_tnl_info:8; /* [791:720] Default:0x0 RW */ + u32 cfg_tnl_info_arr[2]; /* [791:720] Default:0x0 RW */ + u32 rsv_l:32; /* [1023:792] Default:0x0 RO */ + u32 rsv_h:8; /* [1023:792] Default:0x0 RO */ + u32 rsv_arr[6]; /* [1023:792] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TAB_TNL_DWLEN]; +} __packed; +#define NBL_DPED_TAB_TNL_REG(r) (NBL_DPED_TAB_TNL_ADDR + \ + (NBL_DPED_TAB_TNL_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstore.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstore.h new file mode 100644 index 0000000000000000000000000000000000000000..cf00d7eb34db1b569f922fb48b0d8ad0b6db5784 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstore.h @@ -0,0 +1,957 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DSTORE_H +#define NBL_DSTORE_H 1 + +#include <linux/types.h> + +#define NBL_DSTORE_BASE (0x00704000) + +#define NBL_DSTORE_INT_STATUS_ADDR (0x704000) +#define NBL_DSTORE_INT_STATUS_DEPTH (1) +#define NBL_DSTORE_INT_STATUS_WIDTH (32) +#define NBL_DSTORE_INT_STATUS_DWLEN (1) +union dstore_int_status_u { + struct dstore_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 cor_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 parity_err:1; /* [5] Default:0x0 RWC */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DSTORE_INT_MASK_ADDR (0x704004) +#define NBL_DSTORE_INT_MASK_DEPTH (1) +#define NBL_DSTORE_INT_MASK_WIDTH (32) +#define NBL_DSTORE_INT_MASK_DWLEN (1) +union dstore_int_mask_u { + struct dstore_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 cor_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 parity_err:1; /* [5] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DSTORE_INT_SET_ADDR (0x704008) +#define NBL_DSTORE_INT_SET_DEPTH (0) +#define NBL_DSTORE_INT_SET_WIDTH (32) +#define NBL_DSTORE_INT_SET_DWLEN (1) +union dstore_int_set_u { + struct dstore_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 cor_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 parity_err:1; /* [5] Default:0x0 WO */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_INT_SET_DWLEN]; +} __packed; + +#define NBL_DSTORE_COR_ERR_INFO_ADDR (0x70400c) +#define
NBL_DSTORE_COR_ERR_INFO_DEPTH (1) +#define NBL_DSTORE_COR_ERR_INFO_WIDTH (32) +#define NBL_DSTORE_COR_ERR_INFO_DWLEN (1) +union dstore_cor_err_info_u { + struct dstore_cor_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSTORE_PARITY_ERR_INFO_ADDR (0x704014) +#define NBL_DSTORE_PARITY_ERR_INFO_DEPTH (1) +#define NBL_DSTORE_PARITY_ERR_INFO_WIDTH (32) +#define NBL_DSTORE_PARITY_ERR_INFO_DWLEN (1) +union dstore_parity_err_info_u { + struct dstore_parity_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSTORE_CIF_ERR_INFO_ADDR (0x70401c) +#define NBL_DSTORE_CIF_ERR_INFO_DEPTH (1) +#define NBL_DSTORE_CIF_ERR_INFO_WIDTH (32) +#define NBL_DSTORE_CIF_ERR_INFO_DWLEN (1) +union dstore_cif_err_info_u { + struct dstore_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSTORE_CAR_CTRL_ADDR (0x704100) +#define NBL_DSTORE_CAR_CTRL_DEPTH (1) +#define NBL_DSTORE_CAR_CTRL_WIDTH (32) +#define NBL_DSTORE_CAR_CTRL_DWLEN (1) +union dstore_car_ctrl_u { + struct dstore_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DSTORE_INIT_START_ADDR (0x704104) +#define NBL_DSTORE_INIT_START_DEPTH (1) +#define NBL_DSTORE_INIT_START_WIDTH (32) +#define NBL_DSTORE_INIT_START_DWLEN (1) +union dstore_init_start_u { + struct dstore_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_INIT_START_DWLEN]; +} __packed; + +#define NBL_DSTORE_PKT_LEN_ADDR (0x704108) +#define NBL_DSTORE_PKT_LEN_DEPTH (1) +#define NBL_DSTORE_PKT_LEN_WIDTH (32) +#define NBL_DSTORE_PKT_LEN_DWLEN (1) +union dstore_pkt_len_u { + struct dstore_pkt_len { + u32 min:7; /* [6:0] Default:60 RW */ + u32 rsv1:8; /* [14:7] Default:0x0 RO */ + u32 min_chk_en:1; /* [15] Default:0x0 RW */ + u32 max:14; /* [29:16] Default:9600 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 max_chk_en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DSTORE_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_DSTORE_SCH_PD_BUFFER_TH_ADDR (0x704128) +#define NBL_DSTORE_SCH_PD_BUFFER_TH_DEPTH (1) +#define NBL_DSTORE_SCH_PD_BUFFER_TH_WIDTH (32) +#define NBL_DSTORE_SCH_PD_BUFFER_TH_DWLEN (1) +union dstore_sch_pd_buffer_th_u { + struct dstore_sch_pd_buffer_th { + u32 aful_th:9; /* [8:0] Default:500 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_SCH_PD_BUFFER_TH_DWLEN]; +} __packed; + +#define NBL_DSTORE_GLB_FC_TH_ADDR (0x70412c) +#define NBL_DSTORE_GLB_FC_TH_DEPTH (1) +#define NBL_DSTORE_GLB_FC_TH_WIDTH (32) +#define NBL_DSTORE_GLB_FC_TH_DWLEN (1) +union dstore_glb_fc_th_u { + struct dstore_glb_fc_th { + u32 xoff_th:10; /* [9:0] Default:900 RW */ + u32 rsv1:6; /* 
[15:10] Default:0x0 RO */ + u32 xon_th:10; /* [25:16] Default:850 RW */ + u32 rsv:5; /* [30:26] Default:0x0 RO */ + u32 fc_en:1; /* [31:31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DSTORE_GLB_FC_TH_DWLEN]; +} __packed; + +#define NBL_DSTORE_GLB_DROP_TH_ADDR (0x704130) +#define NBL_DSTORE_GLB_DROP_TH_DEPTH (1) +#define NBL_DSTORE_GLB_DROP_TH_WIDTH (32) +#define NBL_DSTORE_GLB_DROP_TH_DWLEN (1) +union dstore_glb_drop_th_u { + struct dstore_glb_drop_th { + u32 disc_th:10; /* [9:0] Default:985 RW */ + u32 rsv:21; /* [30:10] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DSTORE_GLB_DROP_TH_DWLEN]; +} __packed; + +#define NBL_DSTORE_PORT_FC_TH_ADDR (0x704134) +#define NBL_DSTORE_PORT_FC_TH_DEPTH (6) +#define NBL_DSTORE_PORT_FC_TH_WIDTH (32) +#define NBL_DSTORE_PORT_FC_TH_DWLEN (1) +union dstore_port_fc_th_u { + struct dstore_port_fc_th { + u32 xoff_th:10; /* [9:0] Default:400 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 xon_th:10; /* [25:16] Default:400 RW */ + u32 rsv:4; /* [29:26] Default:0x0 RO */ + u32 fc_set:1; /* [30:30] Default:0x0 RW */ + u32 fc_en:1; /* [31:31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DSTORE_PORT_FC_TH_DWLEN]; +} __packed; +#define NBL_DSTORE_PORT_FC_TH_REG(r) (NBL_DSTORE_PORT_FC_TH_ADDR + \ + (NBL_DSTORE_PORT_FC_TH_DWLEN * 4) * (r)) + +#define NBL_DSTORE_PORT_DROP_TH_ADDR (0x704150) +#define NBL_DSTORE_PORT_DROP_TH_DEPTH (6) +#define NBL_DSTORE_PORT_DROP_TH_WIDTH (32) +#define NBL_DSTORE_PORT_DROP_TH_DWLEN (1) +union dstore_port_drop_th_u { + struct dstore_port_drop_th { + u32 disc_th:10; /* [9:0] Default:800 RW */ + u32 rsv:21; /* [30:10] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DSTORE_PORT_DROP_TH_DWLEN]; +} __packed; +#define NBL_DSTORE_PORT_DROP_TH_REG(r) (NBL_DSTORE_PORT_DROP_TH_ADDR + \ + (NBL_DSTORE_PORT_DROP_TH_DWLEN * 4) * (r)) + +#define NBL_DSTORE_CFG_TEST_ADDR (0x704170) +#define NBL_DSTORE_CFG_TEST_DEPTH (1) +#define NBL_DSTORE_CFG_TEST_WIDTH (32) +#define NBL_DSTORE_CFG_TEST_DWLEN (1) +union dstore_cfg_test_u { + struct dstore_cfg_test { + u32 test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSTORE_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_DSTORE_HIGH_PRI_PKT_ADDR (0x70417c) +#define NBL_DSTORE_HIGH_PRI_PKT_DEPTH (1) +#define NBL_DSTORE_HIGH_PRI_PKT_WIDTH (32) +#define NBL_DSTORE_HIGH_PRI_PKT_DWLEN (1) +union dstore_high_pri_pkt_u { + struct dstore_high_pri_pkt { + u32 en:1; /* [0:0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_HIGH_PRI_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_COS_FC_TH_ADDR (0x704200) +#define NBL_DSTORE_COS_FC_TH_DEPTH (48) +#define NBL_DSTORE_COS_FC_TH_WIDTH (32) +#define NBL_DSTORE_COS_FC_TH_DWLEN (1) +union dstore_cos_fc_th_u { + struct dstore_cos_fc_th { + u32 xoff_th:10; /* [9:0] Default:100 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 xon_th:10; /* [25:16] Default:100 RW */ + u32 rsv:4; /* [29:26] Default:0x0 RO */ + u32 fc_set:1; /* [30:30] Default:0x0 RW */ + u32 fc_en:1; /* [31:31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSTORE_COS_FC_TH_DWLEN]; +} __packed; +#define NBL_DSTORE_COS_FC_TH_REG(r) (NBL_DSTORE_COS_FC_TH_ADDR + \ + (NBL_DSTORE_COS_FC_TH_DWLEN * 4) * (r)) + +#define NBL_DSTORE_COS_DROP_TH_ADDR (0x704300) +#define NBL_DSTORE_COS_DROP_TH_DEPTH (48) +#define NBL_DSTORE_COS_DROP_TH_WIDTH (32) +#define NBL_DSTORE_COS_DROP_TH_DWLEN (1) +union dstore_cos_drop_th_u { + struct dstore_cos_drop_th { 
+ u32 disc_th:10; /* [9:0] Default:120 RW */ + u32 rsv:21; /* [30:10] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSTORE_COS_DROP_TH_DWLEN]; +} __packed; +#define NBL_DSTORE_COS_DROP_TH_REG(r) (NBL_DSTORE_COS_DROP_TH_ADDR + \ + (NBL_DSTORE_COS_DROP_TH_DWLEN * 4) * (r)) + +#define NBL_DSTORE_SCH_PD_WRR_WGT_ADDR (0x704400) +#define NBL_DSTORE_SCH_PD_WRR_WGT_DEPTH (36) +#define NBL_DSTORE_SCH_PD_WRR_WGT_WIDTH (32) +#define NBL_DSTORE_SCH_PD_WRR_WGT_DWLEN (1) +union dstore_sch_pd_wrr_wgt_u { + struct dstore_sch_pd_wrr_wgt { + u32 wgt_cos:4; /* [3:0] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_SCH_PD_WRR_WGT_DWLEN]; +} __packed; +#define NBL_DSTORE_SCH_PD_WRR_WGT_REG(r) (NBL_DSTORE_SCH_PD_WRR_WGT_ADDR + \ + (NBL_DSTORE_SCH_PD_WRR_WGT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_COS7_FORCE_ADDR (0x704504) +#define NBL_DSTORE_COS7_FORCE_DEPTH (1) +#define NBL_DSTORE_COS7_FORCE_WIDTH (32) +#define NBL_DSTORE_COS7_FORCE_DWLEN (1) +union dstore_cos7_force_u { + struct dstore_cos7_force { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_COS7_FORCE_DWLEN]; +} __packed; + +#define NBL_DSTORE_D_DPORT_FC_TH_ADDR (0x704600) +#define NBL_DSTORE_D_DPORT_FC_TH_DEPTH (5) +#define NBL_DSTORE_D_DPORT_FC_TH_WIDTH (32) +#define NBL_DSTORE_D_DPORT_FC_TH_DWLEN (1) +union dstore_d_dport_fc_th_u { + struct dstore_d_dport_fc_th { + u32 xoff_th:11; /* [10:0] Default:200 RW */ + u32 rsv1:5; /* [15:11] Default:0x0 RO */ + u32 xon_th:11; /* [26:16] Default:100 RW */ + u32 rsv:3; /* [29:27] Default:0x0 RO */ + u32 fc_set:1; /* [30:30] Default:0x0 RW */ + u32 fc_en:1; /* [31:31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSTORE_D_DPORT_FC_TH_DWLEN]; +} __packed; +#define NBL_DSTORE_D_DPORT_FC_TH_REG(r) (NBL_DSTORE_D_DPORT_FC_TH_ADDR + \ + (NBL_DSTORE_D_DPORT_FC_TH_DWLEN * 4) * (r)) + +#define NBL_DSTORE_INIT_DONE_ADDR (0x704800) +#define NBL_DSTORE_INIT_DONE_DEPTH (1) +#define NBL_DSTORE_INIT_DONE_WIDTH (32) +#define NBL_DSTORE_INIT_DONE_DWLEN (1) +union dstore_init_done_u { + struct dstore_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DSTORE_SCH_IDLE_LIST_STATUS_CURR_ADDR (0x70481c) +#define NBL_DSTORE_SCH_IDLE_LIST_STATUS_CURR_DEPTH (1) +#define NBL_DSTORE_SCH_IDLE_LIST_STATUS_CURR_WIDTH (32) +#define NBL_DSTORE_SCH_IDLE_LIST_STATUS_CURR_DWLEN (1) +union dstore_sch_idle_list_status_curr_u { + struct dstore_sch_idle_list_status_curr { + u32 empt:1; /* [0] Default:0x0 RO */ + u32 full:1; /* [1] Default:0x1 RO */ + u32 cnt:10; /* [11:2] Default:0x200 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_SCH_IDLE_LIST_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_DSTORE_SCH_QUE_LIST_STATUS_ADDR (0x704820) +#define NBL_DSTORE_SCH_QUE_LIST_STATUS_DEPTH (48) +#define NBL_DSTORE_SCH_QUE_LIST_STATUS_WIDTH (32) +#define NBL_DSTORE_SCH_QUE_LIST_STATUS_DWLEN (1) +union dstore_sch_que_list_status_u { + struct dstore_sch_que_list_status { + u32 curr_empt:1; /* [0] Default:0x1 RO */ + u32 curr_cnt:10; /* [10:1] Default:0x0 RO */ + u32 history_udf:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_SCH_QUE_LIST_STATUS_DWLEN]; +} __packed; +#define NBL_DSTORE_SCH_QUE_LIST_STATUS_REG(r) (NBL_DSTORE_SCH_QUE_LIST_STATUS_ADDR + \ + 
(NBL_DSTORE_SCH_QUE_LIST_STATUS_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_TOTAL_PKT_ADDR (0x705050) +#define NBL_DSTORE_RCV_TOTAL_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_TOTAL_PKT_DWLEN (1) +union dstore_rcv_total_pkt_u { + struct dstore_rcv_total_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_BYTE_ADDR (0x705054) +#define NBL_DSTORE_RCV_TOTAL_BYTE_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_BYTE_WIDTH (48) +#define NBL_DSTORE_RCV_TOTAL_BYTE_DWLEN (2) +union dstore_rcv_total_byte_u { + struct dstore_rcv_total_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_BYTE_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_RIGHT_PKT_ADDR (0x70505c) +#define NBL_DSTORE_RCV_TOTAL_RIGHT_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_TOTAL_RIGHT_PKT_DWLEN (1) +union dstore_rcv_total_right_pkt_u { + struct dstore_rcv_total_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_WRONG_PKT_ADDR (0x705060) +#define NBL_DSTORE_RCV_TOTAL_WRONG_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_TOTAL_WRONG_PKT_DWLEN (1) +union dstore_rcv_total_wrong_pkt_u { + struct dstore_rcv_total_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_FWD_RIGHT_PKT_ADDR (0x705064) +#define NBL_DSTORE_RCV_FWD_RIGHT_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_FWD_RIGHT_PKT_DWLEN (1) +union dstore_rcv_fwd_right_pkt_u { + struct dstore_rcv_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_FWD_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_FWD_WRONG_PKT_ADDR (0x705068) +#define NBL_DSTORE_RCV_FWD_WRONG_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_FWD_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_FWD_WRONG_PKT_DWLEN (1) +union dstore_rcv_fwd_wrong_pkt_u { + struct dstore_rcv_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_FWD_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_HERR_RIGHT_PKT_ADDR (0x70506c) +#define NBL_DSTORE_RCV_HERR_RIGHT_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_HERR_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_HERR_RIGHT_PKT_DWLEN (1) +union dstore_rcv_herr_right_pkt_u { + struct dstore_rcv_herr_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_HERR_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_HERR_WRONG_PKT_ADDR (0x705070) +#define NBL_DSTORE_RCV_HERR_WRONG_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_HERR_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_HERR_WRONG_PKT_DWLEN (1) +union dstore_rcv_herr_wrong_pkt_u { + struct dstore_rcv_herr_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_HERR_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_TOTAL_PKT_ADDR (0x705074) +#define NBL_DSTORE_IPRO_TOTAL_PKT_DEPTH (1) +#define NBL_DSTORE_IPRO_TOTAL_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_TOTAL_PKT_DWLEN (1) +union dstore_ipro_total_pkt_u { + struct dstore_ipro_total_pkt { + u32 cnt:32; /* [31:0] 
Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_TOTAL_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_TOTAL_BYTE_ADDR (0x705078) +#define NBL_DSTORE_IPRO_TOTAL_BYTE_DEPTH (1) +#define NBL_DSTORE_IPRO_TOTAL_BYTE_WIDTH (48) +#define NBL_DSTORE_IPRO_TOTAL_BYTE_DWLEN (2) +union dstore_ipro_total_byte_u { + struct dstore_ipro_total_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_TOTAL_BYTE_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_FWD_RIGHT_PKT_ADDR (0x705080) +#define NBL_DSTORE_IPRO_FWD_RIGHT_PKT_DEPTH (1) +#define NBL_DSTORE_IPRO_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_FWD_RIGHT_PKT_DWLEN (1) +union dstore_ipro_fwd_right_pkt_u { + struct dstore_ipro_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_FWD_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_FWD_WRONG_PKT_ADDR (0x705084) +#define NBL_DSTORE_IPRO_FWD_WRONG_PKT_DEPTH (1) +#define NBL_DSTORE_IPRO_FWD_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_FWD_WRONG_PKT_DWLEN (1) +union dstore_ipro_fwd_wrong_pkt_u { + struct dstore_ipro_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_FWD_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_HERR_RIGHT_PKT_ADDR (0x705088) +#define NBL_DSTORE_IPRO_HERR_RIGHT_PKT_DEPTH (1) +#define NBL_DSTORE_IPRO_HERR_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_HERR_RIGHT_PKT_DWLEN (1) +union dstore_ipro_herr_right_pkt_u { + struct dstore_ipro_herr_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_HERR_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_HERR_WRONG_PKT_ADDR (0x70508c) +#define NBL_DSTORE_IPRO_HERR_WRONG_PKT_DEPTH (1) +#define NBL_DSTORE_IPRO_HERR_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_HERR_WRONG_PKT_DWLEN (1) +union dstore_ipro_herr_wrong_pkt_u { + struct dstore_ipro_herr_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_HERR_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_PMEM_TOTAL_PKT_ADDR (0x705090) +#define NBL_DSTORE_PMEM_TOTAL_PKT_DEPTH (1) +#define NBL_DSTORE_PMEM_TOTAL_PKT_WIDTH (32) +#define NBL_DSTORE_PMEM_TOTAL_PKT_DWLEN (1) +union dstore_pmem_total_pkt_u { + struct dstore_pmem_total_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_PMEM_TOTAL_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_PMEM_TOTAL_BYTE_ADDR (0x705094) +#define NBL_DSTORE_PMEM_TOTAL_BYTE_DEPTH (1) +#define NBL_DSTORE_PMEM_TOTAL_BYTE_WIDTH (48) +#define NBL_DSTORE_PMEM_TOTAL_BYTE_DWLEN (2) +union dstore_pmem_total_byte_u { + struct dstore_pmem_total_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_PMEM_TOTAL_BYTE_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_ERR_DROP_PKT_ADDR (0x70509c) +#define NBL_DSTORE_RCV_TOTAL_ERR_DROP_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_ERR_DROP_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_TOTAL_ERR_DROP_PKT_DWLEN (1) +union dstore_rcv_total_err_drop_pkt_u { + struct dstore_rcv_total_err_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_ERR_DROP_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_SHORT_PKT_ADDR (0x7050a0) +#define NBL_DSTORE_RCV_TOTAL_SHORT_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_SHORT_PKT_WIDTH 
(32) +#define NBL_DSTORE_RCV_TOTAL_SHORT_PKT_DWLEN (1) +union dstore_rcv_total_short_pkt_u { + struct dstore_rcv_total_short_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_SHORT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_LONG_PKT_ADDR (0x7050a4) +#define NBL_DSTORE_RCV_TOTAL_LONG_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_LONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_TOTAL_LONG_PKT_DWLEN (1) +union dstore_rcv_total_long_pkt_u { + struct dstore_rcv_total_long_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_LONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_BUF_TOTAL_DROP_PKT_ADDR (0x7050a8) +#define NBL_DSTORE_BUF_TOTAL_DROP_PKT_DEPTH (1) +#define NBL_DSTORE_BUF_TOTAL_DROP_PKT_WIDTH (32) +#define NBL_DSTORE_BUF_TOTAL_DROP_PKT_DWLEN (1) +union dstore_buf_total_drop_pkt_u { + struct dstore_buf_total_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_BUF_TOTAL_DROP_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_BUF_TOTAL_TRUN_PKT_ADDR (0x7050ac) +#define NBL_DSTORE_BUF_TOTAL_TRUN_PKT_DEPTH (1) +#define NBL_DSTORE_BUF_TOTAL_TRUN_PKT_WIDTH (32) +#define NBL_DSTORE_BUF_TOTAL_TRUN_PKT_DWLEN (1) +union dstore_buf_total_trun_pkt_u { + struct dstore_buf_total_trun_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_BUF_TOTAL_TRUN_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_PORT_PKT_ADDR (0x706000) +#define NBL_DSTORE_RCV_PORT_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_PKT_DWLEN (1) +union dstore_rcv_port_pkt_u { + struct dstore_rcv_port_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_PKT_REG(r) (NBL_DSTORE_RCV_PORT_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_BYTE_ADDR (0x706040) +#define NBL_DSTORE_RCV_PORT_BYTE_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_BYTE_WIDTH (48) +#define NBL_DSTORE_RCV_PORT_BYTE_DWLEN (2) +union dstore_rcv_port_byte_u { + struct dstore_rcv_port_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_BYTE_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_BYTE_REG(r) (NBL_DSTORE_RCV_PORT_BYTE_ADDR + \ + (NBL_DSTORE_RCV_PORT_BYTE_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_ADDR (0x7060c0) +#define NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_DWLEN (1) +union dstore_rcv_port_total_right_pkt_u { + struct dstore_rcv_port_total_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_REG(r) (NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_ADDR (0x706100) +#define NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_DWLEN (1) +union dstore_rcv_port_total_wrong_pkt_u { + struct dstore_rcv_port_total_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_DWLEN]; +} __packed; +#define 
NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_REG(r) (NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_ADDR (0x706140) +#define NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_DWLEN (1) +union dstore_rcv_port_fwd_right_pkt_u { + struct dstore_rcv_port_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_REG(r) (NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_ADDR (0x706180) +#define NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_DWLEN (1) +union dstore_rcv_port_fwd_wrong_pkt_u { + struct dstore_rcv_port_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_REG(r) (NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_ADDR (0x7061c0) +#define NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_DWLEN (1) +union dstore_rcv_port_herr_right_pkt_u { + struct dstore_rcv_port_herr_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_REG(r) (NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_ADDR (0x706200) +#define NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_DWLEN (1) +union dstore_rcv_port_herr_wrong_pkt_u { + struct dstore_rcv_port_herr_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_REG(r) (NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_IPRO_PORT_PKT_ADDR (0x706240) +#define NBL_DSTORE_IPRO_PORT_PKT_DEPTH (12) +#define NBL_DSTORE_IPRO_PORT_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_PORT_PKT_DWLEN (1) +union dstore_ipro_port_pkt_u { + struct dstore_ipro_port_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_PORT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_IPRO_PORT_PKT_REG(r) (NBL_DSTORE_IPRO_PORT_PKT_ADDR + \ + (NBL_DSTORE_IPRO_PORT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_IPRO_PORT_BYTE_ADDR (0x706280) +#define NBL_DSTORE_IPRO_PORT_BYTE_DEPTH (12) +#define NBL_DSTORE_IPRO_PORT_BYTE_WIDTH (48) +#define NBL_DSTORE_IPRO_PORT_BYTE_DWLEN (2) +union dstore_ipro_port_byte_u { + struct dstore_ipro_port_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_PORT_BYTE_DWLEN]; +} __packed; +#define NBL_DSTORE_IPRO_PORT_BYTE_REG(r) (NBL_DSTORE_IPRO_PORT_BYTE_ADDR + \ + (NBL_DSTORE_IPRO_PORT_BYTE_DWLEN * 4) * (r)) + +#define 
NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_ADDR (0x706300) +#define NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_DEPTH (12) +#define NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_DWLEN (1) +union dstore_ipro_port_fwd_right_pkt_u { + struct dstore_ipro_port_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_REG(r) (NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_ADDR + \ + (NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_ADDR (0x706340) +#define NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_DEPTH (12) +#define NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_DWLEN (1) +union dstore_ipro_port_fwd_wrong_pkt_u { + struct dstore_ipro_port_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_REG(r) (NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_ADDR + \ + (NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_PMEM_PORT_PKT_ADDR (0x706380) +#define NBL_DSTORE_PMEM_PORT_PKT_DEPTH (12) +#define NBL_DSTORE_PMEM_PORT_PKT_WIDTH (32) +#define NBL_DSTORE_PMEM_PORT_PKT_DWLEN (1) +union dstore_pmem_port_pkt_u { + struct dstore_pmem_port_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_PMEM_PORT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_PMEM_PORT_PKT_REG(r) (NBL_DSTORE_PMEM_PORT_PKT_ADDR + \ + (NBL_DSTORE_PMEM_PORT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_PMEM_PORT_BYTE_ADDR (0x7063c0) +#define NBL_DSTORE_PMEM_PORT_BYTE_DEPTH (12) +#define NBL_DSTORE_PMEM_PORT_BYTE_WIDTH (48) +#define NBL_DSTORE_PMEM_PORT_BYTE_DWLEN (2) +union dstore_pmem_port_byte_u { + struct dstore_pmem_port_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_PMEM_PORT_BYTE_DWLEN]; +} __packed; +#define NBL_DSTORE_PMEM_PORT_BYTE_REG(r) (NBL_DSTORE_PMEM_PORT_BYTE_ADDR + \ + (NBL_DSTORE_PMEM_PORT_BYTE_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_ADDR (0x706440) +#define NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_DWLEN (1) +union dstore_rcv_err_port_drop_pkt_u { + struct dstore_rcv_err_port_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_REG(r) (NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_ADDR + \ + (NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_ADDR (0x706480) +#define NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_DWLEN (1) +union dstore_rcv_port_short_drop_pkt_u { + struct dstore_rcv_port_short_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_REG(r) (NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_LONG_PKT_ADDR (0x7064c0) +#define NBL_DSTORE_RCV_PORT_LONG_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_LONG_PKT_WIDTH (32) +#define 
NBL_DSTORE_RCV_PORT_LONG_PKT_DWLEN (1) +union dstore_rcv_port_long_pkt_u { + struct dstore_rcv_port_long_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_LONG_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_LONG_PKT_REG(r) (NBL_DSTORE_RCV_PORT_LONG_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_LONG_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_BUF_PORT_DROP_PKT_ADDR (0x706500) +#define NBL_DSTORE_BUF_PORT_DROP_PKT_DEPTH (12) +#define NBL_DSTORE_BUF_PORT_DROP_PKT_WIDTH (32) +#define NBL_DSTORE_BUF_PORT_DROP_PKT_DWLEN (1) +union dstore_buf_port_drop_pkt_u { + struct dstore_buf_port_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_BUF_PORT_DROP_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_BUF_PORT_DROP_PKT_REG(r) (NBL_DSTORE_BUF_PORT_DROP_PKT_ADDR + \ + (NBL_DSTORE_BUF_PORT_DROP_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_BUF_PORT_TRUN_PKT_ADDR (0x706540) +#define NBL_DSTORE_BUF_PORT_TRUN_PKT_DEPTH (12) +#define NBL_DSTORE_BUF_PORT_TRUN_PKT_WIDTH (32) +#define NBL_DSTORE_BUF_PORT_TRUN_PKT_DWLEN (1) +union dstore_buf_port_trun_pkt_u { + struct dstore_buf_port_trun_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_BUF_PORT_TRUN_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_BUF_PORT_TRUN_PKT_REG(r) (NBL_DSTORE_BUF_PORT_TRUN_PKT_ADDR + \ + (NBL_DSTORE_BUF_PORT_TRUN_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_BP_CUR_1ST_ADDR (0x706580) +#define NBL_DSTORE_BP_CUR_1ST_DEPTH (1) +#define NBL_DSTORE_BP_CUR_1ST_WIDTH (32) +#define NBL_DSTORE_BP_CUR_1ST_DWLEN (1) +union dstore_bp_cur_1st_u { + struct dstore_bp_cur_1st { + u32 link_fc:6; /* [5:0] Default:0x0 RO */ + u32 rsv:2; /* [7:6] Default:0x0 RO */ + u32 pfc:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_BP_CUR_1ST_DWLEN]; +} __packed; + +#define NBL_DSTORE_BP_CUR_2ND_ADDR (0x706584) +#define NBL_DSTORE_BP_CUR_2ND_DEPTH (1) +#define NBL_DSTORE_BP_CUR_2ND_WIDTH (32) +#define NBL_DSTORE_BP_CUR_2ND_DWLEN (1) +union dstore_bp_cur_2nd_u { + struct dstore_bp_cur_2nd { + u32 pfc:24; /* [23:0] Default:0x0 RO */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_BP_CUR_2ND_DWLEN]; +} __packed; + +#define NBL_DSTORE_BP_HISTORY_LINK_ADDR (0x706590) +#define NBL_DSTORE_BP_HISTORY_LINK_DEPTH (6) +#define NBL_DSTORE_BP_HISTORY_LINK_WIDTH (32) +#define NBL_DSTORE_BP_HISTORY_LINK_DWLEN (1) +union dstore_bp_history_link_u { + struct dstore_bp_history_link { + u32 fc:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_BP_HISTORY_LINK_DWLEN]; +} __packed; +#define NBL_DSTORE_BP_HISTORY_LINK_REG(r) (NBL_DSTORE_BP_HISTORY_LINK_ADDR + \ + (NBL_DSTORE_BP_HISTORY_LINK_DWLEN * 4) * (r)) + +#define NBL_DSTORE_BP_HISTORY_ADDR (0x7065b0) +#define NBL_DSTORE_BP_HISTORY_DEPTH (48) +#define NBL_DSTORE_BP_HISTORY_WIDTH (32) +#define NBL_DSTORE_BP_HISTORY_DWLEN (1) +union dstore_bp_history_u { + struct dstore_bp_history { + u32 pfc:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_BP_HISTORY_DWLEN]; +} __packed; +#define NBL_DSTORE_BP_HISTORY_REG(r) (NBL_DSTORE_BP_HISTORY_ADDR + \ + (NBL_DSTORE_BP_HISTORY_DWLEN * 4) * (r)) + +#define NBL_DSTORE_WRR_CUR_ADDR (0x706800) +#define NBL_DSTORE_WRR_CUR_DEPTH (36) +#define NBL_DSTORE_WRR_CUR_WIDTH (32) +#define NBL_DSTORE_WRR_CUR_DWLEN (1) +union dstore_wrr_cur_u { + struct dstore_wrr_cur { + u32 wgt_cos:5; /* [4:0] Default:0x0 RO */ + 
u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_WRR_CUR_DWLEN]; +} __packed; +#define NBL_DSTORE_WRR_CUR_REG(r) (NBL_DSTORE_WRR_CUR_ADDR + \ + (NBL_DSTORE_WRR_CUR_DWLEN * 4) * (r)) + +#define NBL_DSTORE_DDPORT_CUR_ADDR (0x707018) +#define NBL_DSTORE_DDPORT_CUR_DEPTH (1) +#define NBL_DSTORE_DDPORT_CUR_WIDTH (32) +#define NBL_DSTORE_DDPORT_CUR_DWLEN (1) +union dstore_ddport_cur_u { + struct dstore_ddport_cur { + u32 link_fc:5; /* [4:0] Default:0x0 RO */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_DDPORT_CUR_DWLEN]; +} __packed; + +#define NBL_DSTORE_DDPORT_HISTORY_ADDR (0x70701c) +#define NBL_DSTORE_DDPORT_HISTORY_DEPTH (5) +#define NBL_DSTORE_DDPORT_HISTORY_WIDTH (32) +#define NBL_DSTORE_DDPORT_HISTORY_DWLEN (1) +union dstore_ddport_history_u { + struct dstore_ddport_history { + u32 link_fc:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_DDPORT_HISTORY_DWLEN]; +} __packed; +#define NBL_DSTORE_DDPORT_HISTORY_REG(r) (NBL_DSTORE_DDPORT_HISTORY_ADDR + \ + (NBL_DSTORE_DDPORT_HISTORY_DWLEN * 4) * (r)) + +#define NBL_DSTORE_DDPORT_RSC_ADD_ADDR (0x707050) +#define NBL_DSTORE_DDPORT_RSC_ADD_DEPTH (5) +#define NBL_DSTORE_DDPORT_RSC_ADD_WIDTH (32) +#define NBL_DSTORE_DDPORT_RSC_ADD_DWLEN (1) +union dstore_ddport_rsc_add_u { + struct dstore_ddport_rsc_add { + u32 cnt:12; /* [11:0] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_DDPORT_RSC_ADD_DWLEN]; +} __packed; +#define NBL_DSTORE_DDPORT_RSC_ADD_REG(r) (NBL_DSTORE_DDPORT_RSC_ADD_ADDR + \ + (NBL_DSTORE_DDPORT_RSC_ADD_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ucar.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ucar.h new file mode 100644 index 0000000000000000000000000000000000000000..0f6add4268383d9ccf23ddd09f0139dacaaf5127 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ucar.h @@ -0,0 +1,409 @@ +// Code generated by interstellar. DO NOT EDIT. 
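+/*
+ * Editor's note (illustrative sketch, not part of the generated file):
+ * this header describes the UCAR per-flow policers.  Each entry of
+ * union ucar_flow_u below carries a valid bit plus the usual two-rate
+ * policer tuple -- cir/pir rates and cbs/pbs burst sizes, in
+ * hardware-specific units.  Assuming writel() on an ioremapped base,
+ * programming table entry i would look roughly like:
+ *
+ *	union ucar_flow_u flow = {
+ *		.info = { .valid = 1, .cir = cir, .pir = pir,
+ *			  .cbs = cbs, .pbs = pbs },
+ *	};
+ *	int j;
+ *
+ *	for (j = 0; j < NBL_UCAR_FLOW_DWLEN; j++)
+ *		writel(flow.data[j], base + NBL_UCAR_FLOW_REG(i) + 4 * j);
+ */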
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_UCAR_H +#define NBL_UCAR_H 1 + +#include <linux/types.h> + +#define NBL_UCAR_BASE (0x00E84000) + +#define NBL_UCAR_INT_STATUS_ADDR (0xe84000) +#define NBL_UCAR_INT_STATUS_DEPTH (1) +#define NBL_UCAR_INT_STATUS_WIDTH (32) +#define NBL_UCAR_INT_STATUS_DWLEN (1) +union ucar_int_status_u { + struct ucar_int_status { + u32 color_err:1; /* [0] Default:0x0 RWC */ + u32 parity_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 cif_err:1; /* [3] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [4] Default:0x0 RWC */ + u32 atid_nomat_err:1; /* [5] Default:0x0 RWC */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_UCAR_INT_MASK_ADDR (0xe84004) +#define NBL_UCAR_INT_MASK_DEPTH (1) +#define NBL_UCAR_INT_MASK_WIDTH (32) +#define NBL_UCAR_INT_MASK_DWLEN (1) +union ucar_int_mask_u { + struct ucar_int_mask { + u32 color_err:1; /* [0] Default:0x1 RW */ + u32 parity_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 cif_err:1; /* [3] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [4] Default:0x0 RW */ + u32 atid_nomat_err:1; /* [5] Default:0x1 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_INT_MASK_DWLEN]; +} __packed; + +#define NBL_UCAR_INT_SET_ADDR (0xe84008) +#define NBL_UCAR_INT_SET_DEPTH (1) +#define NBL_UCAR_INT_SET_WIDTH (32) +#define NBL_UCAR_INT_SET_DWLEN (1) +union ucar_int_set_u { + struct ucar_int_set { + u32 color_err:1; /* [0] Default:0x0 WO */ + u32 parity_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 cif_err:1; /* [3] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [4] Default:0x0 WO */ + u32 atid_nomat_err:1; /* [5] Default:0x0 WO */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_INT_SET_DWLEN]; +} __packed; + +#define NBL_UCAR_PARITY_ERR_INFO_ADDR (0xe84104) +#define NBL_UCAR_PARITY_ERR_INFO_DEPTH (1) +#define NBL_UCAR_PARITY_ERR_INFO_WIDTH (32) +#define NBL_UCAR_PARITY_ERR_INFO_DWLEN (1) +union ucar_parity_err_info_u { + struct ucar_parity_err_info { + u32 ram_addr:12; /* [11:0] Default:0x0 RO */ + u32 ram_id:3; /* [14:12] Default:0x0 RO */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UCAR_CIF_ERR_INFO_ADDR (0xe8411c) +#define NBL_UCAR_CIF_ERR_INFO_DEPTH (1) +#define NBL_UCAR_CIF_ERR_INFO_WIDTH (32) +#define NBL_UCAR_CIF_ERR_INFO_DWLEN (1) +union ucar_cif_err_info_u { + struct ucar_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UCAR_ATID_NOMAT_ERR_INFO_ADDR (0xe84134) +#define NBL_UCAR_ATID_NOMAT_ERR_INFO_DEPTH (1) +#define NBL_UCAR_ATID_NOMAT_ERR_INFO_WIDTH (32) +#define NBL_UCAR_ATID_NOMAT_ERR_INFO_DWLEN (1) +union ucar_atid_nomat_err_info_u { + struct ucar_atid_nomat_err_info { + u32 id:2; /* [1:0] Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_ATID_NOMAT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UCAR_CAR_CTRL_ADDR (0xe84200) +#define NBL_UCAR_CAR_CTRL_DEPTH (1) +#define NBL_UCAR_CAR_CTRL_WIDTH (32) +#define NBL_UCAR_CAR_CTRL_DWLEN (1) +union ucar_car_ctrl_u { + struct ucar_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32
rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_UCAR_INIT_START_ADDR (0xe84204) +#define NBL_UCAR_INIT_START_DEPTH (1) +#define NBL_UCAR_INIT_START_WIDTH (32) +#define NBL_UCAR_INIT_START_DWLEN (1) +union ucar_init_start_u { + struct ucar_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_INIT_START_DWLEN]; +} __packed; + +#define NBL_UCAR_FWD_CARID_ADDR (0xe84210) +#define NBL_UCAR_FWD_CARID_DEPTH (1) +#define NBL_UCAR_FWD_CARID_WIDTH (32) +#define NBL_UCAR_FWD_CARID_DWLEN (1) +union ucar_fwd_carid_u { + struct ucar_fwd_carid { + u32 act_id:6; /* [5:0] Default:0x5 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FWD_CARID_DWLEN]; +} __packed; + +#define NBL_UCAR_FWD_FLOW_CAR_ADDR (0xe84214) +#define NBL_UCAR_FWD_FLOW_CAR_DEPTH (1) +#define NBL_UCAR_FWD_FLOW_CAR_WIDTH (32) +#define NBL_UCAR_FWD_FLOW_CAR_DWLEN (1) +union ucar_fwd_flow_car_u { + struct ucar_fwd_flow_car { + u32 act_id:6; /* [5:0] Default:0x6 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FWD_FLOW_CAR_DWLEN]; +} __packed; + +#define NBL_UCAR_PBS_SUB_ADDR (0xe84224) +#define NBL_UCAR_PBS_SUB_DEPTH (1) +#define NBL_UCAR_PBS_SUB_WIDTH (32) +#define NBL_UCAR_PBS_SUB_DWLEN (1) +union ucar_pbs_sub_u { + struct ucar_pbs_sub { + u32 sel:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_PBS_SUB_DWLEN]; +} __packed; + +#define NBL_UCAR_FLOW_TIMMING_ADD_ADDR (0xe84400) +#define NBL_UCAR_FLOW_TIMMING_ADD_DEPTH (1) +#define NBL_UCAR_FLOW_TIMMING_ADD_WIDTH (32) +#define NBL_UCAR_FLOW_TIMMING_ADD_DWLEN (1) +union ucar_flow_timming_add_u { + struct ucar_flow_timming_add { + u32 cycle_max:12; /* [11:0] Default:0x4 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 depth:14; /* [29:16] Default:0x4B0 RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FLOW_TIMMING_ADD_DWLEN]; +} __packed; + +#define NBL_UCAR_FLOW_4K_TIMMING_ADD_ADDR (0xe84404) +#define NBL_UCAR_FLOW_4K_TIMMING_ADD_DEPTH (1) +#define NBL_UCAR_FLOW_4K_TIMMING_ADD_WIDTH (32) +#define NBL_UCAR_FLOW_4K_TIMMING_ADD_DWLEN (1) +union ucar_flow_4k_timming_add_u { + struct ucar_flow_4k_timming_add { + u32 cycle_max:12; /* [11:0] Default:0x4 RW */ + u32 depth:18; /* [29:12] Default:0x12C0 RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FLOW_4K_TIMMING_ADD_DWLEN]; +} __packed; + +#define NBL_UCAR_INIT_DONE_ADDR (0xe84408) +#define NBL_UCAR_INIT_DONE_DEPTH (1) +#define NBL_UCAR_INIT_DONE_WIDTH (32) +#define NBL_UCAR_INIT_DONE_DWLEN (1) +union ucar_init_done_u { + struct ucar_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_UCAR_INPUT_CELL_ADDR (0xe8441c) +#define NBL_UCAR_INPUT_CELL_DEPTH (1) +#define NBL_UCAR_INPUT_CELL_WIDTH (32) +#define NBL_UCAR_INPUT_CELL_DWLEN (1) +union ucar_input_cell_u { + struct ucar_input_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_INPUT_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_RD_CELL_ADDR (0xe84420) +#define NBL_UCAR_RD_CELL_DEPTH (1) +#define NBL_UCAR_RD_CELL_WIDTH (32) +#define NBL_UCAR_RD_CELL_DWLEN (1) +union ucar_rd_cell_u { + 
struct ucar_rd_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_RD_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_CAR_CELL_ADDR (0xe84424) +#define NBL_UCAR_CAR_CELL_DEPTH (1) +#define NBL_UCAR_CAR_CELL_WIDTH (32) +#define NBL_UCAR_CAR_CELL_DWLEN (1) +union ucar_car_cell_u { + struct ucar_car_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_CAR_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_CAR_FLOW_CELL_ADDR (0xe84428) +#define NBL_UCAR_CAR_FLOW_CELL_DEPTH (1) +#define NBL_UCAR_CAR_FLOW_CELL_WIDTH (32) +#define NBL_UCAR_CAR_FLOW_CELL_DWLEN (1) +union ucar_car_flow_cell_u { + struct ucar_car_flow_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_CAR_FLOW_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_CAR_FLOW_4K_CELL_ADDR (0xe8442c) +#define NBL_UCAR_CAR_FLOW_4K_CELL_DEPTH (1) +#define NBL_UCAR_CAR_FLOW_4K_CELL_WIDTH (32) +#define NBL_UCAR_CAR_FLOW_4K_CELL_DWLEN (1) +union ucar_car_flow_4k_cell_u { + struct ucar_car_flow_4k_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_CAR_FLOW_4K_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_NOCAR_CELL_ADDR (0xe84430) +#define NBL_UCAR_NOCAR_CELL_DEPTH (1) +#define NBL_UCAR_NOCAR_CELL_WIDTH (32) +#define NBL_UCAR_NOCAR_CELL_DWLEN (1) +union ucar_nocar_cell_u { + struct ucar_nocar_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_NOCAR_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_NOCAR_ERR_ADDR (0xe84434) +#define NBL_UCAR_NOCAR_ERR_DEPTH (1) +#define NBL_UCAR_NOCAR_ERR_WIDTH (32) +#define NBL_UCAR_NOCAR_ERR_DWLEN (1) +union ucar_nocar_err_u { + struct ucar_nocar_err { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_NOCAR_ERR_DWLEN]; +} __packed; + +#define NBL_UCAR_GREEN_CELL_ADDR (0xe84438) +#define NBL_UCAR_GREEN_CELL_DEPTH (1) +#define NBL_UCAR_GREEN_CELL_WIDTH (32) +#define NBL_UCAR_GREEN_CELL_DWLEN (1) +union ucar_green_cell_u { + struct ucar_green_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_GREEN_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_YELLOW_CELL_ADDR (0xe8443c) +#define NBL_UCAR_YELLOW_CELL_DEPTH (1) +#define NBL_UCAR_YELLOW_CELL_WIDTH (32) +#define NBL_UCAR_YELLOW_CELL_DWLEN (1) +union ucar_yellow_cell_u { + struct ucar_yellow_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_YELLOW_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_RED_CELL_ADDR (0xe84440) +#define NBL_UCAR_RED_CELL_DEPTH (1) +#define NBL_UCAR_RED_CELL_WIDTH (32) +#define NBL_UCAR_RED_CELL_DWLEN (1) +union ucar_red_cell_u { + struct ucar_red_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_RED_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_NOCAR_PKT_ADDR (0xe84444) +#define NBL_UCAR_NOCAR_PKT_DEPTH (1) +#define NBL_UCAR_NOCAR_PKT_WIDTH (48) +#define NBL_UCAR_NOCAR_PKT_DWLEN (2) +union ucar_nocar_pkt_u { + struct ucar_nocar_pkt { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_NOCAR_PKT_DWLEN]; +} __packed; + +#define NBL_UCAR_GREEN_PKT_ADDR (0xe8444c) +#define NBL_UCAR_GREEN_PKT_DEPTH (1) +#define NBL_UCAR_GREEN_PKT_WIDTH (48) +#define NBL_UCAR_GREEN_PKT_DWLEN (2) +union ucar_green_pkt_u { + struct ucar_green_pkt { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 
data[NBL_UCAR_GREEN_PKT_DWLEN]; +} __packed; + +#define NBL_UCAR_YELLOW_PKT_ADDR (0xe84454) +#define NBL_UCAR_YELLOW_PKT_DEPTH (1) +#define NBL_UCAR_YELLOW_PKT_WIDTH (48) +#define NBL_UCAR_YELLOW_PKT_DWLEN (2) +union ucar_yellow_pkt_u { + struct ucar_yellow_pkt { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_YELLOW_PKT_DWLEN]; +} __packed; + +#define NBL_UCAR_RED_PKT_ADDR (0xe8445c) +#define NBL_UCAR_RED_PKT_DEPTH (1) +#define NBL_UCAR_RED_PKT_WIDTH (48) +#define NBL_UCAR_RED_PKT_DWLEN (2) +union ucar_red_pkt_u { + struct ucar_red_pkt { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_RED_PKT_DWLEN]; +} __packed; + +#define NBL_UCAR_FWD_TYPE_WRONG_CELL_ADDR (0xe84464) +#define NBL_UCAR_FWD_TYPE_WRONG_CELL_DEPTH (1) +#define NBL_UCAR_FWD_TYPE_WRONG_CELL_WIDTH (32) +#define NBL_UCAR_FWD_TYPE_WRONG_CELL_DWLEN (1) +union ucar_fwd_type_wrong_cell_u { + struct ucar_fwd_type_wrong_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_FWD_TYPE_WRONG_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_FLOW_ADDR (0xe88000) +#define NBL_UCAR_FLOW_DEPTH (1024) +#define NBL_UCAR_FLOW_WIDTH (128) +#define NBL_UCAR_FLOW_DWLEN (4) +union ucar_flow_u { + struct ucar_flow { + u32 valid:1; /* [0] Default:0x0 RW */ + u32 depth:19; /* [19:1] Default:0x0 RW */ + u32 cir:19; /* [38:20] Default:0x0 RW */ + u32 pir:19; /* [57:39] Default:0x0 RW */ + u32 cbs:21; /* [78:58] Default:0x0 RW */ + u32 pbs:21; /* [99:79] Default:0x0 RW */ + u32 rsv:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FLOW_DWLEN]; +} __packed; +#define NBL_UCAR_FLOW_REG(r) (NBL_UCAR_FLOW_ADDR + \ + (NBL_UCAR_FLOW_DWLEN * 4) * (r)) + +#define NBL_UCAR_FLOW_4K_ADDR (0xe94000) +#define NBL_UCAR_FLOW_4K_DEPTH (4096) +#define NBL_UCAR_FLOW_4K_WIDTH (128) +#define NBL_UCAR_FLOW_4K_DWLEN (4) +union ucar_flow_4k_u { + struct ucar_flow_4k { + u32 valid:1; /* [0] Default:0x0 RW */ + u32 depth:21; /* [21:1] Default:0x0 RW */ + u32 cir:21; /* [42:22] Default:0x0 RW */ + u32 pir:21; /* [63:43] Default:0x0 RW */ + u32 cbs:23; /* [86:64] Default:0x0 RW */ + u32 pbs:23; /* [109:87] Default:0x0 RW */ + u32 rsv:18; /* [127:110] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FLOW_4K_DWLEN]; +} __packed; +#define NBL_UCAR_FLOW_4K_REG(r) (NBL_UCAR_FLOW_4K_ADDR + \ + (NBL_UCAR_FLOW_4K_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upa.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upa.h new file mode 100644 index 0000000000000000000000000000000000000000..16061974d449522a016295b465f774e84ffea7ca --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upa.h @@ -0,0 +1,817 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_UPA_H +#define NBL_UPA_H 1 + +#include <linux/types.h> + +#define NBL_UPA_BASE (0x0008C000) + +#define NBL_UPA_INT_STATUS_ADDR (0x8c000) +#define NBL_UPA_INT_STATUS_DEPTH (1) +#define NBL_UPA_INT_STATUS_WIDTH (32) +#define NBL_UPA_INT_STATUS_DWLEN (1) +union upa_int_status_u { + struct upa_int_status { + u32 fatal_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_underflow:1; /* [1] Default:0x0 RWC */ + u32 fifo_overflow:1; /* [2] Default:0x0 RWC */ + u32 fsm_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 rsv1:1; /* [5] Default:0x0 RO */ + u32 cfg_err:1; /* [6] Default:0x0 RWC */ + u32 ucor_err:1; /* [7] Default:0x0 RWC */ + u32 cor_err:1; /* [8] Default:0x0 RWC */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_UPA_INT_MASK_ADDR (0x8c004) +#define NBL_UPA_INT_MASK_DEPTH (1) +#define NBL_UPA_INT_MASK_WIDTH (32) +#define NBL_UPA_INT_MASK_DWLEN (1) +union upa_int_mask_u { + struct upa_int_mask { + u32 fatal_err:1; /* [0] Default:0x0 RW */ + u32 fifo_underflow:1; /* [1] Default:0x0 RW */ + u32 fifo_overflow:1; /* [2] Default:0x0 RW */ + u32 fsm_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 rsv1:1; /* [5] Default:0x0 RO */ + u32 cfg_err:1; /* [6] Default:0x0 RW */ + u32 ucor_err:1; /* [7] Default:0x0 RW */ + u32 cor_err:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_INT_MASK_DWLEN]; +} __packed; + +#define NBL_UPA_INT_SET_ADDR (0x8c008) +#define NBL_UPA_INT_SET_DEPTH (1) +#define NBL_UPA_INT_SET_WIDTH (32) +#define NBL_UPA_INT_SET_DWLEN (1) +union upa_int_set_u { + struct upa_int_set { + u32 fatal_err:1; /* [0] Default:0x0 WO */ + u32 fifo_underflow:1; /* [1] Default:0x0 WO */ + u32 fifo_overflow:1; /* [2] Default:0x0 WO */ + u32 fsm_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 rsv1:1; /* [5] Default:0x0 RO */ + u32 cfg_err:1; /* [6] Default:0x0 WO */ + u32 ucor_err:1; /* [7] Default:0x0 WO */ + u32 cor_err:1; /* [8] Default:0x0 WO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_INT_SET_DWLEN]; +} __packed; + +#define NBL_UPA_INIT_DONE_ADDR (0x8c00c) +#define NBL_UPA_INIT_DONE_DEPTH (1) +#define NBL_UPA_INIT_DONE_WIDTH (32) +#define NBL_UPA_INIT_DONE_DWLEN (1) +union upa_init_done_u { + struct upa_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_UPA_CIF_ERR_INFO_ADDR (0x8c040) +#define NBL_UPA_CIF_ERR_INFO_DEPTH (1) +#define NBL_UPA_CIF_ERR_INFO_WIDTH (32) +#define NBL_UPA_CIF_ERR_INFO_DWLEN (1) +union upa_cif_err_info_u { + struct upa_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPA_CFG_ERR_INFO_ADDR (0x8c050) +#define NBL_UPA_CFG_ERR_INFO_DEPTH (1) +#define NBL_UPA_CFG_ERR_INFO_WIDTH (32) +#define NBL_UPA_CFG_ERR_INFO_DWLEN (1) +union upa_cfg_err_info_u { + struct upa_cfg_err_info { + u32 id0:2; /* [1:0] Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPA_CAR_CTRL_ADDR (0x8c100) +#define NBL_UPA_CAR_CTRL_DEPTH (1) +#define NBL_UPA_CAR_CTRL_WIDTH (32) +#define 
NBL_UPA_CAR_CTRL_DWLEN (1) +union upa_car_ctrl_u { + struct upa_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_UPA_INIT_START_ADDR (0x8c180) +#define NBL_UPA_INIT_START_DEPTH (1) +#define NBL_UPA_INIT_START_WIDTH (32) +#define NBL_UPA_INIT_START_DWLEN (1) +union upa_init_start_u { + struct upa_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_INIT_START_DWLEN]; +} __packed; + +#define NBL_UPA_LAYO_CKSUM0_CTRL_ADDR (0x8c1b0) +#define NBL_UPA_LAYO_CKSUM0_CTRL_DEPTH (4) +#define NBL_UPA_LAYO_CKSUM0_CTRL_WIDTH (32) +#define NBL_UPA_LAYO_CKSUM0_CTRL_DWLEN (1) +union upa_layo_cksum0_ctrl_u { + struct upa_layo_cksum0_ctrl { + u32 data:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_UPA_LAYO_CKSUM0_CTRL_DWLEN]; +} __packed; +#define NBL_UPA_LAYO_CKSUM0_CTRL_REG(r) (NBL_UPA_LAYO_CKSUM0_CTRL_ADDR + \ + (NBL_UPA_LAYO_CKSUM0_CTRL_DWLEN * 4) * (r)) + +#define NBL_UPA_LAYI_CKSUM0_CTRL_ADDR (0x8c1c0) +#define NBL_UPA_LAYI_CKSUM0_CTRL_DEPTH (4) +#define NBL_UPA_LAYI_CKSUM0_CTRL_WIDTH (32) +#define NBL_UPA_LAYI_CKSUM0_CTRL_DWLEN (1) +union upa_layi_cksum0_ctrl_u { + struct upa_layi_cksum0_ctrl { + u32 data:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_UPA_LAYI_CKSUM0_CTRL_DWLEN]; +} __packed; +#define NBL_UPA_LAYI_CKSUM0_CTRL_REG(r) (NBL_UPA_LAYI_CKSUM0_CTRL_ADDR + \ + (NBL_UPA_LAYI_CKSUM0_CTRL_DWLEN * 4) * (r)) + +#define NBL_UPA_FWD_TYPE_STAGE_0_ADDR (0x8c1d0) +#define NBL_UPA_FWD_TYPE_STAGE_0_DEPTH (1) +#define NBL_UPA_FWD_TYPE_STAGE_0_WIDTH (32) +#define NBL_UPA_FWD_TYPE_STAGE_0_DWLEN (1) +union upa_fwd_type_stage_0_u { + struct upa_fwd_type_stage_0 { + u32 tbl:32; /* [31:0] Default:0xF3FFFFF2 RW */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_STAGE_0_DWLEN]; +} __packed; + +#define NBL_UPA_FWD_TYPE_STAGE_1_ADDR (0x8c1d4) +#define NBL_UPA_FWD_TYPE_STAGE_1_DEPTH (1) +#define NBL_UPA_FWD_TYPE_STAGE_1_WIDTH (32) +#define NBL_UPA_FWD_TYPE_STAGE_1_DWLEN (1) +union upa_fwd_type_stage_1_u { + struct upa_fwd_type_stage_1 { + u32 tbl:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_STAGE_1_DWLEN]; +} __packed; + +#define NBL_UPA_FWD_TYPE_STAGE_2_ADDR (0x8c1d8) +#define NBL_UPA_FWD_TYPE_STAGE_2_DEPTH (1) +#define NBL_UPA_FWD_TYPE_STAGE_2_WIDTH (32) +#define NBL_UPA_FWD_TYPE_STAGE_2_DWLEN (1) +union upa_fwd_type_stage_2_u { + struct upa_fwd_type_stage_2 { + u32 tbl:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_STAGE_2_DWLEN]; +} __packed; + +#define NBL_UPA_FWD_TYPE_BYPASS_0_ADDR (0x8c1e0) +#define NBL_UPA_FWD_TYPE_BYPASS_0_DEPTH (1) +#define NBL_UPA_FWD_TYPE_BYPASS_0_WIDTH (32) +#define NBL_UPA_FWD_TYPE_BYPASS_0_DWLEN (1) +union upa_fwd_type_bypass_0_u { + struct upa_fwd_type_bypass_0 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_BYPASS_0_DWLEN]; +} __packed; + +#define NBL_UPA_FWD_TYPE_BYPASS_1_ADDR (0x8c1e4) +#define NBL_UPA_FWD_TYPE_BYPASS_1_DEPTH (1) +#define NBL_UPA_FWD_TYPE_BYPASS_1_WIDTH (32) +#define NBL_UPA_FWD_TYPE_BYPASS_1_DWLEN (1) +union upa_fwd_type_bypass_1_u { + struct upa_fwd_type_bypass_1 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; 
/* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_BYPASS_1_DWLEN]; +} __packed; + +#define NBL_UPA_FWD_TYPE_BYPASS_2_ADDR (0x8c1e8) +#define NBL_UPA_FWD_TYPE_BYPASS_2_DEPTH (1) +#define NBL_UPA_FWD_TYPE_BYPASS_2_WIDTH (32) +#define NBL_UPA_FWD_TYPE_BYPASS_2_DWLEN (1) +union upa_fwd_type_bypass_2_u { + struct upa_fwd_type_bypass_2 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_BYPASS_2_DWLEN]; +} __packed; + +#define NBL_UPA_DPORT_EXTRACT_ADDR (0x8c1ec) +#define NBL_UPA_DPORT_EXTRACT_DEPTH (1) +#define NBL_UPA_DPORT_EXTRACT_WIDTH (32) +#define NBL_UPA_DPORT_EXTRACT_DWLEN (1) +union upa_dport_extract_u { + struct upa_dport_extract { + u32 id:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_DPORT_EXTRACT_DWLEN]; +} __packed; + +#define NBL_UPA_LAYO_PHV_ADDR (0x8c1f0) +#define NBL_UPA_LAYO_PHV_DEPTH (1) +#define NBL_UPA_LAYO_PHV_WIDTH (32) +#define NBL_UPA_LAYO_PHV_DWLEN (1) +union upa_layo_phv_u { + struct upa_layo_phv { + u32 len:7; /* [6:0] Default:0x46 RW */ + u32 change_en:1; /* [7] Default:0x1 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_LAYO_PHV_DWLEN]; +} __packed; + +#define NBL_UPA_L4S_PAD_ADDR (0x8c1f4) +#define NBL_UPA_L4S_PAD_DEPTH (1) +#define NBL_UPA_L4S_PAD_WIDTH (32) +#define NBL_UPA_L4S_PAD_DWLEN (1) +union upa_l4s_pad_u { + struct upa_l4s_pad { + u32 p_length:7; /* [6:0] Default:0x3C RW */ + u32 en:1; /* [7] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_L4S_PAD_DWLEN]; +} __packed; + +#define NBL_UPA_LAYO_FLAG_ADDR (0x8c1f8) +#define NBL_UPA_LAYO_FLAG_DEPTH (1) +#define NBL_UPA_LAYO_FLAG_WIDTH (32) +#define NBL_UPA_LAYO_FLAG_DWLEN (1) +union upa_layo_flag_u { + struct upa_layo_flag { + u32 mask:32; /* [31:0] Default:0x00 RW */ + } __packed info; + u32 data[NBL_UPA_LAYO_FLAG_DWLEN]; +} __packed; + +#define NBL_UPA_IP_EXT_PROTOCOL_ADDR (0x8c1fc) +#define NBL_UPA_IP_EXT_PROTOCOL_DEPTH (1) +#define NBL_UPA_IP_EXT_PROTOCOL_WIDTH (32) +#define NBL_UPA_IP_EXT_PROTOCOL_DWLEN (1) +union upa_ip_ext_protocol_u { + struct upa_ip_ext_protocol { + u32 tcp:8; /* [7:0] Default:0x6 RW */ + u32 udp:8; /* [15:8] Default:0x11 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_IP_EXT_PROTOCOL_DWLEN]; +} __packed; + +#define NBL_UPA_L3V6_ML_DA_ADDR (0x8c204) +#define NBL_UPA_L3V6_ML_DA_DEPTH (1) +#define NBL_UPA_L3V6_ML_DA_WIDTH (32) +#define NBL_UPA_L3V6_ML_DA_DWLEN (1) +union upa_l3v6_ml_da_u { + struct upa_l3v6_ml_da { + u32 ml_da:16; /* [15:0] Default:0x3333 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_L3V6_ML_DA_DWLEN]; +} __packed; + +#define NBL_UPA_NEXT_KEY_ADDR (0x8c208) +#define NBL_UPA_NEXT_KEY_DEPTH (1) +#define NBL_UPA_NEXT_KEY_WIDTH (32) +#define NBL_UPA_NEXT_KEY_DWLEN (1) +union upa_next_key_u { + struct upa_next_key { + u32 key_b:8; /* [7:0] Default:0x10 RW */ + u32 key_a:8; /* [15:8] Default:0x0C RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_NEXT_KEY_DWLEN]; +} __packed; + +#define NBL_UPA_L3_ML_DA_ADDR (0x8c20c) +#define NBL_UPA_L3_ML_DA_DEPTH (1) +#define NBL_UPA_L3_ML_DA_WIDTH (32) +#define NBL_UPA_L3_ML_DA_DWLEN (1) +union upa_l3_ml_da_u { + struct upa_l3_ml_da { + u32 ml_da_0:16; /* [15:0] Default:0x5e00 RW */ + u32 ml_da_1:16; /* [31:16] Default:0x0100 RW */ + } __packed info; + u32 
data[NBL_UPA_L3_ML_DA_DWLEN]; +} __packed; + +#define NBL_UPA_CK_CTRL_ADDR (0x8c210) +#define NBL_UPA_CK_CTRL_DEPTH (1) +#define NBL_UPA_CK_CTRL_WIDTH (32) +#define NBL_UPA_CK_CTRL_DWLEN (1) +union upa_ck_ctrl_u { + struct upa_ck_ctrl { + u32 tcp_csum_en:1; /* [0] Default:0x1 RW */ + u32 udp_csum_en:1; /* [1] Default:0x1 RW */ + u32 sctp_crc32c_en:1; /* [2] Default:0x1 RW */ + u32 ipv4_ck_en:1; /* [3] Default:0x1 RW */ + u32 ipv6_ck_en:1; /* [4] Default:0x1 RW */ + u32 DA_ck_en:1; /* [5] Default:0x1 RW */ + u32 ipv6_ext_en:1; /* [6] Default:0x0 RW */ + u32 vlan_error_en:1; /* [7] Default:0x1 RW */ + u32 ctrl_p_en:1; /* [8] Default:0x0 RW */ + u32 ip_tlen_ck_en:1; /* [9] Default:0x0 RW */ + u32 not_uc_p_plck_aux_en:1; /* [10] Default:0x0 RW */ + u32 sctp_crc_plck_aux_en:1; /* [11] Default:0x1 RW */ + u32 tcp_csum_offset_id:2; /* [13:12] Default:0x2 RW */ + u32 udp_csum_offset_id:2; /* [15:14] Default:0x2 RW */ + u32 sctp_crc32c_offset_id:2; /* [17:16] Default:0x2 RW */ + u32 ipv4_ck_offset_id:2; /* [19:18] Default:0x1 RW */ + u32 ipv6_ck_offset_id:2; /* [21:20] Default:0x1 RW */ + u32 DA_ck_offset_id:2; /* [23:22] Default:0x0 RW */ + u32 plck_offset_id:2; /* [25:24] Default:0x3 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_CK_CTRL_DWLEN]; +} __packed; + +#define NBL_UPA_MC_INDEX_ADDR (0x8c214) +#define NBL_UPA_MC_INDEX_DEPTH (1) +#define NBL_UPA_MC_INDEX_WIDTH (32) +#define NBL_UPA_MC_INDEX_DWLEN (1) +union upa_mc_index_u { + struct upa_mc_index { + u32 l2_mc_index:5; /* [4:0] Default:0x8 RW */ + u32 rsv2:3; /* [7:5] Default:0x00 RO */ + u32 l3_mc_index:5; /* [12:8] Default:0x9 RW */ + u32 rsv1:3; /* [15:13] Default:0x00 RO */ + u32 ctrl_p_index:5; /* [20:16] Default:0xF RW */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_MC_INDEX_DWLEN]; +} __packed; + +#define NBL_UPA_CTRL_P_DA_ADDR (0x8c218) +#define NBL_UPA_CTRL_P_DA_DEPTH (1) +#define NBL_UPA_CTRL_P_DA_WIDTH (32) +#define NBL_UPA_CTRL_P_DA_DWLEN (1) +union upa_ctrl_p_da_u { + struct upa_ctrl_p_da { + u32 ctrl_da_0:16; /* [15:0] Default:0xC200 RW */ + u32 ctrl_da_1:16; /* [31:16] Default:0x0180 RW */ + } __packed info; + u32 data[NBL_UPA_CTRL_P_DA_DWLEN]; +} __packed; + +#define NBL_UPA_VLAN_INDEX_ADDR (0x8c220) +#define NBL_UPA_VLAN_INDEX_DEPTH (1) +#define NBL_UPA_VLAN_INDEX_WIDTH (32) +#define NBL_UPA_VLAN_INDEX_DWLEN (1) +union upa_vlan_index_u { + struct upa_vlan_index { + u32 i_vlan2_index:5; /* [4:0] Default:0x7 RW */ + u32 rsv3:3; /* [7:5] Default:0x00 RO */ + u32 i_vlan1_index:5; /* [12:8] Default:0x6 RW */ + u32 rsv2:3; /* [15:13] Default:0x00 RO */ + u32 o_vlan2_index:5; /* [20:16] Default:0x11 RW */ + u32 rsv1:3; /* [23:21] Default:0x0 RO */ + u32 o_vlan1_index:5; /* [28:24] Default:0x10 RW */ + u32 rsv:3; /* [31:29] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_VLAN_INDEX_DWLEN]; +} __packed; + +#define NBL_UPA_PRI_VLAN_INDEX_ADDR (0x8c224) +#define NBL_UPA_PRI_VLAN_INDEX_DEPTH (1) +#define NBL_UPA_PRI_VLAN_INDEX_WIDTH (32) +#define NBL_UPA_PRI_VLAN_INDEX_DWLEN (1) +union upa_pri_vlan_index_u { + struct upa_pri_vlan_index { + u32 int_vlan2:7; /* [6:0] Default:0x30 RW */ + u32 rsv3:1; /* [7] Default:0x0 RO */ + u32 int_vlan1:7; /* [14:8] Default:0x2E RW */ + u32 rsv2:1; /* [15] Default:0x0 RO */ + u32 ext_vlan2:7; /* [22:16] Default:0x10 RW */ + u32 rsv1:1; /* [23] Default:0x0 RO */ + u32 ext_vlan1:7; /* [30:24] Default:0xE RW */ + u32 rsv:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PRI_VLAN_INDEX_DWLEN]; +} __packed; 
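+/* Usage sketch (illustrative, not part of the generated register map):
+ * every register here pairs a bitfield view (.info) with a raw dword view
+ * (.data[]), so a read-modify-write of one field goes through the union.
+ * This assumes @regs is a valid ioremapped pointer to the device BAR:
+ *
+ *	union upa_vlan_index_u vlan_index;
+ *
+ *	vlan_index.data[0] = readl(regs + NBL_UPA_VLAN_INDEX_ADDR);
+ *	vlan_index.info.o_vlan1_index = 0x12;
+ *	writel(vlan_index.data[0], regs + NBL_UPA_VLAN_INDEX_ADDR);
+ *
+ * Multi-dword registers (DWLEN > 1) repeat the access per data[] word.
+ */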
+ +#define NBL_UPA_PRI_DSCP_INDEX_ADDR (0x8c228) +#define NBL_UPA_PRI_DSCP_INDEX_DEPTH (1) +#define NBL_UPA_PRI_DSCP_INDEX_WIDTH (32) +#define NBL_UPA_PRI_DSCP_INDEX_DWLEN (1) +union upa_pri_dscp_index_u { + struct upa_pri_dscp_index { + u32 int_dscp:7; /* [6:0] Default:0x32 RW */ + u32 rsv3:1; /* [7] Default:0x0 RO */ + u32 ext_dscp:7; /* [14:8] Default:0x12 RW */ + u32 rsv2:1; /* [15] Default:0x0 RO */ + u32 ipv4_flag:5; /* [20:16] Default:0x1 RW */ + u32 rsv1:3; /* [23:21] Default:0x0 RO */ + u32 ipv6_flag:5; /* [28:24] Default:0x2 RW */ + u32 rsv:3; /* [31:29] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PRI_DSCP_INDEX_DWLEN]; +} __packed; + +#define NBL_UPA_RDMA_INDEX_ADDR (0x8c22c) +#define NBL_UPA_RDMA_INDEX_DEPTH (1) +#define NBL_UPA_RDMA_INDEX_WIDTH (32) +#define NBL_UPA_RDMA_INDEX_DWLEN (1) +union upa_rdma_index_u { + struct upa_rdma_index { + u32 ext_qpn:7; /* [6:0] Default:0x42 RW */ + u32 rsv1:1; /* [7] Default:0x0 RO */ + u32 rdma_index:5; /* [12:8] Default:0xA RW */ + u32 rsv:19; /* [31:13] Default:0x00 RO */ + } __packed info; + u32 data[NBL_UPA_RDMA_INDEX_DWLEN]; +} __packed; + +#define NBL_UPA_PRI_SEL_CONF_ADDR (0x8c230) +#define NBL_UPA_PRI_SEL_CONF_DEPTH (5) +#define NBL_UPA_PRI_SEL_CONF_WIDTH (32) +#define NBL_UPA_PRI_SEL_CONF_DWLEN (1) +union upa_pri_sel_conf_u { + struct upa_pri_sel_conf { + u32 pri_sel:5; /* [4:0] Default:0x0 RW */ + u32 pri_default:3; /* [7:5] Default:0x0 RW */ + u32 pri_disen:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PRI_SEL_CONF_DWLEN]; +} __packed; +#define NBL_UPA_PRI_SEL_CONF_REG(r) (NBL_UPA_PRI_SEL_CONF_ADDR + \ + (NBL_UPA_PRI_SEL_CONF_DWLEN * 4) * (r)) + +#define NBL_UPA_ERROR_DROP_ADDR (0x8c248) +#define NBL_UPA_ERROR_DROP_DEPTH (1) +#define NBL_UPA_ERROR_DROP_WIDTH (32) +#define NBL_UPA_ERROR_DROP_DWLEN (1) +union upa_error_drop_u { + struct upa_error_drop { + u32 en:7; /* [6:0] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_ERROR_DROP_DWLEN]; +} __packed; + +#define NBL_UPA_ERROR_CODE_ADDR (0x8c24c) +#define NBL_UPA_ERROR_CODE_DEPTH (1) +#define NBL_UPA_ERROR_CODE_WIDTH (32) +#define NBL_UPA_ERROR_CODE_DWLEN (1) +union upa_error_code_u { + struct upa_error_code { + u32 no:32; /* [31:0] Default:0x09123456 RW */ + } __packed info; + u32 data[NBL_UPA_ERROR_CODE_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_SCAN_ADDR (0x8c250) +#define NBL_UPA_PTYPE_SCAN_DEPTH (1) +#define NBL_UPA_PTYPE_SCAN_WIDTH (32) +#define NBL_UPA_PTYPE_SCAN_DWLEN (1) +union upa_ptype_scan_u { + struct upa_ptype_scan { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PTYPE_SCAN_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_SCAN_TH_ADDR (0x8c254) +#define NBL_UPA_PTYPE_SCAN_TH_DEPTH (1) +#define NBL_UPA_PTYPE_SCAN_TH_WIDTH (32) +#define NBL_UPA_PTYPE_SCAN_TH_DWLEN (1) +union upa_ptype_scan_th_u { + struct upa_ptype_scan_th { + u32 th:32; /* [31:00] Default:0x40 RW */ + } __packed info; + u32 data[NBL_UPA_PTYPE_SCAN_TH_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_SCAN_MASK_ADDR (0x8c258) +#define NBL_UPA_PTYPE_SCAN_MASK_DEPTH (1) +#define NBL_UPA_PTYPE_SCAN_MASK_WIDTH (32) +#define NBL_UPA_PTYPE_SCAN_MASK_DWLEN (1) +union upa_ptype_scan_mask_u { + struct upa_ptype_scan_mask { + u32 addr:8; /* [7:0] Default:0x0 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PTYPE_SCAN_MASK_DWLEN]; +} __packed; + +#define 
NBL_UPA_PTYPE_INSERT_SEARCH_ADDR (0x8c25c) +#define NBL_UPA_PTYPE_INSERT_SEARCH_DEPTH (1) +#define NBL_UPA_PTYPE_INSERT_SEARCH_WIDTH (32) +#define NBL_UPA_PTYPE_INSERT_SEARCH_DWLEN (1) +union upa_ptype_insert_search_u { + struct upa_ptype_insert_search { + u32 ctrl:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PTYPE_INSERT_SEARCH_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_INSERT_SEARCH_0_ADDR (0x8c260) +#define NBL_UPA_PTYPE_INSERT_SEARCH_0_DEPTH (1) +#define NBL_UPA_PTYPE_INSERT_SEARCH_0_WIDTH (32) +#define NBL_UPA_PTYPE_INSERT_SEARCH_0_DWLEN (1) +union upa_ptype_insert_search_0_u { + struct upa_ptype_insert_search_0 { + u32 key0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPA_PTYPE_INSERT_SEARCH_0_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_INSERT_SEARCH_1_ADDR (0x8c264) +#define NBL_UPA_PTYPE_INSERT_SEARCH_1_DEPTH (1) +#define NBL_UPA_PTYPE_INSERT_SEARCH_1_WIDTH (32) +#define NBL_UPA_PTYPE_INSERT_SEARCH_1_DWLEN (1) +union upa_ptype_insert_search_1_u { + struct upa_ptype_insert_search_1 { + u32 key1:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPA_PTYPE_INSERT_SEARCH_1_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ADDR (0x8c268) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_DEPTH (1) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_WIDTH (32) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_DWLEN (1) +union upa_ptype_insert_search_result_u { + struct upa_ptype_insert_search_result { + u32 result:8; /* [7:0] Default:0x0 RO */ + u32 hit:1; /* [8] Default:0x0 RO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ACK_ADDR (0x8c270) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DEPTH (1) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ACK_WIDTH (32) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DWLEN (1) +union upa_ptype_insert_search_result_ack_u { + struct upa_ptype_insert_search_result_ack { + u32 vld:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DWLEN]; +} __packed; + +#define NBL_UPA_CFG_TEST_ADDR (0x8c80c) +#define NBL_UPA_CFG_TEST_DEPTH (1) +#define NBL_UPA_CFG_TEST_WIDTH (32) +#define NBL_UPA_CFG_TEST_DWLEN (1) +union upa_cfg_test_u { + struct upa_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPA_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_UPA_BP_STATE_ADDR (0x8cb00) +#define NBL_UPA_BP_STATE_DEPTH (1) +#define NBL_UPA_BP_STATE_WIDTH (32) +#define NBL_UPA_BP_STATE_DWLEN (1) +union upa_bp_state_u { + struct upa_bp_state { + u32 pa_rmux_data_bp:1; /* [0] Default:0x0 RO */ + u32 pa_rmux_info_bp:1; /* [1] Default:0x0 RO */ + u32 store_pa_data_bp:1; /* [2] Default:0x0 RO */ + u32 store_pa_info_bp:1; /* [3] Default:0x0 RO */ + u32 rx_data_fifo_afull:1; /* [4] Default:0x0 RO */ + u32 rx_info_fifo_afull:1; /* [5] Default:0x0 RO */ + u32 rx_ctrl_fifo_afull:1; /* [6] Default:0x0 RO */ + u32 cinf1_fifo_afull:1; /* [7] Default:0x0 RO */ + u32 ctrl_cinf1_fifo_afull:1; /* [8] Default:0x0 RO */ + u32 layo_info_fifo_afull:1; /* [9] Default:0x0 RO */ + u32 cinf2_fifo_afull:1; /* [10] Default:0x0 RO */ + u32 ctrl_cinf2_fifo_afull:1; /* [11] Default:0x0 RO */ + u32 layi_info_fifo_afull:1; /* [12] Default:0x0 RO */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_BP_STATE_DWLEN]; +} __packed; + 
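+/* Status sketch (illustrative): BP_STATE above is RO and reports the live
+ * back-pressure state, while BP_HISTORY below latches the same bits and is
+ * annotated RC, i.e. presumably sticky and cleared by the read itself.
+ * Assuming the same ioremapped @regs base as the sketch above:
+ *
+ *	union upa_bp_state_u bp;
+ *
+ *	bp.data[0] = readl(regs + NBL_UPA_BP_STATE_ADDR);
+ *	if (bp.info.rx_data_fifo_afull)
+ *		pr_debug("upa: rx data fifo almost full\n");
+ */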
+#define NBL_UPA_BP_HISTORY_ADDR (0x8cb04) +#define NBL_UPA_BP_HISTORY_DEPTH (1) +#define NBL_UPA_BP_HISTORY_WIDTH (32) +#define NBL_UPA_BP_HISTORY_DWLEN (1) +union upa_bp_history_u { + struct upa_bp_history { + u32 pa_rmux_data_bp:1; /* [0] Default:0x0 RC */ + u32 pa_rmux_info_bp:1; /* [1] Default:0x0 RC */ + u32 store_pa_data_bp:1; /* [2] Default:0x0 RC */ + u32 store_pa_info_bp:1; /* [3] Default:0x0 RC */ + u32 rx_data_fifo_afull:1; /* [4] Default:0x0 RC */ + u32 rx_info_fifo_afull:1; /* [5] Default:0x0 RC */ + u32 rx_ctrl_fifo_afull:1; /* [6] Default:0x0 RC */ + u32 cinf1_fifo_afull:1; /* [7] Default:0x0 RC */ + u32 ctrl_cinf1_fifo_afull:1; /* [8] Default:0x0 RC */ + u32 layo_info_fifo_afull:1; /* [9] Default:0x0 RC */ + u32 cinf2_fifo_afull:1; /* [10] Default:0x0 RC */ + u32 ctrl_cinf2_fifo_afull:1; /* [11] Default:0x0 RC */ + u32 layi_info_fifo_afull:1; /* [12] Default:0x0 RC */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_UPA_PRI_CONF_TABLE_ADDR (0x8e000) +#define NBL_UPA_PRI_CONF_TABLE_DEPTH (40) +#define NBL_UPA_PRI_CONF_TABLE_WIDTH (32) +#define NBL_UPA_PRI_CONF_TABLE_DWLEN (1) +union upa_pri_conf_table_u { + struct upa_pri_conf_table { + u32 pri0:4; /* [3:0] Default:0x0 RW */ + u32 pri1:4; /* [7:4] Default:0x0 RW */ + u32 pri2:4; /* [11:8] Default:0x0 RW */ + u32 pri3:4; /* [15:12] Default:0x0 RW */ + u32 pri4:4; /* [19:16] Default:0x0 RW */ + u32 pri5:4; /* [23:20] Default:0x0 RW */ + u32 pri6:4; /* [27:24] Default:0x0 RW */ + u32 pri7:4; /* [31:28] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPA_PRI_CONF_TABLE_DWLEN]; +} __packed; +#define NBL_UPA_PRI_CONF_TABLE_REG(r) (NBL_UPA_PRI_CONF_TABLE_ADDR + \ + (NBL_UPA_PRI_CONF_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPA_KEY_TCAM_ADDR (0x8f000) +#define NBL_UPA_KEY_TCAM_DEPTH (256) +#define NBL_UPA_KEY_TCAM_WIDTH (64) +#define NBL_UPA_KEY_TCAM_DWLEN (2) +union upa_key_tcam_u { + struct upa_key_tcam { + u32 key_b:16; /* [15:0] Default:0x0 RW */ + u32 key_a:16; /* [31:16] Default:0x0 RW */ + u32 key_valid:1; /* [32] Default:0x0 RW */ + u32 rsv:31; /* [63:33] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_KEY_TCAM_DWLEN]; +} __packed; +#define NBL_UPA_KEY_TCAM_REG(r) (NBL_UPA_KEY_TCAM_ADDR + \ + (NBL_UPA_KEY_TCAM_DWLEN * 4) * (r)) + +#define NBL_UPA_MASK_TCAM_ADDR (0x8f800) +#define NBL_UPA_MASK_TCAM_DEPTH (256) +#define NBL_UPA_MASK_TCAM_WIDTH (32) +#define NBL_UPA_MASK_TCAM_DWLEN (1) +union upa_mask_tcam_u { + struct upa_mask_tcam { + u32 mask_b:16; /* [15:0] Default:0x0 RW */ + u32 mask_a:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPA_MASK_TCAM_DWLEN]; +} __packed; +#define NBL_UPA_MASK_TCAM_REG(r) (NBL_UPA_MASK_TCAM_ADDR + \ + (NBL_UPA_MASK_TCAM_DWLEN * 4) * (r)) + +#define NBL_UPA_ACT_TABLE_ADDR (0x90000) +#define NBL_UPA_ACT_TABLE_DEPTH (256) +#define NBL_UPA_ACT_TABLE_WIDTH (128) +#define NBL_UPA_ACT_TABLE_DWLEN (4) +union upa_act_table_u { + struct upa_act_table { + u32 flag_control_0:8; /* [7:0] Default:0x0 RW */ + u32 flag_control_1:8; /* [15:8] Default:0x0 RW */ + u32 flag_control_2:8; /* [23:16] Default:0x0 RW */ + u32 legality_check:8; /* [31:24] Default:0x0 RW */ + u32 nxt_off_B:8; /* [39:32] Default:0x0 RW */ + u32 nxt_off_A:8; /* [47:40] Default:0x0 RW */ + u32 protocol_header_off:8; /* [55:48] Default:0x0 RW */ + u32 payload_length:8; /* [63:56] Default:0x0 RW */ + u32 mask:8; /* [71:64] Default:0x0 RW */ + u32 nxt_stg:4; /* [75:72] Default:0x0 RW */ + u32 rsv_l:32; /* [127:76] Default:0x0 RO */ + 
u32 rsv_h:20; /* [127:76] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_ACT_TABLE_DWLEN]; +} __packed; +#define NBL_UPA_ACT_TABLE_REG(r) (NBL_UPA_ACT_TABLE_ADDR + \ + (NBL_UPA_ACT_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPA_EXT_CONF_TABLE_ADDR (0x91000) +#define NBL_UPA_EXT_CONF_TABLE_DEPTH (1024) +#define NBL_UPA_EXT_CONF_TABLE_WIDTH (32) +#define NBL_UPA_EXT_CONF_TABLE_DWLEN (1) +union upa_ext_conf_table_u { + struct upa_ext_conf_table { + u32 dst_offset:8; /* [7:0] Default:0x0 RW */ + u32 source_offset:6; /* [13:8] Default:0x0 RW */ + u32 mode_start_off:2; /* [15:14] Default:0x0 RW */ + u32 lx_sel:2; /* [17:16] Default:0x0 RW */ + u32 mode_sel:1; /* [18] Default:0x0 RW */ + u32 op_en:1; /* [19] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_EXT_CONF_TABLE_DWLEN]; +} __packed; +#define NBL_UPA_EXT_CONF_TABLE_REG(r) (NBL_UPA_EXT_CONF_TABLE_ADDR + \ + (NBL_UPA_EXT_CONF_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPA_EXT_INDEX_TCAM_ADDR (0x92000) +#define NBL_UPA_EXT_INDEX_TCAM_DEPTH (64) +#define NBL_UPA_EXT_INDEX_TCAM_WIDTH (64) +#define NBL_UPA_EXT_INDEX_TCAM_DWLEN (2) +union upa_ext_index_tcam_u { + struct upa_ext_index_tcam { + u32 type_index:32; /* [31:0] Default:0x0 RW */ + u32 type_valid:1; /* [32] Default:0x0 RW */ + u32 rsv:31; /* [63:33] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_EXT_INDEX_TCAM_DWLEN]; +} __packed; +#define NBL_UPA_EXT_INDEX_TCAM_REG(r) (NBL_UPA_EXT_INDEX_TCAM_ADDR + \ + (NBL_UPA_EXT_INDEX_TCAM_DWLEN * 4) * (r)) + +#define NBL_UPA_EXT_INDEX_TCAM_MASK_ADDR (0x92200) +#define NBL_UPA_EXT_INDEX_TCAM_MASK_DEPTH (64) +#define NBL_UPA_EXT_INDEX_TCAM_MASK_WIDTH (32) +#define NBL_UPA_EXT_INDEX_TCAM_MASK_DWLEN (1) +union upa_ext_index_tcam_mask_u { + struct upa_ext_index_tcam_mask { + u32 mask:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPA_EXT_INDEX_TCAM_MASK_DWLEN]; +} __packed; +#define NBL_UPA_EXT_INDEX_TCAM_MASK_REG(r) (NBL_UPA_EXT_INDEX_TCAM_MASK_ADDR + \ + (NBL_UPA_EXT_INDEX_TCAM_MASK_DWLEN * 4) * (r)) + +#define NBL_UPA_EXT_INDEX_TABLE_ADDR (0x92300) +#define NBL_UPA_EXT_INDEX_TABLE_DEPTH (64) +#define NBL_UPA_EXT_INDEX_TABLE_WIDTH (32) +#define NBL_UPA_EXT_INDEX_TABLE_DWLEN (1) +union upa_ext_index_table_u { + struct upa_ext_index_table { + u32 p_index:3; /* [2:0] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_EXT_INDEX_TABLE_DWLEN]; +} __packed; +#define NBL_UPA_EXT_INDEX_TABLE_REG(r) (NBL_UPA_EXT_INDEX_TABLE_ADDR + \ + (NBL_UPA_EXT_INDEX_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPA_TYPE_INDEX_TCAM_ADDR (0x94000) +#define NBL_UPA_TYPE_INDEX_TCAM_DEPTH (256) +#define NBL_UPA_TYPE_INDEX_TCAM_WIDTH (256) +#define NBL_UPA_TYPE_INDEX_TCAM_DWLEN (8) +union upa_type_index_tcam_u { + struct upa_type_index_tcam { + u32 layi_x:32; /* [31:0] Default:0xFFFFFFFF RW */ + u32 layo_x:32; /* [63:32] Default:0xFFFFFFFF RW */ + u32 layi_y:32; /* [95:64] Default:0xFFFFFFFF RW */ + u32 layo_y:32; /* [127:96] Default:0xFFFFFFFF RW */ + u32 type_valid:1; /* [128] Default:0x0 RW */ + u32 rsv_l:32; /* [255:129] Default:0x0 RO */ + u32 rsv_h:31; /* [255:129] Default:0x0 RO */ + u32 rsv_arr[2]; /* [255:129] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_TYPE_INDEX_TCAM_DWLEN]; +} __packed; +#define NBL_UPA_TYPE_INDEX_TCAM_REG(r) (NBL_UPA_TYPE_INDEX_TCAM_ADDR + \ + (NBL_UPA_TYPE_INDEX_TCAM_DWLEN * 4) * (r)) + +#define NBL_UPA_PACKET_TYPE_TABLE_ADDR (0x96000) +#define NBL_UPA_PACKET_TYPE_TABLE_DEPTH (256) +#define NBL_UPA_PACKET_TYPE_TABLE_WIDTH 
(32) +#define NBL_UPA_PACKET_TYPE_TABLE_DWLEN (1) +union upa_packet_type_table_u { + struct upa_packet_type_table { + u32 p_type:8; /* [7:0] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PACKET_TYPE_TABLE_DWLEN]; +} __packed; +#define NBL_UPA_PACKET_TYPE_TABLE_REG(r) (NBL_UPA_PACKET_TYPE_TABLE_ADDR + \ + (NBL_UPA_PACKET_TYPE_TABLE_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uped.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uped.h new file mode 100644 index 0000000000000000000000000000000000000000..1a88c44380efd1caca314fbe69a07cb8b1e0449e --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uped.h @@ -0,0 +1,1494 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_UPED_H +#define NBL_UPED_H 1 + +#include <linux/types.h> + +#define NBL_UPED_BASE (0x0015C000) + +#define NBL_UPED_INT_STATUS_ADDR (0x15c000) +#define NBL_UPED_INT_STATUS_DEPTH (1) +#define NBL_UPED_INT_STATUS_WIDTH (32) +#define NBL_UPED_INT_STATUS_DWLEN (1) +union uped_int_status_u { + struct uped_int_status { + u32 pkt_length_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RWC */ + u32 fsm_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 input_err:1; /* [5] Default:0x0 RWC */ + u32 cfg_err:1; /* [6] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [7] Default:0x0 RWC */ + u32 inmeta_ucor_err:1; /* [8] Default:0x0 RWC */ + u32 meta_ucor_err:1; /* [9] Default:0x0 RWC */ + u32 meta_cor_ecc_err:1; /* [10] Default:0x0 RWC */ + u32 fwd_atid_nomat_err:1; /* [11] Default:0x0 RWC */ + u32 meta_value_err:1; /* [12] Default:0x0 RWC */ + u32 edit_atnum_err:1; /* [13] Default:0x0 RWC */ + u32 header_oft_ovf:1; /* [14] Default:0x0 RWC */ + u32 edit_pos_err:1; /* [15] Default:0x0 RWC */ + u32 da_oft_len_ovf:1; /* [16] Default:0x0 RWC */ + u32 lxoffset_ovf:1; /* [17] Default:0x0 RWC */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_UPED_INT_MASK_ADDR (0x15c004) +#define NBL_UPED_INT_MASK_DEPTH (1) +#define NBL_UPED_INT_MASK_WIDTH (32) +#define NBL_UPED_INT_MASK_DWLEN (1) +union uped_int_mask_u { + struct uped_int_mask { + u32 pkt_length_err:1; /* [0] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RW */ + u32 fsm_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 input_err:1; /* [5] Default:0x0 RW */ + u32 cfg_err:1; /* [6] Default:0x0 RW */ + u32 data_ucor_err:1; /* [7] Default:0x0 RW */ + u32 inmeta_ucor_err:1; /* [8] Default:0x0 RW */ + u32 meta_ucor_err:1; /* [9] Default:0x0 RW */ + u32 meta_cor_ecc_err:1; /* [10] Default:0x0 RW */ + u32 fwd_atid_nomat_err:1; /* [11] Default:0x1 RW */ + u32 meta_value_err:1; /* [12] Default:0x0 RW */ + u32 edit_atnum_err:1; /* [13] Default:0x0 RW */ + u32 header_oft_ovf:1; /* [14] Default:0x0 RW */ + u32 edit_pos_err:1; /* [15] Default:0x0 RW */ + u32 da_oft_len_ovf:1; /* [16] Default:0x0 RW */ + u32 lxoffset_ovf:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INT_MASK_DWLEN]; +} __packed; + +#define NBL_UPED_INT_SET_ADDR (0x15c008) +#define NBL_UPED_INT_SET_DEPTH (1) +#define NBL_UPED_INT_SET_WIDTH (32) +#define 
NBL_UPED_INT_SET_DWLEN (1) +union uped_int_set_u { + struct uped_int_set { + u32 pkt_length_err:1; /* [0] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 WO */ + u32 fsm_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 input_err:1; /* [5] Default:0x0 WO */ + u32 cfg_err:1; /* [6] Default:0x0 WO */ + u32 data_ucor_err:1; /* [7] Default:0x0 WO */ + u32 inmeta_ucor_err:1; /* [8] Default:0x0 WO */ + u32 meta_ucor_err:1; /* [9] Default:0x0 WO */ + u32 meta_cor_ecc_err:1; /* [10] Default:0x0 WO */ + u32 fwd_atid_nomat_err:1; /* [11] Default:0x0 WO */ + u32 meta_value_err:1; /* [12] Default:0x0 WO */ + u32 edit_atnum_err:1; /* [13] Default:0x0 WO */ + u32 header_oft_ovf:1; /* [14] Default:0x0 WO */ + u32 edit_pos_err:1; /* [15] Default:0x0 WO */ + u32 da_oft_len_ovf:1; /* [16] Default:0x0 WO */ + u32 lxoffset_ovf:1; /* [17] Default:0x0 WO */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INT_SET_DWLEN]; +} __packed; + +#define NBL_UPED_INIT_DONE_ADDR (0x15c00c) +#define NBL_UPED_INIT_DONE_DEPTH (1) +#define NBL_UPED_INIT_DONE_WIDTH (32) +#define NBL_UPED_INIT_DONE_DWLEN (1) +union uped_init_done_u { + struct uped_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_UPED_PKT_LENGTH_ERR_INFO_ADDR (0x15c020) +#define NBL_UPED_PKT_LENGTH_ERR_INFO_DEPTH (1) +#define NBL_UPED_PKT_LENGTH_ERR_INFO_WIDTH (32) +#define NBL_UPED_PKT_LENGTH_ERR_INFO_DWLEN (1) +union uped_pkt_length_err_info_u { + struct uped_pkt_length_err_info { + u32 ptr_eop:1; /* [0] Default:0x0 RC */ + u32 pkt_eop:1; /* [1] Default:0x0 RC */ + u32 pkt_mod:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_PKT_LENGTH_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_CIF_ERR_INFO_ADDR (0x15c040) +#define NBL_UPED_CIF_ERR_INFO_DEPTH (1) +#define NBL_UPED_CIF_ERR_INFO_WIDTH (32) +#define NBL_UPED_CIF_ERR_INFO_DWLEN (1) +union uped_cif_err_info_u { + struct uped_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_INPUT_ERR_INFO_ADDR (0x15c048) +#define NBL_UPED_INPUT_ERR_INFO_DEPTH (1) +#define NBL_UPED_INPUT_ERR_INFO_WIDTH (32) +#define NBL_UPED_INPUT_ERR_INFO_DWLEN (1) +union uped_input_err_info_u { + struct uped_input_err_info { + u32 eoc_miss:1; /* [0] Default:0x0 RC */ + u32 soc_miss:1; /* [1] Default:0x0 RC */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INPUT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_CFG_ERR_INFO_ADDR (0x15c050) +#define NBL_UPED_CFG_ERR_INFO_DEPTH (1) +#define NBL_UPED_CFG_ERR_INFO_WIDTH (32) +#define NBL_UPED_CFG_ERR_INFO_DWLEN (1) +union uped_cfg_err_info_u { + struct uped_cfg_err_info { + u32 length:1; /* [0] Default:0x0 RC */ + u32 rd_conflict:1; /* [1] Default:0x0 RC */ + u32 rd_addr:8; /* [9:2] Default:0x0 RC */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_ATID_NOMAT_ERR_INFO_ADDR (0x15c06c) +#define NBL_UPED_FWD_ATID_NOMAT_ERR_INFO_DEPTH (1) +#define NBL_UPED_FWD_ATID_NOMAT_ERR_INFO_WIDTH (32) +#define NBL_UPED_FWD_ATID_NOMAT_ERR_INFO_DWLEN (1) +union uped_fwd_atid_nomat_err_info_u { + 
struct uped_fwd_atid_nomat_err_info { + u32 dport:1; /* [0] Default:0x0 RC */ + u32 dqueue:1; /* [1] Default:0x0 RC */ + u32 hash0:1; /* [2] Default:0x0 RC */ + u32 hash1:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_ATID_NOMAT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_META_VALUE_ERR_INFO_ADDR (0x15c070) +#define NBL_UPED_META_VALUE_ERR_INFO_DEPTH (1) +#define NBL_UPED_META_VALUE_ERR_INFO_WIDTH (32) +#define NBL_UPED_META_VALUE_ERR_INFO_DWLEN (1) +union uped_meta_value_err_info_u { + struct uped_meta_value_err_info { + u32 sport:1; /* [0] Default:0x0 RC */ + u32 dport:1; /* [1] Default:0x0 RC */ + u32 dscp_ecn:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_META_VALUE_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_EDIT_ATNUM_ERR_INFO_ADDR (0x15c078) +#define NBL_UPED_EDIT_ATNUM_ERR_INFO_DEPTH (1) +#define NBL_UPED_EDIT_ATNUM_ERR_INFO_WIDTH (32) +#define NBL_UPED_EDIT_ATNUM_ERR_INFO_DWLEN (1) +union uped_edit_atnum_err_info_u { + struct uped_edit_atnum_err_info { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 del_add:1; /* [1] Default:0x0 RC */ + u32 ttl:1; /* [2] Default:0x0 RC */ + u32 dscp:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_EDIT_ATNUM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_HEADER_OFT_OVF_ADDR (0x15c080) +#define NBL_UPED_HEADER_OFT_OVF_DEPTH (1) +#define NBL_UPED_HEADER_OFT_OVF_WIDTH (32) +#define NBL_UPED_HEADER_OFT_OVF_DWLEN (1) +union uped_header_oft_ovf_u { + struct uped_header_oft_ovf { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 rsv2:7; /* [7:1] Default:0x0 RO */ + u32 add_del:6; /* [13:8] Default:0x0 RC */ + u32 dscp_ecn:1; /* [14] Default:0x0 RC */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 ttl:1; /* [16] Default:0x0 RC */ + u32 sctp:1; /* [17] Default:0x0 RC */ + u32 ck_len0:1; /* [18] Default:0x0 RC */ + u32 ck_len1:1; /* [19] Default:0x0 RC */ + u32 len0:1; /* [20] Default:0x0 RC */ + u32 len1:1; /* [21] Default:0x0 RC */ + u32 ck0:1; /* [22] Default:0x0 RC */ + u32 ck1:1; /* [23] Default:0x0 RC */ + u32 ck_start0_0:1; /* [24] Default:0x0 RC */ + u32 ck_start0_1:1; /* [25] Default:0x0 RC */ + u32 ck_start1_0:1; /* [26] Default:0x0 RC */ + u32 ck_start1_1:1; /* [27] Default:0x0 RC */ + u32 head:1; /* [28] Default:0x0 RC */ + u32 head_out:1; /* [29] Default:0x0 RC */ + u32 l4_head:1; /* [30] Default:0x0 RC */ + u32 rsv:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HEADER_OFT_OVF_DWLEN]; +} __packed; + +#define NBL_UPED_EDIT_POS_ERR_ADDR (0x15c088) +#define NBL_UPED_EDIT_POS_ERR_DEPTH (1) +#define NBL_UPED_EDIT_POS_ERR_WIDTH (32) +#define NBL_UPED_EDIT_POS_ERR_DWLEN (1) +union uped_edit_pos_err_u { + struct uped_edit_pos_err { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 cross_level:6; /* [6:1] Default:0x0 RC */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 add_del:6; /* [13:8] Default:0x0 RC */ + u32 dscp_ecn:1; /* [14] Default:0x0 RC */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 ttl:1; /* [16] Default:0x0 RC */ + u32 sctp:1; /* [17] Default:0x0 RC */ + u32 ck_len0:1; /* [18] Default:0x0 RC */ + u32 ck_len1:1; /* [19] Default:0x0 RC */ + u32 len0:1; /* [20] Default:0x0 RC */ + u32 len1:1; /* [21] Default:0x0 RC */ + u32 ck0:1; /* [22] Default:0x0 RC */ + u32 ck1:1; /* [23] Default:0x0 RC */ + u32 ck_start0_0:1; /* [24] Default:0x0 RC */ + u32 ck_start0_1:1; /* [25] Default:0x0 RC */ + u32 ck_start1_0:1; /* [26] Default:0x0 
RC */ + u32 ck_start1_1:1; /* [27] Default:0x0 RC */ + u32 bth_header:1; /* [28] Default:0x0 RC */ + u32 rsv:3; /* [31:29] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_EDIT_POS_ERR_DWLEN]; +} __packed; + +#define NBL_UPED_DA_OFT_LEN_OVF_ADDR (0x15c090) +#define NBL_UPED_DA_OFT_LEN_OVF_DEPTH (1) +#define NBL_UPED_DA_OFT_LEN_OVF_WIDTH (32) +#define NBL_UPED_DA_OFT_LEN_OVF_DWLEN (1) +union uped_da_oft_len_ovf_u { + struct uped_da_oft_len_ovf { + u32 at0:5; /* [4:0] Default:0x0 RC */ + u32 at1:5; /* [9:5] Default:0x0 RC */ + u32 at2:5; /* [14:10] Default:0x0 RC */ + u32 at3:5; /* [19:15] Default:0x0 RC */ + u32 at4:5; /* [24:20] Default:0x0 RC */ + u32 at5:5; /* [29:25] Default:0x0 RC */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_DA_OFT_LEN_OVF_DWLEN]; +} __packed; + +#define NBL_UPED_LXOFFSET_OVF_ADDR (0x15c098) +#define NBL_UPED_LXOFFSET_OVF_DEPTH (1) +#define NBL_UPED_LXOFFSET_OVF_WIDTH (32) +#define NBL_UPED_LXOFFSET_OVF_DWLEN (1) +union uped_lxoffset_ovf_u { + struct uped_lxoffset_ovf { + u32 l2:1; /* [0] Default:0x0 RC */ + u32 l3:1; /* [1] Default:0x0 RC */ + u32 l4:1; /* [2] Default:0x0 RC */ + u32 pld:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_LXOFFSET_OVF_DWLEN]; +} __packed; + +#define NBL_UPED_CAR_CTRL_ADDR (0x15c100) +#define NBL_UPED_CAR_CTRL_DEPTH (1) +#define NBL_UPED_CAR_CTRL_WIDTH (32) +#define NBL_UPED_CAR_CTRL_DWLEN (1) +union uped_car_ctrl_u { + struct uped_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_UPED_INIT_START_ADDR (0x15c10c) +#define NBL_UPED_INIT_START_DEPTH (1) +#define NBL_UPED_INIT_START_WIDTH (32) +#define NBL_UPED_INIT_START_DWLEN (1) +union uped_init_start_u { + struct uped_init_start { + u32 start:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INIT_START_DWLEN]; +} __packed; + +#define NBL_UPED_TIMEOUT_CFG_ADDR (0x15c110) +#define NBL_UPED_TIMEOUT_CFG_DEPTH (1) +#define NBL_UPED_TIMEOUT_CFG_WIDTH (32) +#define NBL_UPED_TIMEOUT_CFG_DWLEN (1) +union uped_timeout_cfg_u { + struct uped_timeout_cfg { + u32 fsm_max_num:16; /* [15:00] Default:0xfff RW */ + u32 tab:8; /* [23:16] Default:0x40 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TIMEOUT_CFG_DWLEN]; +} __packed; + +#define NBL_UPED_PKT_DROP_EN_ADDR (0x15c170) +#define NBL_UPED_PKT_DROP_EN_DEPTH (1) +#define NBL_UPED_PKT_DROP_EN_WIDTH (32) +#define NBL_UPED_PKT_DROP_EN_DWLEN (1) +union uped_pkt_drop_en_u { + struct uped_pkt_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_PKT_DROP_EN_DWLEN]; +} __packed; + +#define NBL_UPED_PKT_HERR_DROP_EN_ADDR (0x15c174) +#define NBL_UPED_PKT_HERR_DROP_EN_DEPTH (1) +#define NBL_UPED_PKT_HERR_DROP_EN_WIDTH (32) +#define NBL_UPED_PKT_HERR_DROP_EN_DWLEN (1) +union uped_pkt_herr_drop_en_u { + struct uped_pkt_herr_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_PKT_HERR_DROP_EN_DWLEN]; +} __packed; + +#define NBL_UPED_PKT_PARITY_DROP_EN_ADDR (0x15c178) +#define NBL_UPED_PKT_PARITY_DROP_EN_DEPTH (1) +#define 
NBL_UPED_PKT_PARITY_DROP_EN_WIDTH (32) +#define NBL_UPED_PKT_PARITY_DROP_EN_DWLEN (1) +union uped_pkt_parity_drop_en_u { + struct uped_pkt_parity_drop_en { + u32 en0:1; /* [0] Default:0x1 RW */ + u32 en1:1; /* [1] Default:0x1 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_PKT_PARITY_DROP_EN_DWLEN]; +} __packed; + +#define NBL_UPED_TTL_DROP_EN_ADDR (0x15c17c) +#define NBL_UPED_TTL_DROP_EN_DEPTH (1) +#define NBL_UPED_TTL_DROP_EN_WIDTH (32) +#define NBL_UPED_TTL_DROP_EN_DWLEN (1) +union uped_ttl_drop_en_u { + struct uped_ttl_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TTL_DROP_EN_DWLEN]; +} __packed; + +#define NBL_UPED_DQUEUE_DROP_EN_ADDR (0x15c180) +#define NBL_UPED_DQUEUE_DROP_EN_DEPTH (1) +#define NBL_UPED_DQUEUE_DROP_EN_WIDTH (32) +#define NBL_UPED_DQUEUE_DROP_EN_DWLEN (1) +union uped_dqueue_drop_en_u { + struct uped_dqueue_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_DQUEUE_DROP_EN_DWLEN]; +} __packed; + +#define NBL_UPED_INTF_ECC_ERR_EN_ADDR (0x15c184) +#define NBL_UPED_INTF_ECC_ERR_EN_DEPTH (1) +#define NBL_UPED_INTF_ECC_ERR_EN_WIDTH (32) +#define NBL_UPED_INTF_ECC_ERR_EN_DWLEN (1) +union uped_intf_ecc_err_en_u { + struct uped_intf_ecc_err_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INTF_ECC_ERR_EN_DWLEN]; +} __packed; + +#define NBL_UPED_TTL_ERROR_CODE_ADDR (0x15c188) +#define NBL_UPED_TTL_ERROR_CODE_DEPTH (1) +#define NBL_UPED_TTL_ERROR_CODE_WIDTH (32) +#define NBL_UPED_TTL_ERROR_CODE_DWLEN (1) +union uped_ttl_error_code_u { + struct uped_ttl_error_code { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv1:7; /* [7:1] Default:0x0 RO */ + u32 id:4; /* [11:8] Default:0x6 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TTL_ERROR_CODE_DWLEN]; +} __packed; + +#define NBL_UPED_HIGH_PRI_PKT_EN_ADDR (0x15c190) +#define NBL_UPED_HIGH_PRI_PKT_EN_DEPTH (1) +#define NBL_UPED_HIGH_PRI_PKT_EN_WIDTH (32) +#define NBL_UPED_HIGH_PRI_PKT_EN_DWLEN (1) +union uped_high_pri_pkt_en_u { + struct uped_high_pri_pkt_en { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HIGH_PRI_PKT_EN_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDIT_FLAG_SEL0_ADDR (0x15c204) +#define NBL_UPED_HW_EDIT_FLAG_SEL0_DEPTH (1) +#define NBL_UPED_HW_EDIT_FLAG_SEL0_WIDTH (32) +#define NBL_UPED_HW_EDIT_FLAG_SEL0_DWLEN (1) +union uped_hw_edit_flag_sel0_u { + struct uped_hw_edit_flag_sel0 { + u32 oft:5; /* [4:0] Default:0x1 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDIT_FLAG_SEL0_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDIT_FLAG_SEL1_ADDR (0x15c208) +#define NBL_UPED_HW_EDIT_FLAG_SEL1_DEPTH (1) +#define NBL_UPED_HW_EDIT_FLAG_SEL1_WIDTH (32) +#define NBL_UPED_HW_EDIT_FLAG_SEL1_DWLEN (1) +union uped_hw_edit_flag_sel1_u { + struct uped_hw_edit_flag_sel1 { + u32 oft:5; /* [4:0] Default:0x2 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDIT_FLAG_SEL1_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDIT_FLAG_SEL2_ADDR (0x15c20c) +#define NBL_UPED_HW_EDIT_FLAG_SEL2_DEPTH (1) +#define NBL_UPED_HW_EDIT_FLAG_SEL2_WIDTH (32) +#define NBL_UPED_HW_EDIT_FLAG_SEL2_DWLEN (1) +union uped_hw_edit_flag_sel2_u { + struct uped_hw_edit_flag_sel2 { + u32 oft:5; /* [4:0] Default:0x3 RW 
*/ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDIT_FLAG_SEL2_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDIT_FLAG_SEL3_ADDR (0x15c210) +#define NBL_UPED_HW_EDIT_FLAG_SEL3_DEPTH (1) +#define NBL_UPED_HW_EDIT_FLAG_SEL3_WIDTH (32) +#define NBL_UPED_HW_EDIT_FLAG_SEL3_DWLEN (1) +union uped_hw_edit_flag_sel3_u { + struct uped_hw_edit_flag_sel3 { + u32 oft:5; /* [4:0] Default:0x4 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDIT_FLAG_SEL3_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDIT_FLAG_SEL4_ADDR (0x15c214) +#define NBL_UPED_HW_EDIT_FLAG_SEL4_DEPTH (1) +#define NBL_UPED_HW_EDIT_FLAG_SEL4_WIDTH (32) +#define NBL_UPED_HW_EDIT_FLAG_SEL4_DWLEN (1) +union uped_hw_edit_flag_sel4_u { + struct uped_hw_edit_flag_sel4 { + u32 oft:5; /* [4:0] Default:0xe RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDIT_FLAG_SEL4_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_DPORT_ADDR (0x15c230) +#define NBL_UPED_FWD_DPORT_DEPTH (1) +#define NBL_UPED_FWD_DPORT_WIDTH (32) +#define NBL_UPED_FWD_DPORT_DWLEN (1) +union uped_fwd_dport_u { + struct uped_fwd_dport { + u32 id:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_DPORT_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_DQUEUE_ADDR (0x15c234) +#define NBL_UPED_FWD_DQUEUE_DEPTH (1) +#define NBL_UPED_FWD_DQUEUE_WIDTH (32) +#define NBL_UPED_FWD_DQUEUE_DWLEN (1) +union uped_fwd_dqueue_u { + struct uped_fwd_dqueue { + u32 id:6; /* [5:0] Default:0xa RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_DQUEUE_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_MIRID_ADDR (0x15c238) +#define NBL_UPED_FWD_MIRID_DEPTH (1) +#define NBL_UPED_FWD_MIRID_WIDTH (32) +#define NBL_UPED_FWD_MIRID_DWLEN (1) +union uped_fwd_mirid_u { + struct uped_fwd_mirid { + u32 id:6; /* [5:0] Default:0x8 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_MIRID_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_L4IDX_ADDR (0x15c23c) +#define NBL_UPED_FWD_L4IDX_DEPTH (1) +#define NBL_UPED_FWD_L4IDX_WIDTH (32) +#define NBL_UPED_FWD_L4IDX_DWLEN (1) +union uped_fwd_l4idx_u { + struct uped_fwd_l4idx { + u32 id:6; /* [5:0] Default:0x11 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_L4IDX_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_HASH_0_ADDR (0x15c244) +#define NBL_UPED_FWD_HASH_0_DEPTH (1) +#define NBL_UPED_FWD_HASH_0_WIDTH (32) +#define NBL_UPED_FWD_HASH_0_DWLEN (1) +union uped_fwd_hash_0_u { + struct uped_fwd_hash_0 { + u32 id:6; /* [5:0] Default:0x13 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_HASH_0_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_HASH_1_ADDR (0x15c248) +#define NBL_UPED_FWD_HASH_1_DEPTH (1) +#define NBL_UPED_FWD_HASH_1_WIDTH (32) +#define NBL_UPED_FWD_HASH_1_DWLEN (1) +union uped_fwd_hash_1_u { + struct uped_fwd_hash_1 { + u32 id:6; /* [5:0] Default:0x14 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_HASH_1_DWLEN]; +} __packed; + +#define NBL_UPED_L4_OFT_ADJUST_ADDR (0x15c250) +#define NBL_UPED_L4_OFT_ADJUST_DEPTH (1) +#define NBL_UPED_L4_OFT_ADJUST_WIDTH (32) +#define NBL_UPED_L4_OFT_ADJUST_DWLEN (1) +union uped_l4_oft_adjust_u { + struct uped_l4_oft_adjust { + u32 vau:8; /* [7:0] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_L4_OFT_ADJUST_DWLEN]; 
+} __packed; + +#define NBL_UPED_PLD_OFT_ADJUST_ADDR (0x15c254) +#define NBL_UPED_PLD_OFT_ADJUST_DEPTH (1) +#define NBL_UPED_PLD_OFT_ADJUST_WIDTH (32) +#define NBL_UPED_PLD_OFT_ADJUST_DWLEN (1) +union uped_pld_oft_adjust_u { + struct uped_pld_oft_adjust { + u32 vau:8; /* [7:0] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_PLD_OFT_ADJUST_DWLEN]; +} __packed; + +#define NBL_UPED_VLAN_TYPE0_ADDR (0x15c260) +#define NBL_UPED_VLAN_TYPE0_DEPTH (1) +#define NBL_UPED_VLAN_TYPE0_WIDTH (32) +#define NBL_UPED_VLAN_TYPE0_DWLEN (1) +union uped_vlan_type0_u { + struct uped_vlan_type0 { + u32 vau:16; /* [15:0] Default:0x8100 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_VLAN_TYPE0_DWLEN]; +} __packed; + +#define NBL_UPED_VLAN_TYPE1_ADDR (0x15c264) +#define NBL_UPED_VLAN_TYPE1_DEPTH (1) +#define NBL_UPED_VLAN_TYPE1_WIDTH (32) +#define NBL_UPED_VLAN_TYPE1_DWLEN (1) +union uped_vlan_type1_u { + struct uped_vlan_type1 { + u32 vau:16; /* [15:0] Default:0x88A8 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_VLAN_TYPE1_DWLEN]; +} __packed; + +#define NBL_UPED_VLAN_TYPE2_ADDR (0x15c268) +#define NBL_UPED_VLAN_TYPE2_DEPTH (1) +#define NBL_UPED_VLAN_TYPE2_WIDTH (32) +#define NBL_UPED_VLAN_TYPE2_DWLEN (1) +union uped_vlan_type2_u { + struct uped_vlan_type2 { + u32 vau:16; /* [15:0] Default:0x9100 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_VLAN_TYPE2_DWLEN]; +} __packed; + +#define NBL_UPED_VLAN_TYPE3_ADDR (0x15c26c) +#define NBL_UPED_VLAN_TYPE3_DEPTH (1) +#define NBL_UPED_VLAN_TYPE3_WIDTH (32) +#define NBL_UPED_VLAN_TYPE3_DWLEN (1) +union uped_vlan_type3_u { + struct uped_vlan_type3 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_VLAN_TYPE3_DWLEN]; +} __packed; + +#define NBL_UPED_L3_LEN_MDY_CMD_0_ADDR (0x15c300) +#define NBL_UPED_L3_LEN_MDY_CMD_0_DEPTH (1) +#define NBL_UPED_L3_LEN_MDY_CMD_0_WIDTH (32) +#define NBL_UPED_L3_LEN_MDY_CMD_0_DWLEN (1) +union uped_l3_len_mdy_cmd_0_u { + struct uped_l3_len_mdy_cmd_0 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0x2 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x2 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x2 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x0 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L3_LEN_MDY_CMD_0_DWLEN]; +} __packed; + +#define NBL_UPED_L3_LEN_MDY_CMD_1_ADDR (0x15c304) +#define NBL_UPED_L3_LEN_MDY_CMD_1_DEPTH (1) +#define NBL_UPED_L3_LEN_MDY_CMD_1_WIDTH (32) +#define NBL_UPED_L3_LEN_MDY_CMD_1_DWLEN (1) +union uped_l3_len_mdy_cmd_1_u { + struct uped_l3_len_mdy_cmd_1 { + u32 value:8; /* [7:0] Default:0x28 RW */ + u32 in_oft:7; /* [14:8] Default:0x4 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x2 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x1 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x0 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L3_LEN_MDY_CMD_1_DWLEN]; +} __packed; + +#define NBL_UPED_L4_LEN_MDY_CMD_0_ADDR (0x15c308) +#define NBL_UPED_L4_LEN_MDY_CMD_0_DEPTH (1) +#define 
NBL_UPED_L4_LEN_MDY_CMD_0_WIDTH (32) +#define NBL_UPED_L4_LEN_MDY_CMD_0_DWLEN (1) +union uped_l4_len_mdy_cmd_0_u { + struct uped_l4_len_mdy_cmd_0 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0xc RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x3 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x0 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x1 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPED_L4_LEN_MDY_CMD_0_DWLEN]; +} __packed; + +#define NBL_UPED_L4_LEN_MDY_CMD_1_ADDR (0x15c30c) +#define NBL_UPED_L4_LEN_MDY_CMD_1_DEPTH (1) +#define NBL_UPED_L4_LEN_MDY_CMD_1_WIDTH (32) +#define NBL_UPED_L4_LEN_MDY_CMD_1_DWLEN (1) +union uped_l4_len_mdy_cmd_1_u { + struct uped_l4_len_mdy_cmd_1 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0x4 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x3 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x0 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x1 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_LEN_MDY_CMD_1_DWLEN]; +} __packed; + +#define NBL_UPED_L3_CK_CMD_00_ADDR (0x15c310) +#define NBL_UPED_L3_CK_CMD_00_DEPTH (1) +#define NBL_UPED_L3_CK_CMD_00_WIDTH (32) +#define NBL_UPED_L3_CK_CMD_00_DWLEN (1) +union uped_l3_ck_cmd_00_u { + struct uped_l3_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0xa RW */ + u32 phid:2; /* [27:26] Default:0x2 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L3_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_UPED_L3_CK_CMD_01_ADDR (0x15c314) +#define NBL_UPED_L3_CK_CMD_01_DEPTH (1) +#define NBL_UPED_L3_CK_CMD_01_WIDTH (32) +#define NBL_UPED_L3_CK_CMD_01_DWLEN (1) +union uped_l3_ck_cmd_01_u { + struct uped_l3_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPED_L3_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_00_ADDR (0x15c318) +#define NBL_UPED_L4_CK_CMD_00_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_00_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_00_DWLEN (1) +union uped_l4_ck_cmd_00_u { + struct uped_l4_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x6 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x10 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + 
u32 data[NBL_UPED_L4_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_01_ADDR (0x15c31c) +#define NBL_UPED_L4_CK_CMD_01_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_01_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_01_DWLEN (1) +union uped_l4_ck_cmd_01_u { + struct uped_l4_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_10_ADDR (0x15c320) +#define NBL_UPED_L4_CK_CMD_10_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_10_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_10_DWLEN (1) +union uped_l4_ck_cmd_10_u { + struct uped_l4_ck_cmd_10 { + u32 value:8; /* [7:0] Default:0x11 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_10_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_11_ADDR (0x15c324) +#define NBL_UPED_L4_CK_CMD_11_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_11_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_11_DWLEN (1) +union uped_l4_ck_cmd_11_u { + struct uped_l4_ck_cmd_11 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_11_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_20_ADDR (0x15c328) +#define NBL_UPED_L4_CK_CMD_20_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_20_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_20_DWLEN (1) +union uped_l4_ck_cmd_20_u { + struct uped_l4_ck_cmd_20 { + u32 value:8; /* [7:0] Default:0x2e RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x10 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_20_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_21_ADDR (0x15c32c) +#define NBL_UPED_L4_CK_CMD_21_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_21_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_21_DWLEN (1) +union uped_l4_ck_cmd_21_u { + struct uped_l4_ck_cmd_21 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 
data[NBL_UPED_L4_CK_CMD_21_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_30_ADDR (0x15c330) +#define NBL_UPED_L4_CK_CMD_30_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_30_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_30_DWLEN (1) +union uped_l4_ck_cmd_30_u { + struct uped_l4_ck_cmd_30 { + u32 value:8; /* [7:0] Default:0x39 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_30_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_31_ADDR (0x15c334) +#define NBL_UPED_L4_CK_CMD_31_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_31_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_31_DWLEN (1) +union uped_l4_ck_cmd_31_u { + struct uped_l4_ck_cmd_31 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_31_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_40_ADDR (0x15c338) +#define NBL_UPED_L4_CK_CMD_40_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_40_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_40_DWLEN (1) +union uped_l4_ck_cmd_40_u { + struct uped_l4_ck_cmd_40 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x8 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x1 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_40_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_41_ADDR (0x15c33c) +#define NBL_UPED_L4_CK_CMD_41_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_41_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_41_DWLEN (1) +union uped_l4_ck_cmd_41_u { + struct uped_l4_ck_cmd_41 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x0 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x0 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_41_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_50_ADDR (0x15c340) +#define NBL_UPED_L4_CK_CMD_50_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_50_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_50_DWLEN (1) +union uped_l4_ck_cmd_50_u { + struct uped_l4_ck_cmd_50 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x2 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + 
u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_50_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_51_ADDR (0x15c344) +#define NBL_UPED_L4_CK_CMD_51_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_51_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_51_DWLEN (1) +union uped_l4_ck_cmd_51_u { + struct uped_l4_ck_cmd_51 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x0 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_51_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_60_ADDR (0x15c348) +#define NBL_UPED_L4_CK_CMD_60_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_60_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_60_DWLEN (1) +union uped_l4_ck_cmd_60_u { + struct uped_l4_ck_cmd_60 { + u32 value:8; /* [7:0] Default:0x62 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x2 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_60_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_61_ADDR (0x15c34c) +#define NBL_UPED_L4_CK_CMD_61_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_61_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_61_DWLEN (1) +union uped_l4_ck_cmd_61_u { + struct uped_l4_ck_cmd_61 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_61_DWLEN]; +} __packed; + +#define NBL_UPED_CFG_TEST_ADDR (0x15c600) +#define NBL_UPED_CFG_TEST_DEPTH (1) +#define NBL_UPED_CFG_TEST_WIDTH (32) +#define NBL_UPED_CFG_TEST_DWLEN (1) +union uped_cfg_test_u { + struct uped_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPED_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_UPED_BP_STATE_ADDR (0x15c608) +#define NBL_UPED_BP_STATE_DEPTH (1) +#define NBL_UPED_BP_STATE_WIDTH (32) +#define NBL_UPED_BP_STATE_DWLEN (1) +union uped_bp_state_u { + struct uped_bp_state { + u32 bm_rtn_tout:1; /* [0] Default:0x0 RO */ + u32 bm_not_rdy:1; /* [1] Default:0x0 RO */ + u32 rsv1:1; /* [2] Default:0x0 RO */ + u32 qm_fc:1; /* [3] Default:0x0 RO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_BP_STATE_DWLEN]; +} __packed; + +#define NBL_UPED_BP_HISTORY_ADDR (0x15c60c) +#define NBL_UPED_BP_HISTORY_DEPTH (1) +#define NBL_UPED_BP_HISTORY_WIDTH (32) +#define NBL_UPED_BP_HISTORY_DWLEN (1) +union uped_bp_history_u { + struct uped_bp_history { + u32 bm_rtn_tout:1; /* [0] Default:0x0 RC */ + u32 bm_not_rdy:1; /* [1] Default:0x0 RC */ + u32 rsv1:1; /* [2] Default:0x0 RC */ + u32 qm_fc:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_UPED_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_UPED_MIRID_IND_ADDR (0x15c900) +#define NBL_UPED_MIRID_IND_DEPTH (1) +#define NBL_UPED_MIRID_IND_WIDTH (32) +#define NBL_UPED_MIRID_IND_DWLEN (1) +union uped_mirid_ind_u { + struct uped_mirid_ind { + u32 nomat:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MIRID_IND_DWLEN]; +} __packed; + +#define NBL_UPED_MD_AUX_OFT_ADDR (0x15c904) +#define NBL_UPED_MD_AUX_OFT_DEPTH (1) +#define NBL_UPED_MD_AUX_OFT_WIDTH (32) +#define NBL_UPED_MD_AUX_OFT_DWLEN (1) +union uped_md_aux_oft_u { + struct uped_md_aux_oft { + u32 l2_oft:8; /* [7:0] Default:0x0 RO */ + u32 l3_oft:8; /* [15:8] Default:0x0 RO */ + u32 l4_oft:8; /* [23:16] Default:0x0 RO */ + u32 pld_oft:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_AUX_OFT_DWLEN]; +} __packed; + +#define NBL_UPED_MD_AUX_PKT_LEN_ADDR (0x15c908) +#define NBL_UPED_MD_AUX_PKT_LEN_DEPTH (1) +#define NBL_UPED_MD_AUX_PKT_LEN_WIDTH (32) +#define NBL_UPED_MD_AUX_PKT_LEN_DWLEN (1) +union uped_md_aux_pkt_len_u { + struct uped_md_aux_pkt_len { + u32 len:14; /* [13:0] Default:0x0 RO */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_AUX_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_UPED_MD_FWD_DPORT_ADDR (0x15c910) +#define NBL_UPED_MD_FWD_DPORT_DEPTH (1) +#define NBL_UPED_MD_FWD_DPORT_WIDTH (32) +#define NBL_UPED_MD_FWD_DPORT_DWLEN (1) +union uped_md_fwd_dport_u { + struct uped_md_fwd_dport { + u32 id:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_FWD_DPORT_DWLEN]; +} __packed; + +#define NBL_UPED_MD_AUX_PLD_CKSUM_ADDR (0x15c914) +#define NBL_UPED_MD_AUX_PLD_CKSUM_DEPTH (1) +#define NBL_UPED_MD_AUX_PLD_CKSUM_WIDTH (32) +#define NBL_UPED_MD_AUX_PLD_CKSUM_DWLEN (1) +union uped_md_aux_pld_cksum_u { + struct uped_md_aux_pld_cksum { + u32 ck:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_AUX_PLD_CKSUM_DWLEN]; +} __packed; + +#define NBL_UPED_INNER_PKT_CKSUM_ADDR (0x15c918) +#define NBL_UPED_INNER_PKT_CKSUM_DEPTH (1) +#define NBL_UPED_INNER_PKT_CKSUM_WIDTH (32) +#define NBL_UPED_INNER_PKT_CKSUM_DWLEN (1) +union uped_inner_pkt_cksum_u { + struct uped_inner_pkt_cksum { + u32 ck:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INNER_PKT_CKSUM_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_0_ADDR (0x15c920) +#define NBL_UPED_MD_EDIT_0_DEPTH (1) +#define NBL_UPED_MD_EDIT_0_WIDTH (32) +#define NBL_UPED_MD_EDIT_0_DWLEN (1) +union uped_md_edit_0_u { + struct uped_md_edit_0 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_0_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_1_ADDR (0x15c924) +#define NBL_UPED_MD_EDIT_1_DEPTH (1) +#define NBL_UPED_MD_EDIT_1_WIDTH (32) +#define NBL_UPED_MD_EDIT_1_DWLEN (1) +union uped_md_edit_1_u { + struct uped_md_edit_1 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_1_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_2_ADDR (0x15c928) +#define NBL_UPED_MD_EDIT_2_DEPTH (1) +#define NBL_UPED_MD_EDIT_2_WIDTH (32) +#define NBL_UPED_MD_EDIT_2_DWLEN (1) +union uped_md_edit_2_u { + struct uped_md_edit_2 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] 
Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_2_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_3_ADDR (0x15c92c) +#define NBL_UPED_MD_EDIT_3_DEPTH (1) +#define NBL_UPED_MD_EDIT_3_WIDTH (32) +#define NBL_UPED_MD_EDIT_3_DWLEN (1) +union uped_md_edit_3_u { + struct uped_md_edit_3 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_3_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_4_ADDR (0x15c930) +#define NBL_UPED_MD_EDIT_4_DEPTH (1) +#define NBL_UPED_MD_EDIT_4_WIDTH (32) +#define NBL_UPED_MD_EDIT_4_DWLEN (1) +union uped_md_edit_4_u { + struct uped_md_edit_4 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_4_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_5_ADDR (0x15c934) +#define NBL_UPED_MD_EDIT_5_DEPTH (1) +#define NBL_UPED_MD_EDIT_5_WIDTH (32) +#define NBL_UPED_MD_EDIT_5_DWLEN (1) +union uped_md_edit_5_u { + struct uped_md_edit_5 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_5_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_6_ADDR (0x15c938) +#define NBL_UPED_MD_EDIT_6_DEPTH (1) +#define NBL_UPED_MD_EDIT_6_WIDTH (32) +#define NBL_UPED_MD_EDIT_6_DWLEN (1) +union uped_md_edit_6_u { + struct uped_md_edit_6 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_6_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_7_ADDR (0x15c93c) +#define NBL_UPED_MD_EDIT_7_DEPTH (1) +#define NBL_UPED_MD_EDIT_7_WIDTH (32) +#define NBL_UPED_MD_EDIT_7_DWLEN (1) +union uped_md_edit_7_u { + struct uped_md_edit_7 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_7_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_8_ADDR (0x15c940) +#define NBL_UPED_MD_EDIT_8_DEPTH (1) +#define NBL_UPED_MD_EDIT_8_WIDTH (32) +#define NBL_UPED_MD_EDIT_8_DWLEN (1) +union uped_md_edit_8_u { + struct uped_md_edit_8 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_8_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_9_ADDR (0x15c944) +#define NBL_UPED_MD_EDIT_9_DEPTH (1) +#define NBL_UPED_MD_EDIT_9_WIDTH (32) +#define NBL_UPED_MD_EDIT_9_DWLEN (1) +union uped_md_edit_9_u { + struct uped_md_edit_9 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_9_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_10_ADDR (0x15c948) +#define NBL_UPED_MD_EDIT_10_DEPTH (1) +#define NBL_UPED_MD_EDIT_10_WIDTH (32) +#define NBL_UPED_MD_EDIT_10_DWLEN (1) +union uped_md_edit_10_u { + struct uped_md_edit_10 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_10_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_11_ADDR (0x15c94c) +#define NBL_UPED_MD_EDIT_11_DEPTH (1) +#define NBL_UPED_MD_EDIT_11_WIDTH (32) +#define 
NBL_UPED_MD_EDIT_11_DWLEN (1) +union uped_md_edit_11_u { + struct uped_md_edit_11 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_11_DWLEN]; +} __packed; + +#define NBL_UPED_ADD_DEL_LEN_ADDR (0x15c950) +#define NBL_UPED_ADD_DEL_LEN_DEPTH (1) +#define NBL_UPED_ADD_DEL_LEN_WIDTH (32) +#define NBL_UPED_ADD_DEL_LEN_DWLEN (1) +union uped_add_del_len_u { + struct uped_add_del_len { + u32 len:9; /* [8:0] Default:0x0 RO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_ADD_DEL_LEN_DWLEN]; +} __packed; + +#define NBL_UPED_TTL_INFO_ADDR (0x15c970) +#define NBL_UPED_TTL_INFO_DEPTH (1) +#define NBL_UPED_TTL_INFO_WIDTH (32) +#define NBL_UPED_TTL_INFO_DWLEN (1) +union uped_ttl_info_u { + struct uped_ttl_info { + u32 old_ttl:8; /* [7:0] Default:0x0 RO */ + u32 new_ttl:8; /* [15:8] Default:0x0 RO */ + u32 ttl_val:1; /* [16] Default:0x0 RC */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TTL_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_LEN_INFO_VLD_ADDR (0x15c974) +#define NBL_UPED_LEN_INFO_VLD_DEPTH (1) +#define NBL_UPED_LEN_INFO_VLD_WIDTH (32) +#define NBL_UPED_LEN_INFO_VLD_DWLEN (1) +union uped_len_info_vld_u { + struct uped_len_info_vld { + u32 length0:1; /* [0] Default:0x0 RC */ + u32 length1:1; /* [1] Default:0x0 RC */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_LEN_INFO_VLD_DWLEN]; +} __packed; + +#define NBL_UPED_LEN0_INFO_ADDR (0x15c978) +#define NBL_UPED_LEN0_INFO_DEPTH (1) +#define NBL_UPED_LEN0_INFO_WIDTH (32) +#define NBL_UPED_LEN0_INFO_DWLEN (1) +union uped_len0_info_u { + struct uped_len0_info { + u32 old_len:16; /* [15:0] Default:0x0 RO */ + u32 new_len:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_LEN0_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_LEN1_INFO_ADDR (0x15c97c) +#define NBL_UPED_LEN1_INFO_DEPTH (1) +#define NBL_UPED_LEN1_INFO_WIDTH (32) +#define NBL_UPED_LEN1_INFO_DWLEN (1) +union uped_len1_info_u { + struct uped_len1_info { + u32 old_len:16; /* [15:0] Default:0x0 RO */ + u32 new_len:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_LEN1_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_EDIT_ATNUM_INFO_ADDR (0x15c980) +#define NBL_UPED_EDIT_ATNUM_INFO_DEPTH (1) +#define NBL_UPED_EDIT_ATNUM_INFO_WIDTH (32) +#define NBL_UPED_EDIT_ATNUM_INFO_DWLEN (1) +union uped_edit_atnum_info_u { + struct uped_edit_atnum_info { + u32 replace:4; /* [3:0] Default:0x0 RO */ + u32 del:4; /* [7:4] Default:0x0 RO */ + u32 add:4; /* [11:8] Default:0x0 RO */ + u32 ttl:4; /* [15:12] Default:0x0 RO */ + u32 dscp:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_EDIT_ATNUM_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_EDIT_NO_AT_INFO_ADDR (0x15c984) +#define NBL_UPED_EDIT_NO_AT_INFO_DEPTH (1) +#define NBL_UPED_EDIT_NO_AT_INFO_WIDTH (32) +#define NBL_UPED_EDIT_NO_AT_INFO_DWLEN (1) +union uped_edit_no_at_info_u { + struct uped_edit_no_at_info { + u32 l3_len:1; /* [0] Default:0x0 RC */ + u32 l4_len:1; /* [1] Default:0x0 RC */ + u32 l3_ck:1; /* [2] Default:0x0 RC */ + u32 l4_ck:1; /* [3] Default:0x0 RC */ + u32 sctp_ck:1; /* [4] Default:0x0 RC */ + u32 rsv:27; /* [31:05] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_EDIT_NO_AT_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_UL4S_TOTAL_LENGTH_ADDR (0x15c988) +#define NBL_UPED_UL4S_TOTAL_LENGTH_DEPTH (1) +#define 
NBL_UPED_UL4S_TOTAL_LENGTH_WIDTH (32) +#define NBL_UPED_UL4S_TOTAL_LENGTH_DWLEN (1) +union uped_ul4s_total_length_u { + struct uped_ul4s_total_length { + u32 vau:14; /* [13:0] Default:0x0 RO */ + u32 rsv:16; /* [29:14] Default:0x0 RO */ + u32 tls_ind:1; /* [30] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RC */ + } __packed info; + u32 data[NBL_UPED_UL4S_TOTAL_LENGTH_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDT_PROF_ADDR (0x15d000) +#define NBL_UPED_HW_EDT_PROF_DEPTH (32) +#define NBL_UPED_HW_EDT_PROF_WIDTH (32) +#define NBL_UPED_HW_EDT_PROF_DWLEN (1) +union uped_hw_edt_prof_u { + struct uped_hw_edt_prof { + u32 l4_len:2; /* [1:0] Default:0x2 RW */ + u32 l3_len:2; /* [3:2] Default:0x2 RW */ + u32 l4_ck:3; /* [6:4] Default:0x7 RW */ + u32 l3_ck:1; /* [7:7] Default:0x0 RW */ + u32 l4_ck_zero_free:1; /* [8:8] Default:0x1 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDT_PROF_DWLEN]; +} __packed; +#define NBL_UPED_HW_EDT_PROF_REG(r) (NBL_UPED_HW_EDT_PROF_ADDR + \ + (NBL_UPED_HW_EDT_PROF_DWLEN * 4) * (r)) + +#define NBL_UPED_OUT_MASK_ADDR (0x15e000) +#define NBL_UPED_OUT_MASK_DEPTH (24) +#define NBL_UPED_OUT_MASK_WIDTH (64) +#define NBL_UPED_OUT_MASK_DWLEN (2) +union uped_out_mask_u { + struct uped_out_mask { + u32 flag:32; /* [31:0] Default:0x0 RW */ + u32 fwd:30; /* [61:32] Default:0x0 RW */ + u32 rsv:2; /* [63:62] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_OUT_MASK_DWLEN]; +} __packed; +#define NBL_UPED_OUT_MASK_REG(r) (NBL_UPED_OUT_MASK_ADDR + \ + (NBL_UPED_OUT_MASK_DWLEN * 4) * (r)) + +#define NBL_UPED_TAB_EDIT_CMD_ADDR (0x15f000) +#define NBL_UPED_TAB_EDIT_CMD_DEPTH (32) +#define NBL_UPED_TAB_EDIT_CMD_WIDTH (32) +#define NBL_UPED_TAB_EDIT_CMD_DWLEN (1) +union uped_tab_edit_cmd_u { + struct uped_tab_edit_cmd { + u32 in_offset:8; /* [7:0] Default:0x0 RW */ + u32 phid:2; /* [9:8] Default:0x0 RW */ + u32 len:7; /* [16:10] Default:0x0 RW */ + u32 mode:4; /* [20:17] Default:0xf RW */ + u32 l4_ck_ofld_upt:1; /* [21] Default:0x1 RW */ + u32 l3_ck_ofld_upt:1; /* [22] Default:0x1 RW */ + u32 rsv:9; /* [31:23] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TAB_EDIT_CMD_DWLEN]; +} __packed; +#define NBL_UPED_TAB_EDIT_CMD_REG(r) (NBL_UPED_TAB_EDIT_CMD_ADDR + \ + (NBL_UPED_TAB_EDIT_CMD_DWLEN * 4) * (r)) + +#define NBL_UPED_TAB_VSI_TYPE_ADDR (0x161000) +#define NBL_UPED_TAB_VSI_TYPE_DEPTH (1031) +#define NBL_UPED_TAB_VSI_TYPE_WIDTH (32) +#define NBL_UPED_TAB_VSI_TYPE_DWLEN (1) +union uped_tab_vsi_type_u { + struct uped_tab_vsi_type { + u32 sel:4; /* [3:0] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TAB_VSI_TYPE_DWLEN]; +} __packed; +#define NBL_UPED_TAB_VSI_TYPE_REG(r) (NBL_UPED_TAB_VSI_TYPE_ADDR + \ + (NBL_UPED_TAB_VSI_TYPE_DWLEN * 4) * (r)) + +#define NBL_UPED_TAB_REPLACE_ADDR (0x164000) +#define NBL_UPED_TAB_REPLACE_DEPTH (2048) +#define NBL_UPED_TAB_REPLACE_WIDTH (64) +#define NBL_UPED_TAB_REPLACE_DWLEN (2) +union uped_tab_replace_u { + struct uped_tab_replace { + u32 vau_arr[2]; /* [63:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPED_TAB_REPLACE_DWLEN]; +} __packed; +#define NBL_UPED_TAB_REPLACE_REG(r) (NBL_UPED_TAB_REPLACE_ADDR + \ + (NBL_UPED_TAB_REPLACE_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf.h new file mode 100644 index 0000000000000000000000000000000000000000..b285dd39c46a4f3611669c196c9f0c86c27932a9 
--- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf.h @@ -0,0 +1,6 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#include "nbl_intf_cmdq.h" +#include "nbl_intf_vdpa.h" +#include "nbl_intf_pcompleter_host.h" diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_cmdq.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_cmdq.h new file mode 100644 index 0000000000000000000000000000000000000000..b0238f1c3742c84924a0f9c8db49f747a98575d2 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_cmdq.h @@ -0,0 +1,438 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_CMDQ_H +#define NBL_CMDQ_H 1 + +#include <linux/types.h> + +#define NBL_CMDQ_BASE (0x00FA0000) + +#define NBL_CMDQ_INT_STATUS_ADDR (0xfa0000) +#define NBL_CMDQ_INT_STATUS_DEPTH (1) +#define NBL_CMDQ_INT_STATUS_WIDTH (32) +#define NBL_CMDQ_INT_STATUS_DWLEN (1) +union cmdq_int_status_u { + struct cmdq_int_status { + u32 fatal_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RWC */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RWC */ + u32 cif_err:1; /* [05:05] Default:0x0 RWC */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_CMDQ_INT_MASK_ADDR (0xfa0004) +#define NBL_CMDQ_INT_MASK_DEPTH (1) +#define NBL_CMDQ_INT_MASK_WIDTH (32) +#define NBL_CMDQ_INT_MASK_DWLEN (1) +union cmdq_int_mask_u { + struct cmdq_int_mask { + u32 fatal_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RW */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RW */ + u32 cif_err:1; /* [05:05] Default:0x0 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_INT_MASK_DWLEN]; +} __packed; + +#define NBL_CMDQ_INT_SET_ADDR (0xfa0008) +#define NBL_CMDQ_INT_SET_DEPTH (1) +#define NBL_CMDQ_INT_SET_WIDTH (32) +#define NBL_CMDQ_INT_SET_DWLEN (1) +union cmdq_int_set_u { + struct cmdq_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 WO */ + u32 data_cor_err:1; /* [04:04] Default:0x0 WO */ + u32 cif_err:1; /* [05:05] Default:0x0 WO */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_INT_SET_DWLEN]; +} __packed; + +#define NBL_CMDQ_INIT_DONE_ADDR (0xfa000c) +#define NBL_CMDQ_INIT_DONE_DEPTH (1) +#define NBL_CMDQ_INIT_DONE_WIDTH (32) +#define NBL_CMDQ_INIT_DONE_DWLEN (1) +union cmdq_init_done_u { + struct cmdq_init_done { + u32 done:1; /* [00:00] Default:0x1 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_CMDQ_WARNING_ADDR (0xfa0010) +#define NBL_CMDQ_WARNING_DEPTH (1) +#define NBL_CMDQ_WARNING_WIDTH (32) +#define NBL_CMDQ_WARNING_DWLEN (1) +union cmdq_warning_u { + struct cmdq_warning { + u32 ecpu_wr_seq_err:1; /* [00:00] Default:0x0 RO */ + u32 ecpu_data_len_err:1; /* [01:01] Default:0x0 RO */ + u32 ecpu_data_olen:1; /* [02:02] Default:0x0 RO */ + u32 ecpu_desc_rdif_rerr:1; /* [03:03] Default:0x0 RO */ +
u32 ecpu_data_rdif_rerr:1; /* [04:04] Default:0x0 RO */ + u32 host_wr_seq_err:1; /* [05:05] Default:0x0 RO */ + u32 host_data_len_err:1; /* [06:06] Default:0x0 RO */ + u32 host_data_olen:1; /* [07:07] Default:0x0 RO */ + u32 host_desc_rdif_rerr:1; /* [08:08] Default:0x0 RO */ + u32 host_data_rdif_rerr:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_WARNING_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_WR_SEQ_ERR_LATCH_ADDR (0xfa00ac) +#define NBL_CMDQ_ECPU_WR_SEQ_ERR_LATCH_DEPTH (1) +#define NBL_CMDQ_ECPU_WR_SEQ_ERR_LATCH_WIDTH (32) +#define NBL_CMDQ_ECPU_WR_SEQ_ERR_LATCH_DWLEN (1) +union cmdq_ecpu_wr_seq_err_latch_u { + struct cmdq_ecpu_wr_seq_err_latch { + u32 err_latch:8; /* [07:00] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_WR_SEQ_ERR_LATCH_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_WR_SEQ_ERR_LATCH_ADDR (0xfa00c8) +#define NBL_CMDQ_HOST_WR_SEQ_ERR_LATCH_DEPTH (1) +#define NBL_CMDQ_HOST_WR_SEQ_ERR_LATCH_WIDTH (32) +#define NBL_CMDQ_HOST_WR_SEQ_ERR_LATCH_DWLEN (1) +union cmdq_host_wr_seq_err_latch_u { + struct cmdq_host_wr_seq_err_latch { + u32 err_latch:8; /* [07:00] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_WR_SEQ_ERR_LATCH_DWLEN]; +} __packed; + +#define NBL_CMDQ_CAR_CTRL_ADDR (0xfa00d0) +#define NBL_CMDQ_CAR_CTRL_DEPTH (1) +#define NBL_CMDQ_CAR_CTRL_WIDTH (32) +#define NBL_CMDQ_CAR_CTRL_DWLEN (1) +union cmdq_car_ctrl_u { + struct cmdq_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_CMDQ_FLOW_EN_ADDR (0xfa00d4) +#define NBL_CMDQ_FLOW_EN_DEPTH (1) +#define NBL_CMDQ_FLOW_EN_WIDTH (32) +#define NBL_CMDQ_FLOW_EN_DWLEN (1) +union cmdq_flow_en_u { + struct cmdq_flow_en { + u32 ecpu_desc_rdif_ack_cnt_en:1; /* [00:00] Default:0x1 RW */ + u32 ecpu_data_rdif_ack_cnt_en:1; /* [01:01] Default:0x1 RW */ + u32 ecpu_desc_wdif_ack_cnt_en:1; /* [02:02] Default:0x1 RW */ + u32 ecpu_data_wdif_ack_cnt_en:1; /* [03:03] Default:0x1 RW */ + u32 host_desc_rdif_ack_cnt_en:1; /* [04:04] Default:0x1 RW */ + u32 host_data_rdif_ack_cnt_en:1; /* [05:05] Default:0x1 RW */ + u32 host_desc_wdif_ack_cnt_en:1; /* [06:06] Default:0x1 RW */ + u32 host_data_wdif_ack_cnt_en:1; /* [07:07] Default:0x1 RW */ + u32 cmdq_inside_cnt_en:1; /* [08:08] Default:0x1 RW */ + u32 ecpu_cmdq_desc_rdif_rerr_cnt_en:1; /* [09:09] Default:0x1 RW */ + u32 ecpu_cmdq_data_rdif_rerr_cnt_en:1; /* [10:10] Default:0x1 RW */ + u32 ecpu_data_len_err_cnt_en:1; /* [11:11] Default:0x1 RW */ + u32 ecpu_data_olen_cnt_en:1; /* [12:12] Default:0x1 RW */ + u32 ecpu_wr_seq_err_cnt_en:1; /* [13:13] Default:0x1 RW */ + u32 ecpu_rst_cnt:1; /* [14:14] Default:0x1 RW */ + u32 host_cmdq_desc_rdif_rerr_cnt_en:1; /* [15:15] Default:0x1 RW */ + u32 host_cmdq_data_rdif_rerr_cnt_en:1; /* [16:16] Default:0x1 RW */ + u32 host_data_len_err_cnt_en:1; /* [17:17] Default:0x1 RW */ + u32 host_data_olen_cnt_en:1; /* [18:18] Default:0x1 RW */ + u32 host_wr_seq_err_cnt_en:1; /* [19:19] Default:0x1 RW */ + u32 host_rst_cnt:1; /* [20:20] Default:0x1 RW */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_FLOW_EN_DWLEN]; +} __packed; + +#define NBL_CMDQ_CIF_ERR_INFO_ADDR (0xfa00d8) 
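+/*
+ * Editorial sketch, not generated content: like every register in this
+ * file, the CIF error-info word below is read as a raw dword into .data[]
+ * and decoded through the .info bitfields.  "base" is an assumed
+ * ioremapped BAR pointer and the field semantics are inferred from their
+ * names:
+ *
+ *	union cmdq_cif_err_info_u err;
+ *
+ *	err.data[0] = readl(base + NBL_CMDQ_CIF_ERR_INFO_ADDR);
+ *	pr_err("cmdq cif %s err%s at reg 0x%x\n",
+ *	       err.info.wr_err ? "write" : "read",
+ *	       err.info.ucor_err ? " (uncorrectable)" : "", err.info.addr);
+ */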
+#define NBL_CMDQ_CIF_ERR_INFO_DEPTH (1) +#define NBL_CMDQ_CIF_ERR_INFO_WIDTH (32) +#define NBL_CMDQ_CIF_ERR_INFO_DWLEN (1) +union cmdq_cif_err_info_u { + struct cmdq_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_BADDR_L_ADDR (0xfa1000) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_L_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_L_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_L_DWLEN (1) +union cmdq_ecpu_cmdq_baddr_l_u { + struct cmdq_ecpu_cmdq_baddr_l { + u32 baddr_l:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_BADDR_L_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_BADDR_H_ADDR (0xfa1004) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_H_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_H_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_H_DWLEN (1) +union cmdq_ecpu_cmdq_baddr_h_u { + struct cmdq_ecpu_cmdq_baddr_h { + u32 baddr_h:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_BADDR_H_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_SIZE_ADDR (0xfa1008) +#define NBL_CMDQ_ECPU_CMDQ_SIZE_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_SIZE_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_SIZE_DWLEN (1) +union cmdq_ecpu_cmdq_size_u { + struct cmdq_ecpu_cmdq_size { + u32 cmdq_size:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_SIZE_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_CURR_ADDR (0xfa100c) +#define NBL_CMDQ_ECPU_CMDQ_CURR_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_CURR_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_CURR_DWLEN (1) +union cmdq_ecpu_cmdq_curr_u { + struct cmdq_ecpu_cmdq_curr { + u32 cmdq_curr:17; /* [16:00] Default:0x0 RO */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_CURR_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_TAIL_ADDR (0xfa1010) +#define NBL_CMDQ_ECPU_CMDQ_TAIL_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_TAIL_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_TAIL_DWLEN (1) +union cmdq_ecpu_cmdq_tail_u { + struct cmdq_ecpu_cmdq_tail { + u32 cmdq_tail:17; /* [16:00] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_TAIL_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_EN_ADDR (0xfa1014) +#define NBL_CMDQ_ECPU_CMDQ_EN_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_EN_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_EN_DWLEN (1) +union cmdq_ecpu_cmdq_en_u { + struct cmdq_ecpu_cmdq_en { + u32 cmdq_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_EN_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_DIF_MODE_ADDR (0xfa1018) +#define NBL_CMDQ_ECPU_CMDQ_DIF_MODE_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_DIF_MODE_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_DIF_MODE_DWLEN (1) +union cmdq_ecpu_cmdq_dif_mode_u { + struct cmdq_ecpu_cmdq_dif_mode { + u32 dif_mode:3; /* [02:00] Default:0x2 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_DIF_MODE_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_DIF_INFO_ADDR (0xfa101c) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INFO_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INFO_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INFO_DWLEN (1) +union cmdq_ecpu_cmdq_dif_info_u { + struct cmdq_ecpu_cmdq_dif_info { + u32 dif_info:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* 
[31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_DIF_INFO_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_DIF_BDF_ADDR (0xfa1020) +#define NBL_CMDQ_ECPU_CMDQ_DIF_BDF_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_DIF_BDF_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_DIF_BDF_DWLEN (1) +union cmdq_ecpu_cmdq_dif_bdf_u { + struct cmdq_ecpu_cmdq_dif_bdf { + u32 dif_bdf:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_DIF_BDF_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_DIF_INT_ADDR (0xfa1024) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INT_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INT_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INT_DWLEN (1) +union cmdq_ecpu_cmdq_dif_int_u { + struct cmdq_ecpu_cmdq_dif_int { + u32 dif_int:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_DIF_INT_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_BADDR_L_ADDR (0xfa1100) +#define NBL_CMDQ_HOST_CMDQ_BADDR_L_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_BADDR_L_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_BADDR_L_DWLEN (1) +union cmdq_host_cmdq_baddr_l_u { + struct cmdq_host_cmdq_baddr_l { + u32 cmdq_baddr_l:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_BADDR_L_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_BADDR_H_ADDR (0xfa1104) +#define NBL_CMDQ_HOST_CMDQ_BADDR_H_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_BADDR_H_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_BADDR_H_DWLEN (1) +union cmdq_host_cmdq_baddr_h_u { + struct cmdq_host_cmdq_baddr_h { + u32 cmdq_baddr_h:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_BADDR_H_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_SIZE_ADDR (0xfa1108) +#define NBL_CMDQ_HOST_CMDQ_SIZE_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_SIZE_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_SIZE_DWLEN (1) +union cmdq_host_cmdq_size_u { + struct cmdq_host_cmdq_size { + u32 cmdq_size:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_SIZE_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_CURR_ADDR (0xfa110c) +#define NBL_CMDQ_HOST_CMDQ_CURR_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_CURR_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_CURR_DWLEN (1) +union cmdq_host_cmdq_curr_u { + struct cmdq_host_cmdq_curr { + u32 cmdq_curr:17; /* [16:00] Default:0x0 RO */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_CURR_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_TAIL_ADDR (0xfa1110) +#define NBL_CMDQ_HOST_CMDQ_TAIL_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_TAIL_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_TAIL_DWLEN (1) +union cmdq_host_cmdq_tail_u { + struct cmdq_host_cmdq_tail { + u32 cmdq_tail:17; /* [16:00] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_TAIL_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_EN_ADDR (0xfa1114) +#define NBL_CMDQ_HOST_CMDQ_EN_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_EN_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_EN_DWLEN (1) +union cmdq_host_cmdq_en_u { + struct cmdq_host_cmdq_en { + u32 cmdq_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_EN_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_DIF_MODE_ADDR (0xfa1118) +#define NBL_CMDQ_HOST_CMDQ_DIF_MODE_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_DIF_MODE_WIDTH (32) +#define 
NBL_CMDQ_HOST_CMDQ_DIF_MODE_DWLEN (1) +union cmdq_host_cmdq_dif_mode_u { + struct cmdq_host_cmdq_dif_mode { + u32 dif_mode:3; /* [02:00] Default:0x2 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_DIF_MODE_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_DIF_INFO_ADDR (0xfa111c) +#define NBL_CMDQ_HOST_CMDQ_DIF_INFO_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_DIF_INFO_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_DIF_INFO_DWLEN (1) +union cmdq_host_cmdq_dif_info_u { + struct cmdq_host_cmdq_dif_info { + u32 dif_info:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_DIF_INFO_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_DIF_BDF_ADDR (0xfa1120) +#define NBL_CMDQ_HOST_CMDQ_DIF_BDF_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_DIF_BDF_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_DIF_BDF_DWLEN (1) +union cmdq_host_cmdq_dif_bdf_u { + struct cmdq_host_cmdq_dif_bdf { + u32 dif_bdf:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_DIF_BDF_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_DIF_INT_ADDR (0xfa1124) +#define NBL_CMDQ_HOST_CMDQ_DIF_INT_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_DIF_INT_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_DIF_INT_DWLEN (1) +union cmdq_host_cmdq_dif_int_u { + struct cmdq_host_cmdq_dif_int { + u32 dif_int:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_DIF_INT_DWLEN]; +} __packed; + +#define NBL_CMDQ_MDL_INFO_ADDR (0xfa1300) +#define NBL_CMDQ_MDL_INFO_DEPTH (1) +#define NBL_CMDQ_MDL_INFO_WIDTH (32) +#define NBL_CMDQ_MDL_INFO_DWLEN (1) +union cmdq_mdl_info_u { + struct cmdq_mdl_info { + u32 verison_id:16; /* [15:00] Default:0x0001 RO */ + u32 prj_id:16; /* [31:16] Default:0x0020 RO */ + } __packed info; + u32 data[NBL_CMDQ_MDL_INFO_DWLEN]; +} __packed; + +#define NBL_CMDQ_VERSION_ADDR (0xfa1304) +#define NBL_CMDQ_VERSION_DEPTH (1) +#define NBL_CMDQ_VERSION_WIDTH (32) +#define NBL_CMDQ_VERSION_DWLEN (1) +union cmdq_version_u { + struct cmdq_version { + u32 date:32; /* [31:00] Default:0x20220803 RO */ + } __packed info; + u32 data[NBL_CMDQ_VERSION_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcompleter_host.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcompleter_host.h new file mode 100644 index 0000000000000000000000000000000000000000..f65341a8054fbd5380732e811b25c5a4caacadef --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcompleter_host.h @@ -0,0 +1,949 @@ +// Code generated by interstellar. DO NOT EDIT. 
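+// Editorial note, not generated content: as in the other interstellar
+// headers, every register here is described by an ADDR/DEPTH/WIDTH/DWLEN
+// macro quartet plus a union whose .data[] mirrors the raw dwords and
+// whose .info struct names the bitfields; DEPTH > 1 tables additionally
+// get a _REG(r) macro stepping the address by DWLEN dwords per row.
+// A minimal hedged read sketch, assuming an ioremapped "base":
+//
+//	union pcompleter_host_init_done_u done;
+//
+//	done.data[0] = readl(base + NBL_PCOMPLETER_HOST_INIT_DONE_ADDR);
+//	/* done.info.done reads back 1 once the block has initialized */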
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_PCOMPLETER_HOST_H +#define NBL_PCOMPLETER_HOST_H 1 + +#include <linux/types.h> + +#define NBL_PCOMPLETER_HOST_BASE (0x00F08000) + +#define NBL_PCOMPLETER_HOST_INT_STATUS_ADDR (0xf08000) +#define NBL_PCOMPLETER_HOST_INT_STATUS_DEPTH (1) +#define NBL_PCOMPLETER_HOST_INT_STATUS_WIDTH (32) +#define NBL_PCOMPLETER_HOST_INT_STATUS_DWLEN (1) +union pcompleter_host_int_status_u { + struct pcompleter_host_int_status { + u32 fatal_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_INT_MASK_ADDR (0xf08004) +#define NBL_PCOMPLETER_HOST_INT_MASK_DEPTH (1) +#define NBL_PCOMPLETER_HOST_INT_MASK_WIDTH (32) +#define NBL_PCOMPLETER_HOST_INT_MASK_DWLEN (1) +union pcompleter_host_int_mask_u { + struct pcompleter_host_int_mask { + u32 fatal_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RW */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 data_cor_err:1; /* [08:08] Default:0x0 RW */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_INT_MASK_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_INT_SET_ADDR (0xf08008) +#define NBL_PCOMPLETER_HOST_INT_SET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_INT_SET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_INT_SET_DWLEN (1) +union pcompleter_host_int_set_u { + struct pcompleter_host_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 data_cor_err:1; /* [08:08] Default:0x0 WO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_INT_SET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_INIT_DONE_ADDR (0xf0800c) +#define NBL_PCOMPLETER_HOST_INIT_DONE_DEPTH (1) +#define NBL_PCOMPLETER_HOST_INIT_DONE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_INIT_DONE_DWLEN (1) +union pcompleter_host_init_done_u { + struct pcompleter_host_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CIF_ERR_INFO_ADDR (0xf08040) +#define NBL_PCOMPLETER_HOST_CIF_ERR_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CIF_ERR_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CIF_ERR_INFO_DWLEN (1) +union pcompleter_host_cif_err_info_u { + struct pcompleter_host_cif_err_info { + u32 addr:30;
/* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CAR_CTRL_ADDR (0xf08100) +#define NBL_PCOMPLETER_HOST_CAR_CTRL_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CAR_CTRL_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CAR_CTRL_DWLEN (1) +union pcompleter_host_car_ctrl_u { + struct pcompleter_host_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_ECPU_READY_ADDR (0xf090a0) +#define NBL_PCOMPLETER_HOST_ECPU_READY_DEPTH (1) +#define NBL_PCOMPLETER_HOST_ECPU_READY_WIDTH (32) +#define NBL_PCOMPLETER_HOST_ECPU_READY_DWLEN (1) +union pcompleter_host_ecpu_ready_u { + struct pcompleter_host_ecpu_ready { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_ECPU_READY_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_ECPU_STATUS_ADDR (0xf090a4) +#define NBL_PCOMPLETER_HOST_ECPU_STATUS_DEPTH (1) +#define NBL_PCOMPLETER_HOST_ECPU_STATUS_WIDTH (32) +#define NBL_PCOMPLETER_HOST_ECPU_STATUS_DWLEN (1) +union pcompleter_host_ecpu_status_u { + struct pcompleter_host_ecpu_status { + u32 dbg:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_ECPU_STATUS_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_TIMES_ADDR (0xf090a8) +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_TIMES_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_TIMES_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_TIMES_DWLEN (1) +union pcompleter_host_cfg_heartbeat_times_u { + struct pcompleter_host_cfg_heartbeat_times { + u32 cnt:31; /* [30:0] Default:0x0 RW */ + u32 vld:1; /* [31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_TIMES_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_ENABLE_ADDR (0xf090ac) +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_ENABLE_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_ENABLE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_ENABLE_DWLEN (1) +union pcompleter_host_cfg_heartbeat_enable_u { + struct pcompleter_host_cfg_heartbeat_enable { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_ENABLE_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_HEARTBEAT_TIMES_ADDR (0xf090b0) +#define NBL_PCOMPLETER_HOST_HEARTBEAT_TIMES_DEPTH (1) +#define NBL_PCOMPLETER_HOST_HEARTBEAT_TIMES_WIDTH (32) +#define NBL_PCOMPLETER_HOST_HEARTBEAT_TIMES_DWLEN (1) +union pcompleter_host_heartbeat_times_u { + struct pcompleter_host_heartbeat_times { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_HEARTBEAT_TIMES_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_DATA_BIG_END_ADDR (0xf090b4) +#define NBL_PCOMPLETER_HOST_DATA_BIG_END_DEPTH (1) +#define NBL_PCOMPLETER_HOST_DATA_BIG_END_WIDTH (32) +#define NBL_PCOMPLETER_HOST_DATA_BIG_END_DWLEN (1) +union pcompleter_host_data_big_end_u { + struct pcompleter_host_data_big_end { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed 
info; + u32 data[NBL_PCOMPLETER_HOST_DATA_BIG_END_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MSG_ADDR_SIZE_ADDR (0xf090b8) +#define NBL_PCOMPLETER_HOST_MSG_ADDR_SIZE_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MSG_ADDR_SIZE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSG_ADDR_SIZE_DWLEN (1) +union pcompleter_host_msg_addr_size_u { + struct pcompleter_host_msg_addr_size { + u32 dbg:32; /* [31:0] Default:0x100000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSG_ADDR_SIZE_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CIF_AXI_BASE_ADDR_ADDR (0xf090bc) +#define NBL_PCOMPLETER_HOST_CIF_AXI_BASE_ADDR_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CIF_AXI_BASE_ADDR_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CIF_AXI_BASE_ADDR_DWLEN (1) +union pcompleter_host_cif_axi_base_addr_u { + struct pcompleter_host_cif_axi_base_addr { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CIF_AXI_BASE_ADDR_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CIF_UP_BASE_ADDR_ADDR (0xf090c0) +#define NBL_PCOMPLETER_HOST_CIF_UP_BASE_ADDR_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CIF_UP_BASE_ADDR_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CIF_UP_BASE_ADDR_DWLEN (1) +union pcompleter_host_cif_up_base_addr_u { + struct pcompleter_host_cif_up_base_addr { + u32 dbg:32; /* [31:0] Default:0xfac000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CIF_UP_BASE_ADDR_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MSG_AXI_BASE_ADDR_ADDR (0xf090c4) +#define NBL_PCOMPLETER_HOST_MSG_AXI_BASE_ADDR_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MSG_AXI_BASE_ADDR_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSG_AXI_BASE_ADDR_DWLEN (1) +union pcompleter_host_msg_axi_base_addr_u { + struct pcompleter_host_msg_axi_base_addr { + u32 dbg:32; /* [31:0] Default:0x60000000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSG_AXI_BASE_ADDR_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MSG_SWITCH_ADDR (0xf090c8) +#define NBL_PCOMPLETER_HOST_MSG_SWITCH_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MSG_SWITCH_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSG_SWITCH_DWLEN (1) +union pcompleter_host_msg_switch_u { + struct pcompleter_host_msg_switch { + u32 dbg:4; /* [3:0] Default:0x9 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSG_SWITCH_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MBX_SOFT_MODE_ADDR (0xf090cc) +#define NBL_PCOMPLETER_HOST_MBX_SOFT_MODE_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MBX_SOFT_MODE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MBX_SOFT_MODE_DWLEN (1) +union pcompleter_host_mbx_soft_mode_u { + struct pcompleter_host_mbx_soft_mode { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MBX_SOFT_MODE_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MSG_CLR_INT_ADDR (0xf09104) +#define NBL_PCOMPLETER_HOST_MSG_CLR_INT_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MSG_CLR_INT_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSG_CLR_INT_DWLEN (1) +union pcompleter_host_msg_clr_int_u { + struct pcompleter_host_msg_clr_int { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSG_CLR_INT_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_AXI_READY_ADDR (0xf09108) +#define NBL_PCOMPLETER_HOST_AXI_READY_DEPTH (1) +#define NBL_PCOMPLETER_HOST_AXI_READY_WIDTH (32) +#define NBL_PCOMPLETER_HOST_AXI_READY_DWLEN (1) +union pcompleter_host_axi_ready_u { + struct pcompleter_host_axi_ready { + u32 overtime:32; 
/* [31:0] Default:0x5fffff RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_AXI_READY_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_READY_ADDR (0xf0910c) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_READY_DEPTH (1) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_READY_WIDTH (32) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_READY_DWLEN (1) +union pcompleter_host_virtio_table_ready_u { + struct pcompleter_host_virtio_table_ready { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_VIRTIO_TABLE_READY_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_SELECT_ADDR (0xf09110) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_SELECT_DEPTH (1) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_SELECT_WIDTH (32) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_SELECT_DWLEN (1) +union pcompleter_host_virtio_table_select_u { + struct pcompleter_host_virtio_table_select { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_VIRTIO_TABLE_SELECT_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_READY_ADDR (0xf09114) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_READY_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_READY_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_READY_DWLEN (1) +union pcompleter_host_rdma_table_ready_u { + struct pcompleter_host_rdma_table_ready { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_TABLE_READY_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_SELECT_ADDR (0xf09118) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_SELECT_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_SELECT_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_SELECT_DWLEN (1) +union pcompleter_host_rdma_table_select_u { + struct pcompleter_host_rdma_table_select { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_TABLE_SELECT_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_OPROM_OFFSET_ADDR (0xf0911c) +#define NBL_PCOMPLETER_HOST_CFG_OPROM_OFFSET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_OPROM_OFFSET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_OPROM_OFFSET_DWLEN (1) +union pcompleter_host_cfg_oprom_offset_u { + struct pcompleter_host_cfg_oprom_offset { + u32 addr:32; /* [31:00] Default:0x4000000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_OPROM_OFFSET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR0_ADDR (0xf09120) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR0_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR0_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR0_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr0_u { + struct pcompleter_host_cfg_rdma_base_addr0 { + u32 dbg:32; /* [31:00] Default:0x01110000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR0_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR1_ADDR (0xf09124) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR1_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR1_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR1_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr1_u { + struct pcompleter_host_cfg_rdma_base_addr1 { + u32 dbg:32; /* [31:00] Default:0x00400000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR1_DWLEN]; +} __packed; + +#define 
NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR2_ADDR (0xf09128) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR2_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR2_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR2_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr2_u { + struct pcompleter_host_cfg_rdma_base_addr2 { + u32 dbg:32; /* [31:00] Default:0x011A0000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR2_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR3_ADDR (0xf0912c) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR3_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR3_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR3_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr3_u { + struct pcompleter_host_cfg_rdma_base_addr3 { + u32 dbg:32; /* [31:00] Default:0x011A0000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR3_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR4_ADDR (0xf09130) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR4_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR4_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR4_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr4_u { + struct pcompleter_host_cfg_rdma_base_addr4 { + u32 dbg:32; /* [31:00] Default:0x011A0000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR4_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR5_ADDR (0xf09134) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR5_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR5_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR5_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr5_u { + struct pcompleter_host_cfg_rdma_base_addr5 { + u32 dbg:32; /* [31:00] Default:0x011A0000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR5_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR6_ADDR (0xf09138) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR6_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR6_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR6_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr6_u { + struct pcompleter_host_cfg_rdma_base_addr6 { + u32 dbg:32; /* [31:00] Default:0x01110000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR6_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR7_ADDR (0xf0913c) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR7_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR7_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR7_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr7_u { + struct pcompleter_host_cfg_rdma_base_addr7 { + u32 dbg:32; /* [31:00] Default:0x01110000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR7_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_MAILBOX_OFFSET_ADDR (0xf09140) +#define NBL_PCOMPLETER_HOST_CFG_MAILBOX_OFFSET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_MAILBOX_OFFSET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_MAILBOX_OFFSET_DWLEN (1) +union pcompleter_host_cfg_mailbox_offset_u { + struct pcompleter_host_cfg_mailbox_offset { + u32 addr:32; /* [31:00] Default:0xFB2000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_MAILBOX_OFFSET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_MSIX_OFFSET_ADDR (0xf09144) +#define NBL_PCOMPLETER_HOST_CFG_MSIX_OFFSET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_MSIX_OFFSET_WIDTH (32) +#define 
NBL_PCOMPLETER_HOST_CFG_MSIX_OFFSET_DWLEN (1) +union pcompleter_host_cfg_msix_offset_u { + struct pcompleter_host_cfg_msix_offset { + u32 addr:32; /* [31:00] Default:0xF6C000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_MSIX_OFFSET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_MSIX_INVLD_OFFSET_ADDR (0xf09148) +#define NBL_PCOMPLETER_HOST_CFG_MSIX_INVLD_OFFSET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_MSIX_INVLD_OFFSET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_MSIX_INVLD_OFFSET_DWLEN (1) +union pcompleter_host_cfg_msix_invld_offset_u { + struct pcompleter_host_cfg_msix_invld_offset { + u32 addr:32; /* [31:00] Default:0xF4C300 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_MSIX_INVLD_OFFSET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_PBA_OFFSET_ADDR (0xf0914c) +#define NBL_PCOMPLETER_HOST_CFG_PBA_OFFSET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_PBA_OFFSET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_PBA_OFFSET_DWLEN (1) +union pcompleter_host_cfg_pba_offset_u { + struct pcompleter_host_cfg_pba_offset { + u32 addr:32; /* [31:00] Default:0xF4D000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_PBA_OFFSET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MSG_PTR_ADDR (0xf09154) +#define NBL_PCOMPLETER_HOST_MSG_PTR_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MSG_PTR_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSG_PTR_DWLEN (1) +union pcompleter_host_msg_ptr_u { + struct pcompleter_host_msg_ptr { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSG_PTR_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_OPROM_DEBUG_ADDR (0xf09160) +#define NBL_PCOMPLETER_HOST_OPROM_DEBUG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_OPROM_DEBUG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_OPROM_DEBUG_DWLEN (1) +union pcompleter_host_oprom_debug_u { + struct pcompleter_host_oprom_debug { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_OPROM_DEBUG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CMDQ_ADDR (0xf09170) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CMDQ_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CMDQ_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CMDQ_DWLEN (1) +union pcompleter_host_cfg_funtion_id_cmdq_u { + struct pcompleter_host_cfg_funtion_id_cmdq { + u32 dbg:10; /* [9:0] Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 Rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CMDQ_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CTRLQ_ADDR (0xf09174) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CTRLQ_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CTRLQ_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CTRLQ_DWLEN (1) +union pcompleter_host_cfg_funtion_id_ctrlq_u { + struct pcompleter_host_cfg_funtion_id_ctrlq { + u32 dbg:10; /* [9:0] Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 Rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CTRLQ_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_MSGQ_AGED_ADDR (0xf09178) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_MSGQ_AGED_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_MSGQ_AGED_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_MSGQ_AGED_DWLEN (1) +union pcompleter_host_cfg_funtion_id_msgq_aged_u { + struct pcompleter_host_cfg_funtion_id_msgq_aged { + u32 dbg:10; /* [9:0] 
Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 Rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_MSGQ_AGED_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_MSGQ_ADDR (0xf0917c) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_MSGQ_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_MSGQ_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_MSGQ_DWLEN (1) +union pcompleter_host_cfg_funtion_id_msgq_u { + struct pcompleter_host_cfg_funtion_id_msgq { + u32 dbg:10; /* [9:0] Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 Rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_MSGQ_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_VDPA_NET_ADDR (0xf09180) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_VDPA_NET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_VDPA_NET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_VDPA_NET_DWLEN (1) +union pcompleter_host_cfg_funtion_id_vdpa_net_u { + struct pcompleter_host_cfg_funtion_id_vdpa_net { + u32 dbg:10; /* [9:0] Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 Rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_VDPA_NET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_VDPA_BLK_ADDR (0xf09184) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_VDPA_BLK_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_VDPA_BLK_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_VDPA_BLK_DWLEN (1) +union pcompleter_host_cfg_funtion_id_vdpa_blk_u { + struct pcompleter_host_cfg_funtion_id_vdpa_blk { + u32 dbg:10; /* [9:0] Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 Rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_VDPA_BLK_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_ERROR_ADDR (0xf0a000) +#define NBL_PCOMPLETER_HOST_ERROR_DEPTH (1) +#define NBL_PCOMPLETER_HOST_ERROR_WIDTH (32) +#define NBL_PCOMPLETER_HOST_ERROR_DWLEN (1) +union pcompleter_host_error_u { + struct pcompleter_host_error { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_ERROR_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_WARNING_ADDR (0xf0a004) +#define NBL_PCOMPLETER_HOST_WARNING_DEPTH (1) +#define NBL_PCOMPLETER_HOST_WARNING_WIDTH (32) +#define NBL_PCOMPLETER_HOST_WARNING_DWLEN (1) +union pcompleter_host_warning_u { + struct pcompleter_host_warning { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_WARNING_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_ADDR (0xf0a048) +#define NBL_PCOMPLETER_HOST_CFG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_DWLEN (1) +union pcompleter_host_cfg_u { + struct pcompleter_host_cfg { + u32 dmux_fifo_drop_err:1; /* [0] Default:0x0 RC */ + u32 tlp_out_drop_err:1; /* [1] Default:0x0 RC */ + u32 cif_axi_werr:1; /* [2] Default:0x0 RC */ + u32 cif_axi_rerr:1; /* [3] Default:0x0 RC */ + u32 cif_axi_ready_err:1; /* [4] Default:0x0 RC */ + u32 msg_axi_werr:1; /* [5] Default:0x0 RC */ + u32 msg_axi_ready_err:1; /* [6] Default:0x0 RC */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_DMUX_DEBUG_INFO_ADDR (0xf0a204) +#define NBL_PCOMPLETER_HOST_DMUX_DEBUG_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_DMUX_DEBUG_INFO_WIDTH 
(32) +#define NBL_PCOMPLETER_HOST_DMUX_DEBUG_INFO_DWLEN (1) +union pcompleter_host_dmux_debug_info_u { + struct pcompleter_host_dmux_debug_info { + u32 dbg:32; /* [31:0] Default:0x1 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_DMUX_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_L1_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a208) +#define NBL_PCOMPLETER_HOST_L1_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_L1_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_L1_MAP_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_l1_map_ram_err_info_reg_u { + struct pcompleter_host_l1_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_L1_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_L2_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a20c) +#define NBL_PCOMPLETER_HOST_L2_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_L2_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_L2_MAP_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_l2_map_ram_err_info_reg_u { + struct pcompleter_host_l2_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_L2_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_L3_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a210) +#define NBL_PCOMPLETER_HOST_L3_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_L3_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_L3_MAP_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_l3_map_ram_err_info_reg_u { + struct pcompleter_host_l3_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_L3_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_TLP_INFO_RAM_ERR_INFO_REG_ADDR (0xf0a214) +#define NBL_PCOMPLETER_HOST_TLP_INFO_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_TLP_INFO_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_TLP_INFO_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_tlp_info_ram_err_info_reg_u { + struct pcompleter_host_tlp_info_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_TLP_INFO_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_NOTIFY_INFO_RAM_ERR_INFO_REG_ADDR (0xf0a218) +#define NBL_PCOMPLETER_HOST_NOTIFY_INFO_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_NOTIFY_INFO_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_NOTIFY_INFO_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_notify_info_ram_err_info_reg_u { + struct pcompleter_host_notify_info_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_NOTIFY_INFO_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_DEBUG_INFO_ADDR (0xf0a304) +#define NBL_PCOMPLETER_HOST_RDMA_DEBUG_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_DEBUG_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_DEBUG_INFO_DWLEN (1) +union pcompleter_host_rdma_debug_info_u { + struct pcompleter_host_rdma_debug_info { + u32 dbg:32; /* [31:0] Default:0x1 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_L1_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a308) +#define NBL_PCOMPLETER_HOST_RDMA_L1_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_L1_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_L1_MAP_RAM_ERR_INFO_REG_DWLEN (1) 
+union pcompleter_host_rdma_l1_map_ram_err_info_reg_u { + struct pcompleter_host_rdma_l1_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_L1_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_L2_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a30c) +#define NBL_PCOMPLETER_HOST_RDMA_L2_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_L2_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_L2_MAP_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_rdma_l2_map_ram_err_info_reg_u { + struct pcompleter_host_rdma_l2_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_L2_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_L3_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a310) +#define NBL_PCOMPLETER_HOST_RDMA_L3_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_L3_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_L3_MAP_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_rdma_l3_map_ram_err_info_reg_u { + struct pcompleter_host_rdma_l3_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_L3_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_TLP_INFO_RAM_ERR_INFO_REG_ADDR (0xf0a314) +#define NBL_PCOMPLETER_HOST_RDMA_TLP_INFO_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_TLP_INFO_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_TLP_INFO_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_rdma_tlp_info_ram_err_info_reg_u { + struct pcompleter_host_rdma_tlp_info_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_TLP_INFO_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_NOTIFY_INFO_RAM_ERR_INFO_REG_ADDR (0xf0a318) +#define NBL_PCOMPLETER_HOST_RDMA_NOTIFY_INFO_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_NOTIFY_INFO_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_NOTIFY_INFO_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_rdma_notify_info_ram_err_info_reg_u { + struct pcompleter_host_rdma_notify_info_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_NOTIFY_INFO_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_JUDGE_MSIX_FID_RAM_ERR_INFO_ADDR (0xf0a434) +#define NBL_PCOMPLETER_HOST_JUDGE_MSIX_FID_RAM_ERR_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_JUDGE_MSIX_FID_RAM_ERR_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_JUDGE_MSIX_FID_RAM_ERR_INFO_DWLEN (1) +union pcompleter_host_judge_msix_fid_ram_err_info_u { + struct pcompleter_host_judge_msix_fid_ram_err_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_JUDGE_MSIX_FID_RAM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_PMSIX_DEBUG_INFO_ADDR (0xf0a500) +#define NBL_PCOMPLETER_HOST_PMSIX_DEBUG_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_PMSIX_DEBUG_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_PMSIX_DEBUG_INFO_DWLEN (1) +union pcompleter_host_pmsix_debug_info_u { + struct pcompleter_host_pmsix_debug_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_PMSIX_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_PMSIX_MAP_RAM_ERR_INFO_ADDR (0xf0a51c) +#define NBL_PCOMPLETER_HOST_PMSIX_MAP_RAM_ERR_INFO_DEPTH (1) 
+#define NBL_PCOMPLETER_HOST_PMSIX_MAP_RAM_ERR_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_PMSIX_MAP_RAM_ERR_INFO_DWLEN (1) +union pcompleter_host_pmsix_map_ram_err_info_u { + struct pcompleter_host_pmsix_map_ram_err_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_PMSIX_MAP_RAM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MUX_DEBUG_INFO_ADDR (0xf0a700) +#define NBL_PCOMPLETER_HOST_MUX_DEBUG_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MUX_DEBUG_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MUX_DEBUG_INFO_DWLEN (1) +union pcompleter_host_mux_debug_info_u { + struct pcompleter_host_mux_debug_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MUX_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MUX_PORT_0_PRE_READ_DEBUG_REG_ADDR (0xf0a718) +#define NBL_PCOMPLETER_HOST_MUX_PORT_0_PRE_READ_DEBUG_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MUX_PORT_0_PRE_READ_DEBUG_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MUX_PORT_0_PRE_READ_DEBUG_REG_DWLEN (1) +union pcompleter_host_mux_port_0_pre_read_debug_reg_u { + struct pcompleter_host_mux_port_0_pre_read_debug_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MUX_PORT_0_PRE_READ_DEBUG_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MUX_PORT_1_PRE_READ_DEBUG_REG_ADDR (0xf0a71c) +#define NBL_PCOMPLETER_HOST_MUX_PORT_1_PRE_READ_DEBUG_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MUX_PORT_1_PRE_READ_DEBUG_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MUX_PORT_1_PRE_READ_DEBUG_REG_DWLEN (1) +union pcompleter_host_mux_port_1_pre_read_debug_reg_u { + struct pcompleter_host_mux_port_1_pre_read_debug_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MUX_PORT_1_PRE_READ_DEBUG_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MUX_PORT_2_PRE_READ_DEBUG_REG_ADDR (0xf0a720) +#define NBL_PCOMPLETER_HOST_MUX_PORT_2_PRE_READ_DEBUG_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MUX_PORT_2_PRE_READ_DEBUG_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MUX_PORT_2_PRE_READ_DEBUG_REG_DWLEN (1) +union pcompleter_host_mux_port_2_pre_read_debug_reg_u { + struct pcompleter_host_mux_port_2_pre_read_debug_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MUX_PORT_2_PRE_READ_DEBUG_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MUX_PORT_3_PRE_READ_DEBUG_REG_ADDR (0xf0a724) +#define NBL_PCOMPLETER_HOST_MUX_PORT_3_PRE_READ_DEBUG_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MUX_PORT_3_PRE_READ_DEBUG_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MUX_PORT_3_PRE_READ_DEBUG_REG_DWLEN (1) +union pcompleter_host_mux_port_3_pre_read_debug_reg_u { + struct pcompleter_host_mux_port_3_pre_read_debug_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MUX_PORT_3_PRE_READ_DEBUG_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CIF_AXIR_PRE_DEBUG_INFO_ADDR (0xf0a820) +#define NBL_PCOMPLETER_HOST_CIF_AXIR_PRE_DEBUG_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CIF_AXIR_PRE_DEBUG_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CIF_AXIR_PRE_DEBUG_INFO_DWLEN (1) +union pcompleter_host_cif_axir_pre_debug_info_u { + struct pcompleter_host_cif_axir_pre_debug_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CIF_AXIR_PRE_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_EMP2PCIE_RDY_ADDR (0xf0a918) +#define 
NBL_PCOMPLETER_HOST_EMP2PCIE_RDY_DEPTH (1) +#define NBL_PCOMPLETER_HOST_EMP2PCIE_RDY_WIDTH (32) +#define NBL_PCOMPLETER_HOST_EMP2PCIE_RDY_DWLEN (1) +union pcompleter_host_emp2pcie_rdy_u { + struct pcompleter_host_emp2pcie_rdy { + u32 rdy:1; /* [0:0] Default:0x1 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_EMP2PCIE_RDY_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_ADDR (0xf0c000) +#define NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_DEPTH (520) +#define NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_WIDTH (128) +#define NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_DWLEN (4) +union pcompleter_host_function_msix_map_table_u { + struct pcompleter_host_function_msix_map_table { + u32 msix_base_addr_l:32; /* [31:0] Default:0x0 RW */ + u32 msix_base_addr_h:32; /* [63:32] Default:0x0 RW */ + u32 bdf_id:16; /* [79:64] Default:0x0 RW */ + u32 valid:1; /* [80] Default:0x0 RW */ + u32 rsv_l:32; /* [127:81] Default:0x0 RO */ + u32 rsv_h:15; /* [127:81] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_REG(r) (NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_DWLEN * 4) * (r)) + +#define NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_ADDR (0xf18000) +#define NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_DEPTH (8192) +#define NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_WIDTH (128) +#define NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_DWLEN (4) +union pcompleter_host_virtio_qid_map_table_u { + struct pcompleter_host_virtio_qid_map_table { + u32 local_qid:9; /* [8:0] Default:0x1ff RW */ + u32 bar_addr_l:32; /* [63:9] Default:0x7fffffffffffff RW */ + u32 bar_addr_h:23; /* [63:9] Default:0x7fffffffffffff RW */ + u32 global_qid:12; /* [75:64] Default:0xfff RW */ + u32 ctrlq_flag:1; /* [76] Default:0x1 RW */ + u32 rsv_l:32; /* [127:77] Default:0x0 RO */ + u32 Rsv_h:19; /* [127:77] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_REG(r) (NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_DWLEN * 4) * (r)) + +#define NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_ADDR (0xf38000) +#define NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_DEPTH (128) +#define NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_WIDTH (128) +#define NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_DWLEN (4) +union pcompleter_host_rdma_pfid_map_table_u { + struct pcompleter_host_rdma_pfid_map_table { + u32 bar_add_rsv:13; /* [12:0] Default:0x1fff RO */ + u32 bar_addr_l:32; /* [63:13] Default:0x7ffffffffffff RW */ + u32 bar_addr_h:19; /* [63:13] Default:0x7ffffffffffff RW */ + u32 pfid:6; /* [69:64] Default:0x3f RW */ + u32 rsv_l:32; /* [127:70] Default:0x0 RO */ + u32 Rsv_h:26; /* [127:70] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_REG(r) (NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_DWLEN * 4) * (r)) + +#define NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_ADDR (0xf40000) +#define NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_DEPTH (520) +#define NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_DWLEN (1) +union pcompleter_host_msixbar_tlp_mis_table_u { + struct 
pcompleter_host_msixbar_tlp_mis_table { + u32 miss:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_REG(r) (NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_DWLEN * 4) * (r)) + +#define NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_ADDR (0xf41000) +#define NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_DEPTH (520) +#define NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_DWLEN (1) +union pcompleter_host_msixbar_invld_tbl_table_u { + struct pcompleter_host_msixbar_invld_tbl_table { + u32 invld:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_REG(r) (NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_DWLEN * 4) * (r)) + +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_ADDR (0xf42000) +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_DEPTH (5120) +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_DWLEN (1) +union pcompleter_host_msix_fid_table_u { + struct pcompleter_host_msix_fid_table { + u32 fid:10; /* [09:00] Default:0x0 RW */ + u32 valid:1; /* [10:10] Default:0x0 RW */ + u32 Rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_REG(r) (NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vdpa.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vdpa.h new file mode 100644 index 0000000000000000000000000000000000000000..713a45a9257ffdd64795a4e67c75f645d045040a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vdpa.h @@ -0,0 +1,293 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_VDPA_H +#define NBL_VDPA_H 1 + +#include <linux/types.h> + +#define NBL_VDPA_BASE (0x00F98000) + +#define NBL_VDPA_INT_STATUS_ADDR (0xf98000) +#define NBL_VDPA_INT_STATUS_DEPTH (1) +#define NBL_VDPA_INT_STATUS_WIDTH (32) +#define NBL_VDPA_INT_STATUS_DWLEN (1) +union vdpa_int_status_u { + struct vdpa_int_status { + u32 fatal_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RWC */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RWC */ + u32 cif_err:1; /* [05:05] Default:0x0 RWC */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_VDPA_INT_MASK_ADDR (0xf98004) +#define NBL_VDPA_INT_MASK_DEPTH (1) +#define NBL_VDPA_INT_MASK_WIDTH (32) +#define NBL_VDPA_INT_MASK_DWLEN (1) +union vdpa_int_mask_u { + struct vdpa_int_mask { + u32 fatal_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RW */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RW */ + u32 cif_err:1; /* [05:05] Default:0x0 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_INT_MASK_DWLEN]; +} __packed; + +#define NBL_VDPA_INT_SET_ADDR (0xf98008) +#define NBL_VDPA_INT_SET_DEPTH (1) +#define NBL_VDPA_INT_SET_WIDTH (32) +#define NBL_VDPA_INT_SET_DWLEN (1) +union vdpa_int_set_u { + struct vdpa_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 WO */ + u32 data_cor_err:1; /* [04:04] Default:0x0 WO */ + u32 cif_err:1; /* [05:05] Default:0x0 WO */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_INT_SET_DWLEN]; +} __packed; + +#define NBL_VDPA_INIT_DONE_ADDR (0xf9800c) +#define NBL_VDPA_INIT_DONE_DEPTH (1) +#define NBL_VDPA_INIT_DONE_WIDTH (32) +#define NBL_VDPA_INIT_DONE_DWLEN (1) +union vdpa_init_done_u { + struct vdpa_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_VDPA_CAR_CTRL_ADDR (0xf98058) +#define NBL_VDPA_CAR_CTRL_DEPTH (1) +#define NBL_VDPA_CAR_CTRL_WIDTH (32) +#define NBL_VDPA_CAR_CTRL_DWLEN (1) +union vdpa_car_ctrl_u { + struct vdpa_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_VDPA_FLOW_EN_ADDR (0xf9805c) +#define NBL_VDPA_FLOW_EN_DEPTH (1) +#define NBL_VDPA_FLOW_EN_WIDTH (32) +#define NBL_VDPA_FLOW_EN_DWLEN (1) +union vdpa_flow_en_u { + struct vdpa_flow_en { + u32 ivdpa_cnt_en:1; /* [00:00] Default:0x1 RW */ + u32 ovdpa_cnt_en:1; /* [01:01] Default:0x1 RW */ + u32 vdpa_drop_cnt_en:1; /* [02:02] Default:0x1 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_FLOW_EN_DWLEN]; +} __packed; + +#define NBL_VDPA_CIF_ERR_INFO_ADDR (0xf98060) +#define NBL_VDPA_CIF_ERR_INFO_DEPTH (1) +#define NBL_VDPA_CIF_ERR_INFO_WIDTH (32) +#define NBL_VDPA_CIF_ERR_INFO_DWLEN (1) +union vdpa_cif_err_info_u {
+ struct vdpa_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_VDPA_EN_ADDR (0xf98100) +#define NBL_VDPA_EN_DEPTH (1) +#define NBL_VDPA_EN_WIDTH (32) +#define NBL_VDPA_EN_DWLEN (1) +union vdpa_en_u { + struct vdpa_en { + u32 vdpa_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_EN_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_BASE_ADDR_L_ADDR (0xf98104) +#define NBL_VDPA_RING_BASE_ADDR_L_DEPTH (1) +#define NBL_VDPA_RING_BASE_ADDR_L_WIDTH (32) +#define NBL_VDPA_RING_BASE_ADDR_L_DWLEN (1) +union vdpa_ring_base_addr_l_u { + struct vdpa_ring_base_addr_l { + u32 base_addr_l:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_VDPA_RING_BASE_ADDR_L_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_BASE_ADDR_H_ADDR (0xf98108) +#define NBL_VDPA_RING_BASE_ADDR_H_DEPTH (1) +#define NBL_VDPA_RING_BASE_ADDR_H_WIDTH (32) +#define NBL_VDPA_RING_BASE_ADDR_H_DWLEN (1) +union vdpa_ring_base_addr_h_u { + struct vdpa_ring_base_addr_h { + u32 base_addr_h:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_VDPA_RING_BASE_ADDR_H_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_SIZE_MASK_ADDR (0xf9810c) +#define NBL_VDPA_RING_SIZE_MASK_DEPTH (1) +#define NBL_VDPA_RING_SIZE_MASK_WIDTH (32) +#define NBL_VDPA_RING_SIZE_MASK_DWLEN (1) +union vdpa_ring_size_mask_u { + struct vdpa_ring_size_mask { + u32 size_mask:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_RING_SIZE_MASK_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_TPNTR_ADDR (0xf98110) +#define NBL_VDPA_RING_TPNTR_DEPTH (1) +#define NBL_VDPA_RING_TPNTR_WIDTH (32) +#define NBL_VDPA_RING_TPNTR_DWLEN (1) +union vdpa_ring_tpntr_u { + struct vdpa_ring_tpntr { + u32 tpntr:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_RING_TPNTR_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_HPNTR_ADDR (0xf98114) +#define NBL_VDPA_RING_HPNTR_DEPTH (1) +#define NBL_VDPA_RING_HPNTR_WIDTH (32) +#define NBL_VDPA_RING_HPNTR_DWLEN (1) +union vdpa_ring_hpntr_u { + struct vdpa_ring_hpntr { + u32 hpntr:16; /* [15:00] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_RING_HPNTR_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_HPNTR_RST_ADDR (0xf98118) +#define NBL_VDPA_RING_HPNTR_RST_DEPTH (1) +#define NBL_VDPA_RING_HPNTR_RST_WIDTH (32) +#define NBL_VDPA_RING_HPNTR_RST_DWLEN (1) +union vdpa_ring_hpntr_rst_u { + struct vdpa_ring_hpntr_rst { + u32 hpntr_rst:1; /* [00:00] Default:0x0 WO */ + u32 rdy:1; /* [01:01] Default:0x1 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_RING_HPNTR_RST_DWLEN]; +} __packed; + +#define NBL_VDPA_BURST_LEN_ADDR (0xf9811c) +#define NBL_VDPA_BURST_LEN_DEPTH (1) +#define NBL_VDPA_BURST_LEN_WIDTH (32) +#define NBL_VDPA_BURST_LEN_DWLEN (1) +union vdpa_burst_len_u { + struct vdpa_burst_len { + u32 burst_len:6; /* [05:00] Default:0x10 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_BURST_LEN_DWLEN]; +} __packed; + +#define NBL_VDPA_TIMEOUT_VALUE_ADDR (0xf98120) +#define NBL_VDPA_TIMEOUT_VALUE_DEPTH (1) +#define NBL_VDPA_TIMEOUT_VALUE_WIDTH (32) +#define NBL_VDPA_TIMEOUT_VALUE_DWLEN (1) +union vdpa_timeout_value_u { + struct vdpa_timeout_value 
{ + u32 timeout_value:32; /* [31:00] Default:0x190 RW */ + } __packed info; + u32 data[NBL_VDPA_TIMEOUT_VALUE_DWLEN]; +} __packed; + +#define NBL_VDPA_DIF_MODE_ADDR (0xf98124) +#define NBL_VDPA_DIF_MODE_DEPTH (1) +#define NBL_VDPA_DIF_MODE_WIDTH (32) +#define NBL_VDPA_DIF_MODE_DWLEN (1) +union vdpa_dif_mode_u { + struct vdpa_dif_mode { + u32 dif_mode:3; /* [02:00] Default:0x2 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_DIF_MODE_DWLEN]; +} __packed; + +#define NBL_VDPA_DIF_INFO_ADDR (0xf98128) +#define NBL_VDPA_DIF_INFO_DEPTH (1) +#define NBL_VDPA_DIF_INFO_WIDTH (32) +#define NBL_VDPA_DIF_INFO_DWLEN (1) +union vdpa_dif_info_u { + struct vdpa_dif_info { + u32 dif_info:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_DIF_INFO_DWLEN]; +} __packed; + +#define NBL_VDPA_DIF_BDF_ADDR (0xf9812c) +#define NBL_VDPA_DIF_BDF_DEPTH (1) +#define NBL_VDPA_DIF_BDF_WIDTH (32) +#define NBL_VDPA_DIF_BDF_DWLEN (1) +union vdpa_dif_bdf_u { + struct vdpa_dif_bdf { + u32 dif_bdf:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_DIF_BDF_DWLEN]; +} __packed; + +#define NBL_VDPA_DIF_INT_ADDR (0xf98130) +#define NBL_VDPA_DIF_INT_DEPTH (1) +#define NBL_VDPA_DIF_INT_WIDTH (32) +#define NBL_VDPA_DIF_INT_DWLEN (1) +union vdpa_dif_int_u { + struct vdpa_dif_int { + u32 dif_int:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_DIF_INT_DWLEN]; +} __packed; + +#define NBL_VDPA_MDL_INFO_ADDR (0xf98134) +#define NBL_VDPA_MDL_INFO_DEPTH (1) +#define NBL_VDPA_MDL_INFO_WIDTH (32) +#define NBL_VDPA_MDL_INFO_DWLEN (1) +union vdpa_mdl_info_u { + struct vdpa_mdl_info { + u32 verison_id:16; /* [15:00] Default:0x0001 RO */ + u32 prj_id:16; /* [31:16] Default:0x0020 RO */ + } __packed info; + u32 data[NBL_VDPA_MDL_INFO_DWLEN]; +} __packed; + +#define NBL_VDPA_VERSION_ADDR (0xf98138) +#define NBL_VDPA_VERSION_DEPTH (1) +#define NBL_VDPA_VERSION_WIDTH (32) +#define NBL_VDPA_VERSION_DWLEN (1) +union vdpa_version_u { + struct vdpa_version { + u32 date:32; /* [31:00] Default:0x20220615 RO */ + } __packed info; + u32 data[NBL_VDPA_VERSION_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe.h new file mode 100644 index 0000000000000000000000000000000000000000..05c939d96ceea3fa5e9e18ff3fbf642478ff4020 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe.h @@ -0,0 +1,11 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#include "nbl_ppe_ipro.h" +#include "nbl_ppe_epro.h" +#include "nbl_ppe_pp0.h" +#include "nbl_ppe_pp1.h" +#include "nbl_ppe_pp2.h" +#include "nbl_ppe_fem.h" +#include "nbl_ppe_mcc.h" +#include "nbl_ppe_acl.h" diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_acl.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_acl.h new file mode 100644 index 0000000000000000000000000000000000000000..2873e4815d3387e4972c85726c2e6331fc986e50 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_acl.h @@ -0,0 +1,2412 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_ACL_H +#define NBL_ACL_H 1 + +#include <linux/types.h> + +#define NBL_ACL_BASE (0x00B64000) + +#define NBL_ACL_INT_STATUS_ADDR (0xb64000) +#define NBL_ACL_INT_STATUS_DEPTH (1) +#define NBL_ACL_INT_STATUS_WIDTH (32) +#define NBL_ACL_INT_STATUS_DWLEN (1) +union acl_int_status_u { + struct acl_int_status { + u32 fifo_uflw_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [02:02] Default:0x0 RWC */ + u32 data_cor_err:1; /* [03:03] Default:0x0 RWC */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 set_dport_encode_cfg_err:1; /* [05:05] Default:0x0 RWC */ + u32 tcam_cor_err:1; /* [06:06] Default:0x0 RWC */ + u32 tcam_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 flow_id_err:1; /* [08:08] Default:0x0 RWC */ + u32 stat_id_conflict_int:1; /* [09:09] Default:0x0 RWC */ + u32 flow_id_conflict_int:1; /* [10:10] Default:0x0 RWC */ + u32 fsm_err:1; /* [11:11] Default:0x0 RWC */ + u32 nxt_stage_lp_cfg_err:1; /* [12:12] Default:0x0 RWC */ + u32 input_err:1; /* [13:13] Default:0x0 RWC */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_ACL_INT_MASK_ADDR (0xb64004) +#define NBL_ACL_INT_MASK_DEPTH (1) +#define NBL_ACL_INT_MASK_WIDTH (32) +#define NBL_ACL_INT_MASK_DWLEN (1) +union acl_int_mask_u { + struct acl_int_mask { + u32 fifo_uflw_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [01:01] Default:0x0 RW */ + u32 data_ucor_err:1; /* [02:02] Default:0x0 RW */ + u32 data_cor_err:1; /* [03:03] Default:0x0 RW */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 set_dport_encode_cfg_err:1; /* [05:05] Default:0x0 RW */ + u32 tcam_cor_err:1; /* [06:06] Default:0x0 RW */ + u32 tcam_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 flow_id_err:1; /* [08:08] Default:0x0 RW */ + u32 stat_id_conflict_int:1; /* [09:09] Default:0x0 RW */ + u32 flow_id_conflict_int:1; /* [10:10] Default:0x0 RW */ + u32 fsm_err:1; /* [11:11] Default:0x0 RW */ + u32 nxt_stage_lp_cfg_err:1; /* [12:12] Default:0x0 RW */ + u32 input_err:1; /* [13:13] Default:0x0 RW */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INT_MASK_DWLEN]; +} __packed; + +#define NBL_ACL_INT_SET_ADDR (0xb64008) +#define NBL_ACL_INT_SET_DEPTH (1) +#define NBL_ACL_INT_SET_WIDTH (32) +#define NBL_ACL_INT_SET_DWLEN (1) +union acl_int_set_u { + struct acl_int_set { + u32 fifo_uflw_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [01:01] Default:0x0 WO */ + u32 data_ucor_err:1; /* [02:02] Default:0x0 WO */ + u32 data_cor_err:1; /* [03:03] Default:0x0 WO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 set_dport_encode_cfg_err:1; /* [05:05] Default:0x0 WO */ + u32 tcam_cor_err:1; /* [06:06] Default:0x0 WO */ + u32 tcam_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 flow_id_err:1; /* [08:08] Default:0x0 WO */ + u32 stat_id_conflict_int:1; /* [09:09] Default:0x0 WO */ + u32 flow_id_conflict_int:1; /* [10:10] Default:0x0 WO */ + u32 fsm_err:1; /* [11:11] Default:0x0 WO */ + u32 nxt_stage_lp_cfg_err:1; /* [12:12] Default:0x0 WO */ + u32 input_err:1; /* [13:13] Default:0x0 WO */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INT_SET_DWLEN]; +} __packed; + +#define NBL_ACL_INIT_DONE_ADDR (0xb6400c) +#define NBL_ACL_INIT_DONE_DEPTH (1) +#define NBL_ACL_INIT_DONE_WIDTH (32) +#define NBL_ACL_INIT_DONE_DWLEN (1) +union acl_init_done_u { + struct acl_init_done { + u32 done:1; /* [00:00] Default:0x0 RO
*/ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_ACL_CIF_ERR_INFO_ADDR (0xb64084) +#define NBL_ACL_CIF_ERR_INFO_DEPTH (1) +#define NBL_ACL_CIF_ERR_INFO_WIDTH (32) +#define NBL_ACL_CIF_ERR_INFO_DWLEN (1) +union acl_cif_err_info_u { + struct acl_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_ACL_INIT_START_ADDR (0xb6409c) +#define NBL_ACL_INIT_START_DEPTH (1) +#define NBL_ACL_INIT_START_WIDTH (32) +#define NBL_ACL_INIT_START_DWLEN (1) +union acl_init_start_u { + struct acl_init_start { + u32 start:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INIT_START_DWLEN]; +} __packed; + +#define NBL_ACL_BYPASS_REG_ADDR (0xb64100) +#define NBL_ACL_BYPASS_REG_DEPTH (1) +#define NBL_ACL_BYPASS_REG_WIDTH (32) +#define NBL_ACL_BYPASS_REG_DWLEN (1) +union acl_bypass_reg_u { + struct acl_bypass_reg { + u32 acl_bypass:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_BYPASS_REG_DWLEN]; +} __packed; + +#define NBL_ACL_LOOP_BACK_EN_ADDR (0xb64108) +#define NBL_ACL_LOOP_BACK_EN_DEPTH (1) +#define NBL_ACL_LOOP_BACK_EN_WIDTH (32) +#define NBL_ACL_LOOP_BACK_EN_DWLEN (1) +union acl_loop_back_en_u { + struct acl_loop_back_en { + u32 loop_back_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_LOOP_BACK_EN_DWLEN]; +} __packed; + +#define NBL_ACL_LOOP_FLAG_EN_ADDR (0xb6410c) +#define NBL_ACL_LOOP_FLAG_EN_DEPTH (1) +#define NBL_ACL_LOOP_FLAG_EN_WIDTH (32) +#define NBL_ACL_LOOP_FLAG_EN_DWLEN (1) +union acl_loop_flag_en_u { + struct acl_loop_flag_en { + u32 flag_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_LOOP_FLAG_EN_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION0_ADDR (0xb64160) +#define NBL_ACL_DEFAULT_ACTION0_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION0_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION0_DWLEN (1) +union acl_default_action0_u { + struct acl_default_action0 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION0_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION1_ADDR (0xb64164) +#define NBL_ACL_DEFAULT_ACTION1_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION1_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION1_DWLEN (1) +union acl_default_action1_u { + struct acl_default_action1 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION1_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION2_ADDR (0xb64168) +#define NBL_ACL_DEFAULT_ACTION2_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION2_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION2_DWLEN (1) +union acl_default_action2_u { + struct acl_default_action2 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION2_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION3_ADDR (0xb6416c) +#define NBL_ACL_DEFAULT_ACTION3_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION3_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION3_DWLEN (1) +union acl_default_action3_u { + struct acl_default_action3 { + u32 data:22; /* [21:0] 
Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION3_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION4_ADDR (0xb64170) +#define NBL_ACL_DEFAULT_ACTION4_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION4_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION4_DWLEN (1) +union acl_default_action4_u { + struct acl_default_action4 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION4_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION5_ADDR (0xb64174) +#define NBL_ACL_DEFAULT_ACTION5_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION5_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION5_DWLEN (1) +union acl_default_action5_u { + struct acl_default_action5 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION5_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION6_ADDR (0xb64178) +#define NBL_ACL_DEFAULT_ACTION6_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION6_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION6_DWLEN (1) +union acl_default_action6_u { + struct acl_default_action6 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION6_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION7_ADDR (0xb6417c) +#define NBL_ACL_DEFAULT_ACTION7_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION7_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION7_DWLEN (1) +union acl_default_action7_u { + struct acl_default_action7 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION7_DWLEN]; +} __packed; + +#define NBL_ACL_SET_FLAG_ADDR (0xb64200) +#define NBL_ACL_SET_FLAG_DEPTH (1) +#define NBL_ACL_SET_FLAG_WIDTH (32) +#define NBL_ACL_SET_FLAG_DWLEN (1) +union acl_set_flag_u { + struct acl_set_flag { + u32 set_flag0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_SET_FLAG_DWLEN]; +} __packed; + +#define NBL_ACL_CLEAR_FLAG_ADDR (0xb64204) +#define NBL_ACL_CLEAR_FLAG_DEPTH (1) +#define NBL_ACL_CLEAR_FLAG_WIDTH (32) +#define NBL_ACL_CLEAR_FLAG_DWLEN (1) +union acl_clear_flag_u { + struct acl_clear_flag { + u32 clear_flag0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_CLEAR_FLAG_DWLEN]; +} __packed; + +#define NBL_ACL_SET_FLAG0_ADDR (0xb64208) +#define NBL_ACL_SET_FLAG0_DEPTH (1) +#define NBL_ACL_SET_FLAG0_WIDTH (32) +#define NBL_ACL_SET_FLAG0_DWLEN (1) +union acl_set_flag0_u { + struct acl_set_flag0 { + u32 set_flag0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_SET_FLAG0_DWLEN]; +} __packed; + +#define NBL_ACL_CLEAR_FLAG0_ADDR (0xb6420c) +#define NBL_ACL_CLEAR_FLAG0_DEPTH (1) +#define NBL_ACL_CLEAR_FLAG0_WIDTH (32) +#define NBL_ACL_CLEAR_FLAG0_DWLEN (1) +union acl_clear_flag0_u { + struct acl_clear_flag0 { + u32 clear_flag0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_CLEAR_FLAG0_DWLEN]; +} __packed; + +#define NBL_ACL_DPORT_CFG_ADDR (0xb64220) +#define NBL_ACL_DPORT_CFG_DEPTH (1) +#define NBL_ACL_DPORT_CFG_WIDTH (32) +#define NBL_ACL_DPORT_CFG_DWLEN (1) +union acl_dport_cfg_u { + struct acl_dport_cfg { + u32 act_id:6; /* [05:00] Default:0x9 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DPORT_CFG_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY0_ADDR (0xb64230) +#define NBL_ACL_ACTION_PRIORITY0_DEPTH (1) +#define 
NBL_ACL_ACTION_PRIORITY0_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY0_DWLEN (1) +union acl_action_priority0_u { + struct acl_action_priority0 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY0_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY1_ADDR (0xb64234) +#define NBL_ACL_ACTION_PRIORITY1_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY1_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY1_DWLEN (1) +union acl_action_priority1_u { + struct acl_action_priority1 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY1_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY2_ADDR (0xb64238) +#define NBL_ACL_ACTION_PRIORITY2_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY2_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY2_DWLEN (1) +union acl_action_priority2_u { + struct acl_action_priority2 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY2_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY3_ADDR (0xb6423c) +#define NBL_ACL_ACTION_PRIORITY3_DEPTH (1) 
+#define NBL_ACL_ACTION_PRIORITY3_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY3_DWLEN (1) +union acl_action_priority3_u { + struct acl_action_priority3 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY3_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY4_ADDR (0xb64240) +#define NBL_ACL_ACTION_PRIORITY4_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY4_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY4_DWLEN (1) +union acl_action_priority4_u { + struct acl_action_priority4 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY4_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY5_ADDR (0xb64244) +#define NBL_ACL_ACTION_PRIORITY5_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY5_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY5_DWLEN (1) +union acl_action_priority5_u { + struct acl_action_priority5 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY5_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY6_ADDR (0xb64248) +#define NBL_ACL_ACTION_PRIORITY6_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY6_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY6_DWLEN (1) +union acl_action_priority6_u { + struct acl_action_priority6 { + u32 
action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY6_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY7_ADDR (0xb6424c) +#define NBL_ACL_ACTION_PRIORITY7_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY7_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY7_DWLEN (1) +union acl_action_priority7_u { + struct acl_action_priority7 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY7_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_MASK_ADDR_ADDR (0xb64280) +#define NBL_ACL_TCAM_MASK_ADDR_DEPTH (1) +#define NBL_ACL_TCAM_MASK_ADDR_WIDTH (32) +#define NBL_ACL_TCAM_MASK_ADDR_DWLEN (1) +union acl_tcam_mask_addr_u { + struct acl_tcam_mask_addr { + u32 addr0:9; /* [08:00] Default:0x0 RW */ + u32 addr0_en:1; /* [09:09] Default:0x0 RW */ + u32 addr1:9; /* [18:10] Default:0x0 RW */ + u32 addr1_en:1; /* [19:19] Default:0x0 RW */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_MASK_ADDR_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_MASK_BTM_ADDR (0xb64284) +#define NBL_ACL_TCAM_MASK_BTM_DEPTH (1) +#define NBL_ACL_TCAM_MASK_BTM_WIDTH (32) +#define NBL_ACL_TCAM_MASK_BTM_DWLEN (1) +union acl_tcam_mask_btm_u { + struct acl_tcam_mask_btm { + u32 btm:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_MASK_BTM_DWLEN]; +} __packed; + +#define NBL_ACL_CAP_ADDR (0xb64288) +#define NBL_ACL_CAP_DEPTH (1) +#define NBL_ACL_CAP_WIDTH (32) +#define NBL_ACL_CAP_DWLEN (1) +union acl_cap_u { + struct acl_cap { + u32 onloop_cap_mode:1; /* [00:00] Default:0x0 RW */ + u32 noloop_cap_start:1; /* [01:01] Default:0x0 WO */ + u32 loop_cap_mode:1; /* [02:02] Default:0x0 RW */ + u32 loop_cap_start:1; /* [03:03] Default:0x0 WO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_CAP_DWLEN]; +} __packed; + +#define NBL_ACL_FLOW_ID_STAT_ACT_ADDR (0xb64300) +#define NBL_ACL_FLOW_ID_STAT_ACT_DEPTH (1) +#define NBL_ACL_FLOW_ID_STAT_ACT_WIDTH (32) +#define NBL_ACL_FLOW_ID_STAT_ACT_DWLEN (1) 
+union acl_flow_id_stat_act_u { + struct acl_flow_id_stat_act { + u32 flow_id_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_FLOW_ID_STAT_ACT_DWLEN]; +} __packed; + +#define NBL_ACL_FLOW_ID_STAT_GLB_CLR_ADDR (0xb64304) +#define NBL_ACL_FLOW_ID_STAT_GLB_CLR_DEPTH (1) +#define NBL_ACL_FLOW_ID_STAT_GLB_CLR_WIDTH (32) +#define NBL_ACL_FLOW_ID_STAT_GLB_CLR_DWLEN (1) +union acl_flow_id_stat_glb_clr_u { + struct acl_flow_id_stat_glb_clr { + u32 glb_clr:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_FLOW_ID_STAT_GLB_CLR_DWLEN]; +} __packed; + +#define NBL_ACL_FLOW_ID_STAT_RD_CLR_ADDR (0xb64308) +#define NBL_ACL_FLOW_ID_STAT_RD_CLR_DEPTH (1) +#define NBL_ACL_FLOW_ID_STAT_RD_CLR_WIDTH (32) +#define NBL_ACL_FLOW_ID_STAT_RD_CLR_DWLEN (1) +union acl_flow_id_stat_rd_clr_u { + struct acl_flow_id_stat_rd_clr { + u32 cpu_rd_clr:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_FLOW_ID_STAT_RD_CLR_DWLEN]; +} __packed; + +#define NBL_ACL_FLOW_ID_STAT_DONE_ADDR (0xb64310) +#define NBL_ACL_FLOW_ID_STAT_DONE_DEPTH (1) +#define NBL_ACL_FLOW_ID_STAT_DONE_WIDTH (32) +#define NBL_ACL_FLOW_ID_STAT_DONE_DWLEN (1) +union acl_flow_id_stat_done_u { + struct acl_flow_id_stat_done { + u32 glb_clr_done:1; /* [00:00] Default:0x0 RO */ + u32 stat_init_done:1; /* [01:01] Default:0x0 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_FLOW_ID_STAT_DONE_DWLEN]; +} __packed; + +#define NBL_ACL_SCAN_TH_ADDR (0xb64318) +#define NBL_ACL_SCAN_TH_DEPTH (1) +#define NBL_ACL_SCAN_TH_WIDTH (32) +#define NBL_ACL_SCAN_TH_DWLEN (1) +union acl_scan_th_u { + struct acl_scan_th { + u32 scan_th:10; /* [09:00] Default:0xff RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_SCAN_TH_DWLEN]; +} __packed; + +#define NBL_ACL_SCAN_EN_ADDR (0xb6431c) +#define NBL_ACL_SCAN_EN_DEPTH (1) +#define NBL_ACL_SCAN_EN_WIDTH (32) +#define NBL_ACL_SCAN_EN_DWLEN (1) +union acl_scan_en_u { + struct acl_scan_en { + u32 scan_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_SCAN_EN_DWLEN]; +} __packed; + +#define NBL_ACL_STAT_ID_STAT_GLB_CLR_ADDR (0xb64320) +#define NBL_ACL_STAT_ID_STAT_GLB_CLR_DEPTH (1) +#define NBL_ACL_STAT_ID_STAT_GLB_CLR_WIDTH (32) +#define NBL_ACL_STAT_ID_STAT_GLB_CLR_DWLEN (1) +union acl_stat_id_stat_glb_clr_u { + struct acl_stat_id_stat_glb_clr { + u32 glb_clr:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_STAT_ID_STAT_GLB_CLR_DWLEN]; +} __packed; + +#define NBL_ACL_STAT_ID_STAT_RD_CLR_ADDR (0xb64324) +#define NBL_ACL_STAT_ID_STAT_RD_CLR_DEPTH (1) +#define NBL_ACL_STAT_ID_STAT_RD_CLR_WIDTH (32) +#define NBL_ACL_STAT_ID_STAT_RD_CLR_DWLEN (1) +union acl_stat_id_stat_rd_clr_u { + struct acl_stat_id_stat_rd_clr { + u32 cpu_rd_clr:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_STAT_ID_STAT_RD_CLR_DWLEN]; +} __packed; + +#define NBL_ACL_STAT_ID_STAT_DONE_ADDR (0xb64328) +#define NBL_ACL_STAT_ID_STAT_DONE_DEPTH (1) +#define NBL_ACL_STAT_ID_STAT_DONE_WIDTH (32) +#define NBL_ACL_STAT_ID_STAT_DONE_DWLEN (1) +union acl_stat_id_stat_done_u { + struct acl_stat_id_stat_done { + u32 glb_clr_done:1; /* [00:00] Default:0x0 RO */ + u32 stat_init_done:1; /* [01:01] Default:0x0 RO */ + u32 rsv:30; /* 
[31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_STAT_ID_STAT_DONE_DWLEN]; +} __packed; + +#define NBL_ACL_STAT_ID_ACT_ADDR (0xb6432c) +#define NBL_ACL_STAT_ID_ACT_DEPTH (1) +#define NBL_ACL_STAT_ID_ACT_WIDTH (32) +#define NBL_ACL_STAT_ID_ACT_DWLEN (1) +union acl_stat_id_act_u { + struct acl_stat_id_act { + u32 act_id:6; /* [05:00] Default:0x10 RW */ + u32 act_en:1; /* [06:06] Default:0x0 RW */ + u32 rsv:25; /* [31:07] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_STAT_ID_ACT_DWLEN]; +} __packed; + +#define NBL_ACL_CAR_CTRL_ADDR (0xb64410) +#define NBL_ACL_CAR_CTRL_DEPTH (1) +#define NBL_ACL_CAR_CTRL_WIDTH (32) +#define NBL_ACL_CAR_CTRL_DWLEN (1) +union acl_car_ctrl_u { + struct acl_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_ACL_IN_ADDR (0xb64600) +#define NBL_ACL_IN_DEPTH (1) +#define NBL_ACL_IN_WIDTH (32) +#define NBL_ACL_IN_DWLEN (1) +union acl_in_u { + struct acl_in { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_IN_DWLEN]; +} __packed; + +#define NBL_ACL_OUT_ADDR (0xb64608) +#define NBL_ACL_OUT_DEPTH (1) +#define NBL_ACL_OUT_WIDTH (32) +#define NBL_ACL_OUT_DWLEN (1) +union acl_out_u { + struct acl_out { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_OUT_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_SE_ADDR (0xb6461c) +#define NBL_ACL_TCAM_SE_DEPTH (1) +#define NBL_ACL_TCAM_SE_WIDTH (32) +#define NBL_ACL_TCAM_SE_DWLEN (1) +union acl_tcam_se_u { + struct acl_tcam_se { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_TCAM_SE_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR (0xb64624) +#define NBL_ACL_TCAM_HIT_DEPTH (1) +#define NBL_ACL_TCAM_HIT_WIDTH (32) +#define NBL_ACL_TCAM_HIT_DWLEN (1) +union acl_tcam_hit_u { + struct acl_tcam_hit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR0_ADDR (0xb6462c) +#define NBL_ACL_TCAM_HIT_ADDR0_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR0_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR0_DWLEN (1) +union acl_tcam_hit_addr0_u { + struct acl_tcam_hit_addr0 { + u32 addr0:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id0:4; /* [12:09] Default:0x0 RO */ + u32 addr1:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id1:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR0_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR1_ADDR (0xb64630) +#define NBL_ACL_TCAM_HIT_ADDR1_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR1_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR1_DWLEN (1) +union acl_tcam_hit_addr1_u { + struct acl_tcam_hit_addr1 { + u32 addr2:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id2:4; /* [12:09] Default:0x0 RO */ + u32 addr3:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id3:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR1_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR2_ADDR (0xb64634) +#define NBL_ACL_TCAM_HIT_ADDR2_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR2_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR2_DWLEN (1) +union acl_tcam_hit_addr2_u { + struct acl_tcam_hit_addr2 { + u32 addr4:9; /* [08:00] Default:0x0 RO */ + 
u32 tcam_id4:4; /* [12:09] Default:0x0 RO */ + u32 addr5:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id5:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR2_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR3_ADDR (0xb64638) +#define NBL_ACL_TCAM_HIT_ADDR3_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR3_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR3_DWLEN (1) +union acl_tcam_hit_addr3_u { + struct acl_tcam_hit_addr3 { + u32 addr6:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id6:4; /* [12:09] Default:0x0 RO */ + u32 addr7:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id7:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR3_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR4_ADDR (0xb6463c) +#define NBL_ACL_TCAM_HIT_ADDR4_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR4_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR4_DWLEN (1) +union acl_tcam_hit_addr4_u { + struct acl_tcam_hit_addr4 { + u32 addr8:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id8:4; /* [12:09] Default:0x0 RO */ + u32 addr9:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id9:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR4_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR5_ADDR (0xb64640) +#define NBL_ACL_TCAM_HIT_ADDR5_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR5_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR5_DWLEN (1) +union acl_tcam_hit_addr5_u { + struct acl_tcam_hit_addr5 { + u32 addr10:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id10:4; /* [12:09] Default:0x0 RO */ + u32 addr11:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id11:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR5_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR6_ADDR (0xb64644) +#define NBL_ACL_TCAM_HIT_ADDR6_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR6_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR6_DWLEN (1) +union acl_tcam_hit_addr6_u { + struct acl_tcam_hit_addr6 { + u32 addr12:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id12:4; /* [12:09] Default:0x0 RO */ + u32 addr13:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id13:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR6_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR7_ADDR (0xb64648) +#define NBL_ACL_TCAM_HIT_ADDR7_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR7_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR7_DWLEN (1) +union acl_tcam_hit_addr7_u { + struct acl_tcam_hit_addr7 { + u32 addr14:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id14:4; /* [12:09] Default:0x0 RO */ + u32 addr15:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id15:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR7_DWLEN]; +} __packed; + +#define NBL_ACL_CMP_SET_VEC_ADDR (0xb64650) +#define NBL_ACL_CMP_SET_VEC_DEPTH (1) +#define NBL_ACL_CMP_SET_VEC_WIDTH (32) +#define NBL_ACL_CMP_SET_VEC_DWLEN (1) +union acl_cmp_set_vec_u { + struct acl_cmp_set_vec { + u32 vec:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_CMP_SET_VEC_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_NOLOOP_HIT_VLD_ADDR (0xb64670) +#define NBL_ACL_TCAM_NOLOOP_HIT_VLD_DEPTH (1) +#define NBL_ACL_TCAM_NOLOOP_HIT_VLD_WIDTH (32) +#define NBL_ACL_TCAM_NOLOOP_HIT_VLD_DWLEN (1) +union acl_tcam_noloop_hit_vld_u { + struct acl_tcam_noloop_hit_vld { + u32 
hit_vld:16; /* [15:00] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_NOLOOP_HIT_VLD_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_LOOP_HIT_VLD_ADDR (0xb64674) +#define NBL_ACL_TCAM_LOOP_HIT_VLD_DEPTH (1) +#define NBL_ACL_TCAM_LOOP_HIT_VLD_WIDTH (32) +#define NBL_ACL_TCAM_LOOP_HIT_VLD_DWLEN (1) +union acl_tcam_loop_hit_vld_u { + struct acl_tcam_loop_hit_vld { + u32 hit_vld:16; /* [15:00] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_LOOP_HIT_VLD_DWLEN]; +} __packed; + +#define NBL_ACL_ISE_TCAM_HIT_ADDR (0xb64680) +#define NBL_ACL_ISE_TCAM_HIT_DEPTH (1) +#define NBL_ACL_ISE_TCAM_HIT_WIDTH (32) +#define NBL_ACL_ISE_TCAM_HIT_DWLEN (1) +union acl_ise_tcam_hit_u { + struct acl_ise_tcam_hit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_ISE_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_ISE_TCAM_NOHIT_ADDR (0xb64684) +#define NBL_ACL_ISE_TCAM_NOHIT_DEPTH (1) +#define NBL_ACL_ISE_TCAM_NOHIT_WIDTH (32) +#define NBL_ACL_ISE_TCAM_NOHIT_DWLEN (1) +union acl_ise_tcam_nohit_u { + struct acl_ise_tcam_nohit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_ISE_TCAM_NOHIT_DWLEN]; +} __packed; + +#define NBL_ACL_LOOP_TCAM_HIT_ADDR (0xb64688) +#define NBL_ACL_LOOP_TCAM_HIT_DEPTH (1) +#define NBL_ACL_LOOP_TCAM_HIT_WIDTH (32) +#define NBL_ACL_LOOP_TCAM_HIT_DWLEN (1) +union acl_loop_tcam_hit_u { + struct acl_loop_tcam_hit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_LOOP_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_NOLOOP_TCAM_HIT_ADDR (0xb6468c) +#define NBL_ACL_NOLOOP_TCAM_HIT_DEPTH (1) +#define NBL_ACL_NOLOOP_TCAM_HIT_WIDTH (32) +#define NBL_ACL_NOLOOP_TCAM_HIT_DWLEN (1) +union acl_noloop_tcam_hit_u { + struct acl_noloop_tcam_hit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_NOLOOP_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR0_ADDR (0xb64690) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR0_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR0_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR0_DWLEN (1) +union acl_tcam_hit_loop_addr0_u { + struct acl_tcam_hit_loop_addr0 { + u32 addr0:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id0:4; /* [12:09] Default:0x0 RO */ + u32 addr1:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id1:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR0_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR1_ADDR (0xb64694) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR1_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR1_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR1_DWLEN (1) +union acl_tcam_hit_loop_addr1_u { + struct acl_tcam_hit_loop_addr1 { + u32 addr2:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id2:4; /* [12:09] Default:0x0 RO */ + u32 addr3:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id3:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR1_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR2_ADDR (0xb64698) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR2_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR2_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR2_DWLEN (1) +union acl_tcam_hit_loop_addr2_u { + struct acl_tcam_hit_loop_addr2 { + u32 addr4:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id4:4; /* [12:09] Default:0x0 RO */ + u32 addr5:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id5:4; 
/* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR2_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR3_ADDR (0xb6469c) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR3_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR3_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR3_DWLEN (1) +union acl_tcam_hit_loop_addr3_u { + struct acl_tcam_hit_loop_addr3 { + u32 addr6:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id6:4; /* [12:09] Default:0x0 RO */ + u32 addr7:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id7:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR3_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR4_ADDR (0xb646a0) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR4_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR4_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR4_DWLEN (1) +union acl_tcam_hit_loop_addr4_u { + struct acl_tcam_hit_loop_addr4 { + u32 addr8:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id8:4; /* [12:09] Default:0x0 RO */ + u32 addr9:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id9:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR4_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR5_ADDR (0xb646a4) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR5_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR5_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR5_DWLEN (1) +union acl_tcam_hit_loop_addr5_u { + struct acl_tcam_hit_loop_addr5 { + u32 addr10:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id10:4; /* [12:09] Default:0x0 RO */ + u32 addr11:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id11:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR5_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR6_ADDR (0xb646a8) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR6_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR6_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR6_DWLEN (1) +union acl_tcam_hit_loop_addr6_u { + struct acl_tcam_hit_loop_addr6 { + u32 addr12:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id12:4; /* [12:09] Default:0x0 RO */ + u32 addr13:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id13:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR6_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR7_ADDR (0xb646ac) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR7_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR7_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR7_DWLEN (1) +union acl_tcam_hit_loop_addr7_u { + struct acl_tcam_hit_loop_addr7 { + u32 addr14:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id14:4; /* [12:09] Default:0x0 RO */ + u32 addr15:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id15:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR7_DWLEN]; +} __packed; + +#define NBL_ACL_OUT_DROP_ADDR (0xb646c8) +#define NBL_ACL_OUT_DROP_DEPTH (1) +#define NBL_ACL_OUT_DROP_WIDTH (32) +#define NBL_ACL_OUT_DROP_DWLEN (1) +union acl_out_drop_u { + struct acl_out_drop { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_OUT_DROP_DWLEN]; +} __packed; + +#define NBL_ACL_NXT_STAGE_ADDR (0xb646d0) +#define NBL_ACL_NXT_STAGE_DEPTH (1) +#define NBL_ACL_NXT_STAGE_WIDTH (32) +#define NBL_ACL_NXT_STAGE_DWLEN (1) +union acl_nxt_stage_u { + struct acl_nxt_stage { + u32 in_nxt_stage:4; 
/* [03:00] Default:0x0 RO */ + u32 out_nxt_stage:4; /* [07:04] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_NXT_STAGE_DWLEN]; +} __packed; + +#define NBL_ACL_BP_STATE_ADDR (0xb64700) +#define NBL_ACL_BP_STATE_DEPTH (1) +#define NBL_ACL_BP_STATE_WIDTH (32) +#define NBL_ACL_BP_STATE_DWLEN (1) +union acl_bp_state_u { + struct acl_bp_state { + u32 in_bp:1; /* [00:00] Default:0x0 RO */ + u32 out_bp:1; /* [01:01] Default:0x0 RO */ + u32 inter_bp:1; /* [02:02] Default:0x0 RO */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_BP_STATE_DWLEN]; +} __packed; + +#define NBL_ACL_CMDQ_REQ_HIT_ADDR (0xb647a0) +#define NBL_ACL_CMDQ_REQ_HIT_DEPTH (1) +#define NBL_ACL_CMDQ_REQ_HIT_WIDTH (32) +#define NBL_ACL_CMDQ_REQ_HIT_DWLEN (1) +union acl_cmdq_req_hit_u { + struct acl_cmdq_req_hit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_CMDQ_REQ_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_CMDQ_REQ_NO_HIT_ADDR (0xb647a8) +#define NBL_ACL_CMDQ_REQ_NO_HIT_DEPTH (1) +#define NBL_ACL_CMDQ_REQ_NO_HIT_WIDTH (32) +#define NBL_ACL_CMDQ_REQ_NO_HIT_DWLEN (1) +union acl_cmdq_req_no_hit_u { + struct acl_cmdq_req_no_hit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_CMDQ_REQ_NO_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_INSERT_SEARCH_CTRL_ADDR (0xb64880) +#define NBL_ACL_INSERT_SEARCH_CTRL_DEPTH (1) +#define NBL_ACL_INSERT_SEARCH_CTRL_WIDTH (32) +#define NBL_ACL_INSERT_SEARCH_CTRL_DWLEN (1) +union acl_insert_search_ctrl_u { + struct acl_insert_search_ctrl { + u32 profile_idx:4; /* [03:00] Default:0x0 RW */ + u32 start:1; /* [04:04] Default:0x0 WO */ + u32 rsv:27; /* [31:05] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INSERT_SEARCH_CTRL_DWLEN]; +} __packed; + +#define NBL_ACL_INSERT_SEARCH_ACK_ADDR (0xb64884) +#define NBL_ACL_INSERT_SEARCH_ACK_DEPTH (1) +#define NBL_ACL_INSERT_SEARCH_ACK_WIDTH (32) +#define NBL_ACL_INSERT_SEARCH_ACK_DWLEN (1) +union acl_insert_search_ack_u { + struct acl_insert_search_ack { + u32 ack:1; /* [00:00] Default:0x0 RC */ + u32 status:2; /* [02:01] Default:0x0 RWW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INSERT_SEARCH_ACK_DWLEN]; +} __packed; + +#define NBL_ACL_INSERT_SEARCH_DATA_ADDR (0xb64890) +#define NBL_ACL_INSERT_SEARCH_DATA_DEPTH (20) +#define NBL_ACL_INSERT_SEARCH_DATA_WIDTH (32) +#define NBL_ACL_INSERT_SEARCH_DATA_DWLEN (1) +union acl_insert_search_data_u { + struct acl_insert_search_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INSERT_SEARCH_DATA_DWLEN]; +} __packed; +#define NBL_ACL_INSERT_SEARCH_DATA_REG(r) (NBL_ACL_INSERT_SEARCH_DATA_ADDR + \ + (NBL_ACL_INSERT_SEARCH_DATA_DWLEN * 4) * (r))
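+
+/*
+ * Usage sketch for the insert/search window above (illustrative only: the
+ * driver reaches these registers through its phy_ops layer rather than raw
+ * writel(), and the handshake is inferred from the register layout).
+ * Assuming "base" is the ioremap()'d register BAR and "key" holds the
+ * search-key dwords:
+ *
+ *	union acl_insert_search_ctrl_u ctrl = {
+ *		.info = { .profile_idx = pid, .start = 1 },
+ *	};
+ *	int i;
+ *
+ *	for (i = 0; i < NBL_ACL_INSERT_SEARCH_DATA_DEPTH; i++)
+ *		writel(key[i], base + NBL_ACL_INSERT_SEARCH_DATA_REG(i));
+ *	writel(ctrl.data[0], base + NBL_ACL_INSERT_SEARCH_CTRL_ADDR);
+ *
+ * then poll ACL_INSERT_SEARCH_ACK.ack (read-clear) and inspect the status
+ * field for the result.
+ *
+ * The NBL_ACL_INDIRECT_* registers below appear to form a similar window
+ * for direct TCAM entry access: the TCAM0..15 X/Y word pairs and VALID_BIT
+ * stage one 40-bit entry per slice, ACL_INDIRECT_CTRL supplies the row
+ * address (tcam_addr), the slice bitmap (acc_btm) and the read/write
+ * select (cpu_acl_cfg_rw), cpu_acl_cfg_start kicks the access, and
+ * completion is signalled by ACL_INDIRECT_ACCESS_ACK.done.
+ */
+
#define NBL_ACL_INDIRECT_ACCESS_ACK_ADDR (0xb648f0) +#define NBL_ACL_INDIRECT_ACCESS_ACK_DEPTH (1) +#define NBL_ACL_INDIRECT_ACCESS_ACK_WIDTH (32) +#define NBL_ACL_INDIRECT_ACCESS_ACK_DWLEN (1) +union acl_indirect_access_ack_u { + struct acl_indirect_access_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:16; /* [16:01] Default:0x0 RWW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_ACCESS_ACK_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_CTRL_ADDR (0xb648f4) +#define NBL_ACL_INDIRECT_CTRL_DEPTH (1) +#define NBL_ACL_INDIRECT_CTRL_WIDTH (32) +#define NBL_ACL_INDIRECT_CTRL_DWLEN (1) +union acl_indirect_ctrl_u { + struct acl_indirect_ctrl { + u32 tcam_addr:9; /* [08:00] Default:0x0 RW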
*/ + u32 cpu_acl_cfg_start:1; /* [09:09] Default:0x0 WO */ + u32 cpu_acl_cfg_rw:1; /* [10:10] Default:0x0 RW */ + u32 rsv:5; /* [15:11] Default:0x0 WO */ + u32 acc_btm:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_CTRL_DWLEN]; +} __packed; + +#define NBL_ACL_VALID_BIT_ADDR (0xb64900) +#define NBL_ACL_VALID_BIT_DEPTH (1) +#define NBL_ACL_VALID_BIT_WIDTH (32) +#define NBL_ACL_VALID_BIT_DWLEN (1) +union acl_valid_bit_u { + struct acl_valid_bit { + u32 valid_bit:16; /* [15:00] Default:0x0 RWW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_VALID_BIT_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM0_XL_ADDR (0xb64904) +#define NBL_ACL_INDIRECT_TCAM0_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM0_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM0_XL_DWLEN (1) +union acl_indirect_tcam0_xl_u { + struct acl_indirect_tcam0_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM0_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM0_XH_ADDR (0xb64908) +#define NBL_ACL_INDIRECT_TCAM0_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM0_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM0_XH_DWLEN (1) +union acl_indirect_tcam0_xh_u { + struct acl_indirect_tcam0_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM0_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM1_XL_ADDR (0xb6490c) +#define NBL_ACL_INDIRECT_TCAM1_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM1_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM1_XL_DWLEN (1) +union acl_indirect_tcam1_xl_u { + struct acl_indirect_tcam1_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM1_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM1_XH_ADDR (0xb64910) +#define NBL_ACL_INDIRECT_TCAM1_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM1_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM1_XH_DWLEN (1) +union acl_indirect_tcam1_xh_u { + struct acl_indirect_tcam1_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM1_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM2_XL_ADDR (0xb64914) +#define NBL_ACL_INDIRECT_TCAM2_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM2_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM2_XL_DWLEN (1) +union acl_indirect_tcam2_xl_u { + struct acl_indirect_tcam2_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM2_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM2_XH_ADDR (0xb64918) +#define NBL_ACL_INDIRECT_TCAM2_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM2_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM2_XH_DWLEN (1) +union acl_indirect_tcam2_xh_u { + struct acl_indirect_tcam2_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM2_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM3_XL_ADDR (0xb6491c) +#define NBL_ACL_INDIRECT_TCAM3_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM3_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM3_XL_DWLEN (1) +union acl_indirect_tcam3_xl_u { + struct acl_indirect_tcam3_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM3_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM3_XH_ADDR (0xb64920) +#define NBL_ACL_INDIRECT_TCAM3_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM3_XH_WIDTH (32) +#define 
NBL_ACL_INDIRECT_TCAM3_XH_DWLEN (1) +union acl_indirect_tcam3_xh_u { + struct acl_indirect_tcam3_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM3_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM4_XL_ADDR (0xb64924) +#define NBL_ACL_INDIRECT_TCAM4_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM4_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM4_XL_DWLEN (1) +union acl_indirect_tcam4_xl_u { + struct acl_indirect_tcam4_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM4_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM4_XH_ADDR (0xb64928) +#define NBL_ACL_INDIRECT_TCAM4_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM4_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM4_XH_DWLEN (1) +union acl_indirect_tcam4_xh_u { + struct acl_indirect_tcam4_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM4_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM5_XL_ADDR (0xb6492c) +#define NBL_ACL_INDIRECT_TCAM5_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM5_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM5_XL_DWLEN (1) +union acl_indirect_tcam5_xl_u { + struct acl_indirect_tcam5_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM5_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM5_XH_ADDR (0xb64930) +#define NBL_ACL_INDIRECT_TCAM5_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM5_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM5_XH_DWLEN (1) +union acl_indirect_tcam5_xh_u { + struct acl_indirect_tcam5_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM5_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM6_XL_ADDR (0xb64934) +#define NBL_ACL_INDIRECT_TCAM6_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM6_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM6_XL_DWLEN (1) +union acl_indirect_tcam6_xl_u { + struct acl_indirect_tcam6_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM6_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM6_XH_ADDR (0xb64938) +#define NBL_ACL_INDIRECT_TCAM6_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM6_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM6_XH_DWLEN (1) +union acl_indirect_tcam6_xh_u { + struct acl_indirect_tcam6_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM6_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM7_XL_ADDR (0xb6493c) +#define NBL_ACL_INDIRECT_TCAM7_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM7_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM7_XL_DWLEN (1) +union acl_indirect_tcam7_xl_u { + struct acl_indirect_tcam7_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM7_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM7_XH_ADDR (0xb64940) +#define NBL_ACL_INDIRECT_TCAM7_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM7_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM7_XH_DWLEN (1) +union acl_indirect_tcam7_xh_u { + struct acl_indirect_tcam7_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM7_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM8_XL_ADDR (0xb64944) +#define NBL_ACL_INDIRECT_TCAM8_XL_DEPTH (1) +#define 
NBL_ACL_INDIRECT_TCAM8_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM8_XL_DWLEN (1) +union acl_indirect_tcam8_xl_u { + struct acl_indirect_tcam8_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM8_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM8_XH_ADDR (0xb64948) +#define NBL_ACL_INDIRECT_TCAM8_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM8_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM8_XH_DWLEN (1) +union acl_indirect_tcam8_xh_u { + struct acl_indirect_tcam8_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM8_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM9_XL_ADDR (0xb6494c) +#define NBL_ACL_INDIRECT_TCAM9_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM9_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM9_XL_DWLEN (1) +union acl_indirect_tcam9_xl_u { + struct acl_indirect_tcam9_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM9_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM9_XH_ADDR (0xb64950) +#define NBL_ACL_INDIRECT_TCAM9_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM9_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM9_XH_DWLEN (1) +union acl_indirect_tcam9_xh_u { + struct acl_indirect_tcam9_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM9_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM10_XL_ADDR (0xb64954) +#define NBL_ACL_INDIRECT_TCAM10_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM10_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM10_XL_DWLEN (1) +union acl_indirect_tcam10_xl_u { + struct acl_indirect_tcam10_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM10_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM10_XH_ADDR (0xb64958) +#define NBL_ACL_INDIRECT_TCAM10_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM10_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM10_XH_DWLEN (1) +union acl_indirect_tcam10_xh_u { + struct acl_indirect_tcam10_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM10_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM11_XL_ADDR (0xb6495c) +#define NBL_ACL_INDIRECT_TCAM11_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM11_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM11_XL_DWLEN (1) +union acl_indirect_tcam11_xl_u { + struct acl_indirect_tcam11_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM11_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM11_XH_ADDR (0xb64960) +#define NBL_ACL_INDIRECT_TCAM11_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM11_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM11_XH_DWLEN (1) +union acl_indirect_tcam11_xh_u { + struct acl_indirect_tcam11_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM11_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM12_XL_ADDR (0xb64964) +#define NBL_ACL_INDIRECT_TCAM12_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM12_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM12_XL_DWLEN (1) +union acl_indirect_tcam12_xl_u { + struct acl_indirect_tcam12_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM12_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM12_XH_ADDR (0xb64968) +#define 
NBL_ACL_INDIRECT_TCAM12_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM12_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM12_XH_DWLEN (1) +union acl_indirect_tcam12_xh_u { + struct acl_indirect_tcam12_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM12_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM13_XL_ADDR (0xb6496c) +#define NBL_ACL_INDIRECT_TCAM13_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM13_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM13_XL_DWLEN (1) +union acl_indirect_tcam13_xl_u { + struct acl_indirect_tcam13_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM13_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM13_XH_ADDR (0xb64970) +#define NBL_ACL_INDIRECT_TCAM13_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM13_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM13_XH_DWLEN (1) +union acl_indirect_tcam13_xh_u { + struct acl_indirect_tcam13_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM13_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM14_XL_ADDR (0xb64974) +#define NBL_ACL_INDIRECT_TCAM14_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM14_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM14_XL_DWLEN (1) +union acl_indirect_tcam14_xl_u { + struct acl_indirect_tcam14_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM14_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM14_XH_ADDR (0xb64978) +#define NBL_ACL_INDIRECT_TCAM14_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM14_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM14_XH_DWLEN (1) +union acl_indirect_tcam14_xh_u { + struct acl_indirect_tcam14_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM14_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM15_XL_ADDR (0xb6497c) +#define NBL_ACL_INDIRECT_TCAM15_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM15_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM15_XL_DWLEN (1) +union acl_indirect_tcam15_xl_u { + struct acl_indirect_tcam15_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM15_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM15_XH_ADDR (0xb64980) +#define NBL_ACL_INDIRECT_TCAM15_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM15_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM15_XH_DWLEN (1) +union acl_indirect_tcam15_xh_u { + struct acl_indirect_tcam15_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM15_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM0_YL_ADDR (0xb64990) +#define NBL_ACL_INDIRECT_TCAM0_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM0_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM0_YL_DWLEN (1) +union acl_indirect_tcam0_yl_u { + struct acl_indirect_tcam0_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM0_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM0_YH_ADDR (0xb64994) +#define NBL_ACL_INDIRECT_TCAM0_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM0_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM0_YH_DWLEN (1) +union acl_indirect_tcam0_yh_u { + struct acl_indirect_tcam0_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 
data[NBL_ACL_INDIRECT_TCAM0_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM1_YL_ADDR (0xb64998) +#define NBL_ACL_INDIRECT_TCAM1_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM1_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM1_YL_DWLEN (1) +union acl_indirect_tcam1_yl_u { + struct acl_indirect_tcam1_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM1_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM1_YH_ADDR (0xb6499c) +#define NBL_ACL_INDIRECT_TCAM1_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM1_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM1_YH_DWLEN (1) +union acl_indirect_tcam1_yh_u { + struct acl_indirect_tcam1_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM1_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM2_YL_ADDR (0xb649a0) +#define NBL_ACL_INDIRECT_TCAM2_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM2_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM2_YL_DWLEN (1) +union acl_indirect_tcam2_yl_u { + struct acl_indirect_tcam2_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM2_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM2_YH_ADDR (0xb649a4) +#define NBL_ACL_INDIRECT_TCAM2_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM2_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM2_YH_DWLEN (1) +union acl_indirect_tcam2_yh_u { + struct acl_indirect_tcam2_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM2_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM3_YL_ADDR (0xb649a8) +#define NBL_ACL_INDIRECT_TCAM3_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM3_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM3_YL_DWLEN (1) +union acl_indirect_tcam3_yl_u { + struct acl_indirect_tcam3_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM3_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM3_YH_ADDR (0xb649ac) +#define NBL_ACL_INDIRECT_TCAM3_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM3_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM3_YH_DWLEN (1) +union acl_indirect_tcam3_yh_u { + struct acl_indirect_tcam3_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM3_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM4_YL_ADDR (0xb649b0) +#define NBL_ACL_INDIRECT_TCAM4_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM4_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM4_YL_DWLEN (1) +union acl_indirect_tcam4_yl_u { + struct acl_indirect_tcam4_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM4_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM4_YH_ADDR (0xb649b4) +#define NBL_ACL_INDIRECT_TCAM4_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM4_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM4_YH_DWLEN (1) +union acl_indirect_tcam4_yh_u { + struct acl_indirect_tcam4_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM4_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM5_YL_ADDR (0xb649b8) +#define NBL_ACL_INDIRECT_TCAM5_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM5_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM5_YL_DWLEN (1) +union acl_indirect_tcam5_yl_u { + struct acl_indirect_tcam5_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 
data[NBL_ACL_INDIRECT_TCAM5_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM5_YH_ADDR (0xb649bc) +#define NBL_ACL_INDIRECT_TCAM5_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM5_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM5_YH_DWLEN (1) +union acl_indirect_tcam5_yh_u { + struct acl_indirect_tcam5_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM5_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM6_YL_ADDR (0xb649c0) +#define NBL_ACL_INDIRECT_TCAM6_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM6_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM6_YL_DWLEN (1) +union acl_indirect_tcam6_yl_u { + struct acl_indirect_tcam6_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM6_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM6_YH_ADDR (0xb649c4) +#define NBL_ACL_INDIRECT_TCAM6_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM6_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM6_YH_DWLEN (1) +union acl_indirect_tcam6_yh_u { + struct acl_indirect_tcam6_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM6_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM7_YL_ADDR (0xb649c8) +#define NBL_ACL_INDIRECT_TCAM7_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM7_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM7_YL_DWLEN (1) +union acl_indirect_tcam7_yl_u { + struct acl_indirect_tcam7_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM7_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM7_YH_ADDR (0xb649cc) +#define NBL_ACL_INDIRECT_TCAM7_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM7_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM7_YH_DWLEN (1) +union acl_indirect_tcam7_yh_u { + struct acl_indirect_tcam7_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM7_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM8_YL_ADDR (0xb649d0) +#define NBL_ACL_INDIRECT_TCAM8_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM8_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM8_YL_DWLEN (1) +union acl_indirect_tcam8_yl_u { + struct acl_indirect_tcam8_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM8_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM8_YH_ADDR (0xb649d4) +#define NBL_ACL_INDIRECT_TCAM8_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM8_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM8_YH_DWLEN (1) +union acl_indirect_tcam8_yh_u { + struct acl_indirect_tcam8_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM8_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM9_YL_ADDR (0xb649d8) +#define NBL_ACL_INDIRECT_TCAM9_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM9_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM9_YL_DWLEN (1) +union acl_indirect_tcam9_yl_u { + struct acl_indirect_tcam9_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM9_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM9_YH_ADDR (0xb649dc) +#define NBL_ACL_INDIRECT_TCAM9_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM9_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM9_YH_DWLEN (1) +union acl_indirect_tcam9_yh_u { + struct acl_indirect_tcam9_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] 
Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM9_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM10_YL_ADDR (0xb649e0) +#define NBL_ACL_INDIRECT_TCAM10_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM10_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM10_YL_DWLEN (1) +union acl_indirect_tcam10_yl_u { + struct acl_indirect_tcam10_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM10_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM10_YH_ADDR (0xb649e4) +#define NBL_ACL_INDIRECT_TCAM10_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM10_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM10_YH_DWLEN (1) +union acl_indirect_tcam10_yh_u { + struct acl_indirect_tcam10_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM10_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM11_YL_ADDR (0xb649e8) +#define NBL_ACL_INDIRECT_TCAM11_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM11_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM11_YL_DWLEN (1) +union acl_indirect_tcam11_yl_u { + struct acl_indirect_tcam11_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM11_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM11_YH_ADDR (0xb649ec) +#define NBL_ACL_INDIRECT_TCAM11_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM11_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM11_YH_DWLEN (1) +union acl_indirect_tcam11_yh_u { + struct acl_indirect_tcam11_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM11_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM12_YL_ADDR (0xb649f0) +#define NBL_ACL_INDIRECT_TCAM12_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM12_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM12_YL_DWLEN (1) +union acl_indirect_tcam12_yl_u { + struct acl_indirect_tcam12_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM12_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM12_YH_ADDR (0xb649f4) +#define NBL_ACL_INDIRECT_TCAM12_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM12_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM12_YH_DWLEN (1) +union acl_indirect_tcam12_yh_u { + struct acl_indirect_tcam12_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM12_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM13_YL_ADDR (0xb649f8) +#define NBL_ACL_INDIRECT_TCAM13_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM13_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM13_YL_DWLEN (1) +union acl_indirect_tcam13_yl_u { + struct acl_indirect_tcam13_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM13_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM13_YH_ADDR (0xb649fc) +#define NBL_ACL_INDIRECT_TCAM13_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM13_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM13_YH_DWLEN (1) +union acl_indirect_tcam13_yh_u { + struct acl_indirect_tcam13_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM13_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM14_YL_ADDR (0xb64a00) +#define NBL_ACL_INDIRECT_TCAM14_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM14_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM14_YL_DWLEN (1) +union acl_indirect_tcam14_yl_u { + 
struct acl_indirect_tcam14_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM14_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM14_YH_ADDR (0xb64a04) +#define NBL_ACL_INDIRECT_TCAM14_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM14_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM14_YH_DWLEN (1) +union acl_indirect_tcam14_yh_u { + struct acl_indirect_tcam14_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM14_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM15_YL_ADDR (0xb64a08) +#define NBL_ACL_INDIRECT_TCAM15_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM15_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM15_YL_DWLEN (1) +union acl_indirect_tcam15_yl_u { + struct acl_indirect_tcam15_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM15_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM15_YH_ADDR (0xb64a0c) +#define NBL_ACL_INDIRECT_TCAM15_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM15_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM15_YH_DWLEN (1) +union acl_indirect_tcam15_yh_u { + struct acl_indirect_tcam15_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM15_YH_DWLEN]; +} __packed; + +#define NBL_ACL_KGEN_TCAM_ADDR (0xb65800) +#define NBL_ACL_KGEN_TCAM_DEPTH (16) +#define NBL_ACL_KGEN_TCAM_WIDTH (64) +#define NBL_ACL_KGEN_TCAM_DWLEN (2) +union acl_kgen_tcam_u { + struct acl_kgen_tcam { + u32 mask:16; /* [15:00] */ + u32 data:16; /* [31:16] */ + u32 valid_bit:1; /* [32:32] */ + u32 rsv:31; /* [63:33] */ + } __packed info; + u32 data[NBL_ACL_KGEN_TCAM_DWLEN]; +} __packed; +#define NBL_ACL_KGEN_TCAM_REG(r) (NBL_ACL_KGEN_TCAM_ADDR + \ + (NBL_ACL_KGEN_TCAM_DWLEN * 4) * (r))
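+
+/*
+ * Sketch of how the 16 TCAM slices appear to be composed (inferred from
+ * the ACL_TCAM_CFG fields below, not from documentation): each slice N has
+ * an enable bit (tcamN_enable), startcompareN/startsetN markers (default
+ * 1) that look like chaining controls for stitching adjacent 40-bit slices
+ * into one wider entry, and a key_idN selecting which generated key the
+ * slice compares against. Under that reading, two chained slices would
+ * form one 80-bit entry, with startcompare/startset cleared on the
+ * non-leading slice.
+ */
+
#define NBL_ACL_TCAM_CFG_ADDR (0xb65a00) +#define NBL_ACL_TCAM_CFG_DEPTH (16) +#define NBL_ACL_TCAM_CFG_WIDTH (128) +#define NBL_ACL_TCAM_CFG_DWLEN (4) +union acl_tcam_cfg_u { + struct acl_tcam_cfg { + u32 startcompare0:1; /* [00:00] Default:0x1 RW */ + u32 startset0:1; /* [01:01] Default:0x1 RW */ + u32 tcam0_enable:1; /* [02:02] Default:0x0 RW */ + u32 startcompare1:1; /* [03:03] Default:0x1 RW */ + u32 startset1:1; /* [04:04] Default:0x1 RW */ + u32 tcam1_enable:1; /* [05:05] Default:0x0 RW */ + u32 startcompare2:1; /* [06:06] Default:0x1 RW */ + u32 startset2:1; /* [07:07] Default:0x1 RW */ + u32 tcam2_enable:1; /* [08:08] Default:0x0 RW */ + u32 startcompare3:1; /* [09:09] Default:0x1 RW */ + u32 startset3:1; /* [10:10] Default:0x1 RW */ + u32 tcam3_enable:1; /* [11:11] Default:0x0 RW */ + u32 startcompare4:1; /* [12:12] Default:0x1 RW */ + u32 startset4:1; /* [13:13] Default:0x1 RW */ + u32 tcam4_enable:1; /* [14:14] Default:0x0 RW */ + u32 startcompare5:1; /* [15:15] Default:0x1 RW */ + u32 startset5:1; /* [16:16] Default:0x1 RW */ + u32 tcam5_enable:1; /* [17:17] Default:0x0 RW */ + u32 startcompare6:1; /* [18:18] Default:0x1 RW */ + u32 startset6:1; /* [19:19] Default:0x1 RW */ + u32 tcam6_enable:1; /* [20:20] Default:0x0 RW */ + u32 startcompare7:1; /* [21:21] Default:0x1 RW */ + u32 startset7:1; /* [22:22] Default:0x1 RW */ + u32 tcam7_enable:1; /* [23:23] Default:0x0 RW */ + u32 startcompare8:1; /* [24:24] Default:0x1 RW */ + u32 startset8:1; /* [25:25] Default:0x1 RW */ + u32 tcam8_enable:1; /* [26:26] Default:0x0 RW */ + u32 startcompare9:1; /* [27:27] Default:0x1 RW */ + u32 startset9:1; /* [28:28] Default:0x1 RW */ + u32 tcam9_enable:1; /* [29:29] Default:0x0 RW */ + u32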
startcompare10:1; /* [30:30] Default:0x1 RW */ + u32 startset10:1; /* [31:31] Default:0x1 RW */ + u32 tcam10_enable:1; /* [32:32] Default:0x0 RW */ + u32 startcompare11:1; /* [33:33] Default:0x1 RW */ + u32 startset11:1; /* [34:34] Default:0x1 RW */ + u32 tcam11_enable:1; /* [35:35] Default:0x0 RW */ + u32 startcompare12:1; /* [36:36] Default:0x1 RW */ + u32 startset12:1; /* [37:37] Default:0x1 RW */ + u32 tcam12_enable:1; /* [38:38] Default:0x0 RW */ + u32 startcompare13:1; /* [39:39] Default:0x1 RW */ + u32 startset13:1; /* [40:40] Default:0x1 RW */ + u32 tcam13_enable:1; /* [41:41] Default:0x0 RW */ + u32 startcompare14:1; /* [42:42] Default:0x1 RW */ + u32 startset14:1; /* [43:43] Default:0x1 RW */ + u32 tcam14_enable:1; /* [44:44] Default:0x0 RW */ + u32 startcompare15:1; /* [45:45] Default:0x1 RW */ + u32 startset15:1; /* [46:46] Default:0x1 RW */ + u32 tcam15_enable:1; /* [47:47] Default:0x0 RW */ + u32 key_id0:4; /* [51:48] Default:0x0 RW */ + u32 key_id1:4; /* [55:52] Default:0x0 RW */ + u32 key_id2:4; /* [59:56] Default:0x0 RW */ + u32 key_id3:4; /* [63:60] Default:0x0 RW */ + u32 key_id4:4; /* [67:64] Default:0x0 RW */ + u32 key_id5:4; /* [71:68] Default:0x0 RW */ + u32 key_id6:4; /* [75:72] Default:0x0 RW */ + u32 key_id7:4; /* [79:76] Default:0x0 RW */ + u32 key_id8:4; /* [83:80] Default:0x0 RW */ + u32 key_id9:4; /* [87:84] Default:0x0 RW */ + u32 key_id10:4; /* [91:88] Default:0x0 RW */ + u32 key_id11:4; /* [95:92] Default:0x0 RW */ + u32 key_id12:4; /* [99:96] Default:0x0 RW */ + u32 key_id13:4; /* [103:100] Default:0x0 RW */ + u32 key_id14:4; /* [107:104] Default:0x0 RW */ + u32 key_id15:4; /* [111:108] Default:0x0 RW */ + u32 rsv:16; /* [127:112] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_CFG_DWLEN]; +} __packed; +#define NBL_ACL_TCAM_CFG_REG(r) (NBL_ACL_TCAM_CFG_ADDR + \ + (NBL_ACL_TCAM_CFG_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM_CFG_ADDR (0xb65c00) +#define NBL_ACL_ACTION_RAM_CFG_DEPTH (16) +#define NBL_ACL_ACTION_RAM_CFG_WIDTH (128) +#define NBL_ACL_ACTION_RAM_CFG_DWLEN (4) +union acl_action_ram_cfg_u { + struct acl_action_ram_cfg { + u32 action_ram0_alloc_id:4; /* [03:00] Default:0x0 RW */ + u32 action_ram0_enable:1; /* [04:04] Default:0x0 RW */ + u32 action_ram1_alloc_id:4; /* [08:05] Default:0x0 RW */ + u32 action_ram1_enable:1; /* [09:09] Default:0x0 RW */ + u32 action_ram2_alloc_id:4; /* [13:10] Default:0x0 RW */ + u32 action_ram2_enable:1; /* [14:14] Default:0x0 RW */ + u32 action_ram3_alloc_id:4; /* [18:15] Default:0x0 RW */ + u32 action_ram3_enable:1; /* [19:19] Default:0x0 RW */ + u32 action_ram4_alloc_id:4; /* [23:20] Default:0x0 RW */ + u32 action_ram4_enable:1; /* [24:24] Default:0x0 RW */ + u32 action_ram5_alloc_id:4; /* [28:25] Default:0x0 RW */ + u32 action_ram5_enable:1; /* [29:29] Default:0x0 RW */ + u32 action_ram6_alloc_id:4; /* [33:30] Default:0x0 RW */ + u32 action_ram6_enable:1; /* [34:34] Default:0x0 RW */ + u32 action_ram7_alloc_id:4; /* [38:35] Default:0x0 RW */ + u32 action_ram7_enable:1; /* [39:39] Default:0x0 RW */ + u32 action_ram8_alloc_id:4; /* [43:40] Default:0x0 RW */ + u32 action_ram8_enable:1; /* [44:44] Default:0x0 RW */ + u32 action_ram9_alloc_id:4; /* [48:45] Default:0x0 RW */ + u32 action_ram9_enable:1; /* [49:49] Default:0x0 RW */ + u32 action_ram10_alloc_id:4; /* [53:50] Default:0x0 RW */ + u32 action_ram10_enable:1; /* [54:54] Default:0x0 RW */ + u32 action_ram11_alloc_id:4; /* [58:55] Default:0x0 RW */ + u32 action_ram11_enable:1; /* [59:59] Default:0x0 RW */ + u32 action_ram12_alloc_id:4; /* [63:60] 
Default:0x0 RW */ + u32 action_ram12_enable:1; /* [64:64] Default:0x0 RW */ + u32 action_ram13_alloc_id:4; /* [68:65] Default:0x0 RW */ + u32 action_ram13_enable:1; /* [69:69] Default:0x0 RW */ + u32 action_ram14_alloc_id:4; /* [73:70] Default:0x0 RW */ + u32 action_ram14_enable:1; /* [74:74] Default:0x0 RW */ + u32 action_ram15_alloc_id:4; /* [78:75] Default:0x0 RW */ + u32 action_ram15_enable:1; /* [79:79] Default:0x0 RW */ + u32 rsv_l:32; /* [127:80] Default:0x0 RO */ + u32 rsv_h:16; /* [127:80] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM_CFG_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM_CFG_REG(r) (NBL_ACL_ACTION_RAM_CFG_ADDR + \ + (NBL_ACL_ACTION_RAM_CFG_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM0_ADDR (0xb66000) +#define NBL_ACL_ACTION_RAM0_DEPTH (512) +#define NBL_ACL_ACTION_RAM0_WIDTH (128) +#define NBL_ACL_ACTION_RAM0_DWLEN (4) +union acl_action_ram0_u { + struct acl_action_ram0 { + u32 Action0:22; /* [21:00] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM0_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM0_REG(r) (NBL_ACL_ACTION_RAM0_ADDR + \ + (NBL_ACL_ACTION_RAM0_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM1_ADDR (0xb68000) +#define NBL_ACL_ACTION_RAM1_DEPTH (512) +#define NBL_ACL_ACTION_RAM1_WIDTH (128) +#define NBL_ACL_ACTION_RAM1_DWLEN (4) +union acl_action_ram1_u { + struct acl_action_ram1 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM1_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM1_REG(r) (NBL_ACL_ACTION_RAM1_ADDR + \ + (NBL_ACL_ACTION_RAM1_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM2_ADDR (0xb6a000) +#define NBL_ACL_ACTION_RAM2_DEPTH (512) +#define NBL_ACL_ACTION_RAM2_WIDTH (128) +#define NBL_ACL_ACTION_RAM2_DWLEN (4) +union acl_action_ram2_u { + struct acl_action_ram2 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM2_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM2_REG(r) (NBL_ACL_ACTION_RAM2_ADDR + \ + (NBL_ACL_ACTION_RAM2_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM3_ADDR (0xb6c000) +#define NBL_ACL_ACTION_RAM3_DEPTH (512) +#define NBL_ACL_ACTION_RAM3_WIDTH (128) +#define NBL_ACL_ACTION_RAM3_DWLEN (4) +union acl_action_ram3_u { + struct acl_action_ram3 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM3_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM3_REG(r) (NBL_ACL_ACTION_RAM3_ADDR + \ + (NBL_ACL_ACTION_RAM3_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM4_ADDR (0xb6e000) +#define NBL_ACL_ACTION_RAM4_DEPTH (512) +#define NBL_ACL_ACTION_RAM4_WIDTH (128) 
+#define NBL_ACL_ACTION_RAM4_DWLEN (4) +union acl_action_ram4_u { + struct acl_action_ram4 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM4_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM4_REG(r) (NBL_ACL_ACTION_RAM4_ADDR + \ + (NBL_ACL_ACTION_RAM4_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM5_ADDR (0xb70000) +#define NBL_ACL_ACTION_RAM5_DEPTH (512) +#define NBL_ACL_ACTION_RAM5_WIDTH (128) +#define NBL_ACL_ACTION_RAM5_DWLEN (4) +union acl_action_ram5_u { + struct acl_action_ram5 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM5_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM5_REG(r) (NBL_ACL_ACTION_RAM5_ADDR + \ + (NBL_ACL_ACTION_RAM5_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM6_ADDR (0xb72000) +#define NBL_ACL_ACTION_RAM6_DEPTH (512) +#define NBL_ACL_ACTION_RAM6_WIDTH (128) +#define NBL_ACL_ACTION_RAM6_DWLEN (4) +union acl_action_ram6_u { + struct acl_action_ram6 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM6_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM6_REG(r) (NBL_ACL_ACTION_RAM6_ADDR + \ + (NBL_ACL_ACTION_RAM6_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM7_ADDR (0xb74000) +#define NBL_ACL_ACTION_RAM7_DEPTH (512) +#define NBL_ACL_ACTION_RAM7_WIDTH (128) +#define NBL_ACL_ACTION_RAM7_DWLEN (4) +union acl_action_ram7_u { + struct acl_action_ram7 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM7_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM7_REG(r) (NBL_ACL_ACTION_RAM7_ADDR + \ + (NBL_ACL_ACTION_RAM7_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM8_ADDR (0xb76000) +#define NBL_ACL_ACTION_RAM8_DEPTH (512) +#define NBL_ACL_ACTION_RAM8_WIDTH (128) +#define NBL_ACL_ACTION_RAM8_DWLEN (4) +union acl_action_ram8_u { + struct acl_action_ram8 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM8_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM8_REG(r) (NBL_ACL_ACTION_RAM8_ADDR + \ + (NBL_ACL_ACTION_RAM8_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM9_ADDR (0xb78000) +#define NBL_ACL_ACTION_RAM9_DEPTH (512) +#define NBL_ACL_ACTION_RAM9_WIDTH (128) +#define NBL_ACL_ACTION_RAM9_DWLEN (4) +union acl_action_ram9_u { + struct acl_action_ram9 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* 
[43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM9_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM9_REG(r) (NBL_ACL_ACTION_RAM9_ADDR + \ + (NBL_ACL_ACTION_RAM9_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM10_ADDR (0xb7a000) +#define NBL_ACL_ACTION_RAM10_DEPTH (512) +#define NBL_ACL_ACTION_RAM10_WIDTH (128) +#define NBL_ACL_ACTION_RAM10_DWLEN (4) +union acl_action_ram10_u { + struct acl_action_ram10 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM10_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM10_REG(r) (NBL_ACL_ACTION_RAM10_ADDR + \ + (NBL_ACL_ACTION_RAM10_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM11_ADDR (0xb7c000) +#define NBL_ACL_ACTION_RAM11_DEPTH (512) +#define NBL_ACL_ACTION_RAM11_WIDTH (128) +#define NBL_ACL_ACTION_RAM11_DWLEN (4) +union acl_action_ram11_u { + struct acl_action_ram11 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM11_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM11_REG(r) (NBL_ACL_ACTION_RAM11_ADDR + \ + (NBL_ACL_ACTION_RAM11_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM12_ADDR (0xb7e000) +#define NBL_ACL_ACTION_RAM12_DEPTH (512) +#define NBL_ACL_ACTION_RAM12_WIDTH (128) +#define NBL_ACL_ACTION_RAM12_DWLEN (4) +union acl_action_ram12_u { + struct acl_action_ram12 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM12_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM12_REG(r) (NBL_ACL_ACTION_RAM12_ADDR + \ + (NBL_ACL_ACTION_RAM12_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM13_ADDR (0xb80000) +#define NBL_ACL_ACTION_RAM13_DEPTH (512) +#define NBL_ACL_ACTION_RAM13_WIDTH (128) +#define NBL_ACL_ACTION_RAM13_DWLEN (4) +union acl_action_ram13_u { + struct acl_action_ram13 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM13_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM13_REG(r) (NBL_ACL_ACTION_RAM13_ADDR + \ + (NBL_ACL_ACTION_RAM13_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM14_ADDR (0xb82000) +#define NBL_ACL_ACTION_RAM14_DEPTH (512) +#define NBL_ACL_ACTION_RAM14_WIDTH (128) +#define NBL_ACL_ACTION_RAM14_DWLEN (4) +union acl_action_ram14_u { + struct acl_action_ram14 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW 
*/ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM14_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM14_REG(r) (NBL_ACL_ACTION_RAM14_ADDR + \ + (NBL_ACL_ACTION_RAM14_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM15_ADDR (0xb84000) +#define NBL_ACL_ACTION_RAM15_DEPTH (512) +#define NBL_ACL_ACTION_RAM15_WIDTH (128) +#define NBL_ACL_ACTION_RAM15_DWLEN (4) +union acl_action_ram15_u { + struct acl_action_ram15 { + u32 Action0:22; /* [21:0] Default:0x0 RW */ + u32 Action1:22; /* [43:22] Default:0x0 RW */ + u32 Action2:22; /* [65:44] Default:0x0 RW */ + u32 Action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM15_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM15_REG(r) (NBL_ACL_ACTION_RAM15_ADDR + \ + (NBL_ACL_ACTION_RAM15_DWLEN * 4) * (r)) + +#define NBL_ACL_DEFAULT_ACTION_RAM_ADDR (0xb86000) +#define NBL_ACL_DEFAULT_ACTION_RAM_DEPTH (16) +#define NBL_ACL_DEFAULT_ACTION_RAM_WIDTH (256) +#define NBL_ACL_DEFAULT_ACTION_RAM_DWLEN (8) +union acl_default_action_ram_u { + struct acl_default_action_ram { + u32 action0:22; /* [21:00] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 rsv:16; /* [255:176] Default:0x0 RO */ + u32 rsv_arr[2]; /* [255:176] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION_RAM_DWLEN]; +} __packed; +#define NBL_ACL_DEFAULT_ACTION_RAM_REG(r) (NBL_ACL_DEFAULT_ACTION_RAM_ADDR + \ + (NBL_ACL_DEFAULT_ACTION_RAM_DWLEN * 4) * (r)) + +#define NBL_ACL_FLOW_ID_STAT_RAM_ADDR (0xb94000) +#define NBL_ACL_FLOW_ID_STAT_RAM_DEPTH (131072) +#define NBL_ACL_FLOW_ID_STAT_RAM_WIDTH (128) +#define NBL_ACL_FLOW_ID_STAT_RAM_DWLEN (4) +union acl_flow_id_stat_ram_u { + struct acl_flow_id_stat_ram { + u32 pkt_byte_l:32; /* [47:00] Default:0x0 RO */ + u32 pkt_byte_h:16; /* [47:00] Default:0x0 RO */ + u32 pkt_cnt_l:32; /* [87:48] Default:0x0 RO */ + u32 pkt_cnt_h:8; /* [87:48] Default:0x0 RO */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_FLOW_ID_STAT_RAM_DWLEN]; +} __packed; +#define NBL_ACL_FLOW_ID_STAT_RAM_REG(r) (NBL_ACL_FLOW_ID_STAT_RAM_ADDR + \ + (NBL_ACL_FLOW_ID_STAT_RAM_DWLEN * 4) * (r)) + +#define NBL_ACL_STAT_ID_STAT_RAM_ADDR (0xd94000) +#define NBL_ACL_STAT_ID_STAT_RAM_DEPTH (2048) +#define NBL_ACL_STAT_ID_STAT_RAM_WIDTH (128) +#define NBL_ACL_STAT_ID_STAT_RAM_DWLEN (4) +union acl_stat_id_stat_ram_u { + struct acl_stat_id_stat_ram { + u32 pkt_byte_arr[2]; /* [63:0] Default:0x0 RO */ + u32 pkt_cnt_arr[2]; /* [127:64] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_STAT_ID_STAT_RAM_DWLEN]; +} __packed; +#define NBL_ACL_STAT_ID_STAT_RAM_REG(r) (NBL_ACL_STAT_ID_STAT_RAM_ADDR + \ + (NBL_ACL_STAT_ID_STAT_RAM_DWLEN * 4) * (r)) + +#endif
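Because a u32 bit-field cannot span more than 32 bits, acl_flow_id_stat_ram carries its 48-bit byte counter and 40-bit packet counter as low/high field pairs that the reader recombines. A minimal sketch of that recombination, assuming the entry has already been read into the union; the helper name is hypothetical and not part of the patch:

/* Illustrative sketch, not part of the patch. */
static void nbl_acl_flow_stat_unpack(const union acl_flow_id_stat_ram_u *e,
				     u64 *bytes, u64 *pkts)
{
	/* [47:0]: 32 low bits + 16 high bits of the byte counter. */
	*bytes = ((u64)e->info.pkt_byte_h << 32) | e->info.pkt_byte_l;
	/* [87:48]: 32 low bits + 8 high bits of the packet counter. */
	*pkts = ((u64)e->info.pkt_cnt_h << 32) | e->info.pkt_cnt_l;
}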
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_epro.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_epro.h new file mode 100644 index 0000000000000000000000000000000000000000..be808db098d00eb4cd06f4dd812c81b37139bc36 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_epro.h @@ -0,0 +1,660 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_EPRO_H +#define NBL_EPRO_H 1 + +#include <linux/types.h> + +#define NBL_EPRO_BASE (0x00E74000) + +#define NBL_EPRO_INT_STATUS_ADDR (0xe74000) +#define NBL_EPRO_INT_STATUS_DEPTH (1) +#define NBL_EPRO_INT_STATUS_WIDTH (32) +#define NBL_EPRO_INT_STATUS_DWLEN (1) +union epro_int_status_u { + struct epro_int_status { + u32 fatal_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RWC */ + u32 cif_err:1; /* [3] Default:0x0 RWC */ + u32 input_err:1; /* [4] Default:0x0 RWC */ + u32 cfg_err:1; /* [5] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [6] Default:0x0 RWC */ + u32 data_cor_err:1; /* [7] Default:0x0 RWC */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_EPRO_INT_MASK_ADDR (0xe74004) +#define NBL_EPRO_INT_MASK_DEPTH (1) +#define NBL_EPRO_INT_MASK_WIDTH (32) +#define NBL_EPRO_INT_MASK_DWLEN (1) +union epro_int_mask_u { + struct epro_int_mask { + u32 fatal_err:1; /* [0] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RW */ + u32 cif_err:1; /* [3] Default:0x0 RW */ + u32 input_err:1; /* [4] Default:0x0 RW */ + u32 cfg_err:1; /* [5] Default:0x0 RW */ + u32 data_ucor_err:1; /* [6] Default:0x0 RW */ + u32 data_cor_err:1; /* [7] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_INT_MASK_DWLEN]; +} __packed; + +#define NBL_EPRO_INT_SET_ADDR (0xe74008) +#define NBL_EPRO_INT_SET_DEPTH (1) +#define NBL_EPRO_INT_SET_WIDTH (32) +#define NBL_EPRO_INT_SET_DWLEN (1) +union epro_int_set_u { + struct epro_int_set { + u32 fatal_err:1; /* [0] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 WO */ + u32 cif_err:1; /* [3] Default:0x0 WO */ + u32 input_err:1; /* [4] Default:0x0 WO */ + u32 cfg_err:1; /* [5] Default:0x0 WO */ + u32 data_ucor_err:1; /* [6] Default:0x0 WO */ + u32 data_cor_err:1; /* [7] Default:0x0 WO */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_INT_SET_DWLEN]; +} __packed; + +#define NBL_EPRO_INIT_DONE_ADDR (0xe7400c) +#define NBL_EPRO_INIT_DONE_DEPTH (1) +#define NBL_EPRO_INIT_DONE_WIDTH (32) +#define NBL_EPRO_INIT_DONE_DWLEN (1) +union epro_init_done_u { + struct epro_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_EPRO_CIF_ERR_INFO_ADDR (0xe74040) +#define NBL_EPRO_CIF_ERR_INFO_DEPTH (1) +#define NBL_EPRO_CIF_ERR_INFO_WIDTH (32) +#define NBL_EPRO_CIF_ERR_INFO_DWLEN (1) +union epro_cif_err_info_u { + struct epro_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_EPRO_CFG_ERR_INFO_ADDR (0xe74050) +#define NBL_EPRO_CFG_ERR_INFO_DEPTH (1) +#define NBL_EPRO_CFG_ERR_INFO_WIDTH (32) +#define NBL_EPRO_CFG_ERR_INFO_DWLEN (1) +union epro_cfg_err_info_u { + struct epro_cfg_err_info { + u32 addr:10; /* [9:0] Default:0x0 RO */ + u32 id:3; /* [12:10] Default:0x0 RO */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_CFG_ERR_INFO_DWLEN];
+} __packed; + +#define NBL_EPRO_CAR_CTRL_ADDR (0xe74100) +#define NBL_EPRO_CAR_CTRL_DEPTH (1) +#define NBL_EPRO_CAR_CTRL_WIDTH (32) +#define NBL_EPRO_CAR_CTRL_DWLEN (1) +union epro_car_ctrl_u { + struct epro_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_EPRO_INIT_START_ADDR (0xe74180) +#define NBL_EPRO_INIT_START_DEPTH (1) +#define NBL_EPRO_INIT_START_WIDTH (32) +#define NBL_EPRO_INIT_START_DWLEN (1) +union epro_init_start_u { + struct epro_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_INIT_START_DWLEN]; +} __packed; + +#define NBL_EPRO_FLAG_SEL_ADDR (0xe74200) +#define NBL_EPRO_FLAG_SEL_DEPTH (1) +#define NBL_EPRO_FLAG_SEL_WIDTH (32) +#define NBL_EPRO_FLAG_SEL_DWLEN (1) +union epro_flag_sel_u { + struct epro_flag_sel { + u32 dir_offset_en:1; /* [0] Default:0x1 RW */ + u32 dir_offset:5; /* [5:1] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_FLAG_SEL_DWLEN]; +} __packed; + +#define NBL_EPRO_ACT_SEL_EN_ADDR (0xe74214) +#define NBL_EPRO_ACT_SEL_EN_DEPTH (1) +#define NBL_EPRO_ACT_SEL_EN_WIDTH (32) +#define NBL_EPRO_ACT_SEL_EN_DWLEN (1) +union epro_act_sel_en_u { + struct epro_act_sel_en { + u32 rssidx_en:1; /* [0] Default:0x1 RW */ + u32 dport_en:1; /* [1] Default:0x1 RW */ + u32 mirroridx_en:1; /* [2] Default:0x1 RW */ + u32 dqueue_en:1; /* [3] Default:0x1 RW */ + u32 encap_en:1; /* [4] Default:0x1 RW */ + u32 pop_8021q_en:1; /* [5] Default:0x1 RW */ + u32 pop_qinq_en:1; /* [6] Default:0x1 RW */ + u32 push_cvlan_en:1; /* [7] Default:0x1 RW */ + u32 push_svlan_en:1; /* [8] Default:0x1 RW */ + u32 replace_cvlan_en:1; /* [9] Default:0x1 RW */ + u32 replace_svlan_en:1; /* [10] Default:0x1 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_ACT_SEL_EN_DWLEN]; +} __packed; + +#define NBL_EPRO_AM_ACT_ID0_ADDR (0xe74218) +#define NBL_EPRO_AM_ACT_ID0_DEPTH (1) +#define NBL_EPRO_AM_ACT_ID0_WIDTH (32) +#define NBL_EPRO_AM_ACT_ID0_DWLEN (1) +union epro_am_act_id0_u { + struct epro_am_act_id0 { + u32 replace_cvlan:6; /* [5:0] Default:0x2b RW */ + u32 rsv3:2; /* [7:6] Default:0x0 RO */ + u32 replace_svlan:6; /* [13:8] Default:0x2a RW */ + u32 rsv2:2; /* [15:14] Default:0x0 RO */ + u32 push_cvlan:6; /* [21:16] Default:0x2d RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 push_svlan:6; /* [29:24] Default:0x2c RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_AM_ACT_ID0_DWLEN]; +} __packed; + +#define NBL_EPRO_AM_ACT_ID1_ADDR (0xe7421c) +#define NBL_EPRO_AM_ACT_ID1_DEPTH (1) +#define NBL_EPRO_AM_ACT_ID1_WIDTH (32) +#define NBL_EPRO_AM_ACT_ID1_DWLEN (1) +union epro_am_act_id1_u { + struct epro_am_act_id1 { + u32 pop_qinq:6; /* [5:0] Default:0x29 RW */ + u32 rsv3:2; /* [7:6] Default:0x0 RO */ + u32 pop_8021q:6; /* [13:08] Default:0x28 RW */ + u32 rsv2:2; /* [15:14] Default:0x0 RO */ + u32 dport:6; /* [21:16] Default:0x9 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 dqueue:6; /* [29:24] Default:0xa RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_AM_ACT_ID1_DWLEN]; +} __packed; + +#define NBL_EPRO_AM_ACT_ID2_ADDR (0xe74220) +#define NBL_EPRO_AM_ACT_ID2_DEPTH (1) +#define 
NBL_EPRO_AM_ACT_ID2_WIDTH (32) +#define NBL_EPRO_AM_ACT_ID2_DWLEN (1) +union epro_am_act_id2_u { + struct epro_am_act_id2 { + u32 rssidx:6; /* [5:0] Default:0x4 RW */ + u32 rsv3:2; /* [7:6] Default:0x0 RO */ + u32 mirroridx:6; /* [13:8] Default:0x8 RW */ + u32 rsv2:2; /* [15:14] Default:0x0 RO */ + u32 car:6; /* [21:16] Default:0x5 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 encap:6; /* [29:24] Default:0x2e RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_AM_ACT_ID2_DWLEN]; +} __packed; + +#define NBL_EPRO_AM_ACT_ID3_ADDR (0xe74224) +#define NBL_EPRO_AM_ACT_ID3_DEPTH (1) +#define NBL_EPRO_AM_ACT_ID3_WIDTH (32) +#define NBL_EPRO_AM_ACT_ID3_DWLEN (1) +union epro_am_act_id3_u { + struct epro_am_act_id3 { + u32 outer_sport_mdf:6; /* [5:0] Default:0x30 RW */ + u32 rsv3:2; /* [7:6] Default:0x0 RO */ + u32 pri_mdf:6; /* [13:8] Default:0x15 RW */ + u32 rsv2:2; /* [15:14] Default:0x0 RO */ + u32 dp_hash0:6; /* [21:16] Default:0x13 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 dp_hash1:6; /* [29:24] Default:0x14 RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_AM_ACT_ID3_DWLEN]; +} __packed; + +#define NBL_EPRO_ACTION_PRIORITY_ADDR (0xe74230) +#define NBL_EPRO_ACTION_PRIORITY_DEPTH (1) +#define NBL_EPRO_ACTION_PRIORITY_WIDTH (32) +#define NBL_EPRO_ACTION_PRIORITY_DWLEN (1) +union epro_action_priority_u { + struct epro_action_priority { + u32 mirroridx:2; /* [1:0] Default:0x0 RW */ + u32 car:2; /* [3:2] Default:0x0 RW */ + u32 dqueue:2; /* [5:4] Default:0x0 RW */ + u32 dport:2; /* [7:6] Default:0x0 RW */ + u32 pop_8021q:2; /* [9:8] Default:0x0 RW */ + u32 pop_qinq:2; /* [11:10] Default:0x0 RW */ + u32 replace_inner_vlan:2; /* [13:12] Default:0x0 RW */ + u32 replace_outer_vlan:2; /* [15:14] Default:0x0 RW */ + u32 push_inner_vlan:2; /* [17:16] Default:0x0 RW */ + u32 push_outer_vlan:2; /* [19:18] Default:0x0 RW */ + u32 outer_sport_mdf:2; /* [21:20] Default:0x0 RW */ + u32 pri_mdf:2; /* [23:22] Default:0x0 RW */ + u32 dp_hash0:2; /* [25:24] Default:0x0 RW */ + u32 dp_hash1:2; /* [27:26] Default:0x0 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_ACTION_PRIORITY_DWLEN]; +} __packed; + +#define NBL_EPRO_MIRROR_ACTION_PRIORITY_ADDR (0xe74234) +#define NBL_EPRO_MIRROR_ACTION_PRIORITY_DEPTH (1) +#define NBL_EPRO_MIRROR_ACTION_PRIORITY_WIDTH (32) +#define NBL_EPRO_MIRROR_ACTION_PRIORITY_DWLEN (1) +union epro_mirror_action_priority_u { + struct epro_mirror_action_priority { + u32 car:2; /* [1:0] Default:0x0 RW */ + u32 dqueue:2; /* [3:2] Default:0x0 RW */ + u32 dport:2; /* [5:4] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_MIRROR_ACTION_PRIORITY_DWLEN]; +} __packed; + +#define NBL_EPRO_SET_FLAGS_ADDR (0xe74238) +#define NBL_EPRO_SET_FLAGS_DEPTH (1) +#define NBL_EPRO_SET_FLAGS_WIDTH (32) +#define NBL_EPRO_SET_FLAGS_DWLEN (1) +union epro_set_flags_u { + struct epro_set_flags { + u32 set_flags:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_EPRO_SET_FLAGS_DWLEN]; +} __packed; + +#define NBL_EPRO_CLEAR_FLAGS_ADDR (0xe7423c) +#define NBL_EPRO_CLEAR_FLAGS_DEPTH (1) +#define NBL_EPRO_CLEAR_FLAGS_WIDTH (32) +#define NBL_EPRO_CLEAR_FLAGS_DWLEN (1) +union epro_clear_flags_u { + struct epro_clear_flags { + u32 clear_flags:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_EPRO_CLEAR_FLAGS_DWLEN]; +} __packed; + +#define NBL_EPRO_RSS_SK_ADDR (0xe74400) +#define NBL_EPRO_RSS_SK_DEPTH (1) +#define 
NBL_EPRO_RSS_SK_WIDTH (320) +#define NBL_EPRO_RSS_SK_DWLEN (10) +union epro_rss_sk_u { + struct epro_rss_sk { + u32 sk_arr[10]; /* [319:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_EPRO_RSS_SK_DWLEN]; +} __packed; + +#define NBL_EPRO_VXLAN_SP_ADDR (0xe74500) +#define NBL_EPRO_VXLAN_SP_DEPTH (1) +#define NBL_EPRO_VXLAN_SP_WIDTH (32) +#define NBL_EPRO_VXLAN_SP_DWLEN (1) +union epro_vxlan_sp_u { + struct epro_vxlan_sp { + u32 vxlan_tnl_sp_min:16; /* [15:0] Default:0x8000 RW */ + u32 vxlan_tnl_sp_max:16; /* [31:16] Default:0xee48 RW */ + } __packed info; + u32 data[NBL_EPRO_VXLAN_SP_DWLEN]; +} __packed; + +#define NBL_EPRO_LOOP_SCH_COS_DEFAULT_ADDR (0xe74600) +#define NBL_EPRO_LOOP_SCH_COS_DEFAULT_DEPTH (1) +#define NBL_EPRO_LOOP_SCH_COS_DEFAULT_WIDTH (32) +#define NBL_EPRO_LOOP_SCH_COS_DEFAULT_DWLEN (1) +union epro_loop_sch_cos_default_u { + struct epro_loop_sch_cos_default { + u32 sch_cos:3; /* [2:0] Default:0x0 RW */ + u32 pfc_mode:1; /* [3] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_LOOP_SCH_COS_DEFAULT_DWLEN]; +} __packed; + +#define NBL_EPRO_MIRROR_PKT_COS_DEFAULT_ADDR (0xe74604) +#define NBL_EPRO_MIRROR_PKT_COS_DEFAULT_DEPTH (1) +#define NBL_EPRO_MIRROR_PKT_COS_DEFAULT_WIDTH (32) +#define NBL_EPRO_MIRROR_PKT_COS_DEFAULT_DWLEN (1) +union epro_mirror_pkt_cos_default_u { + struct epro_mirror_pkt_cos_default { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_MIRROR_PKT_COS_DEFAULT_DWLEN]; +} __packed; + +#define NBL_EPRO_NO_DPORT_REDIRECT_ADDR (0xe7463c) +#define NBL_EPRO_NO_DPORT_REDIRECT_DEPTH (1) +#define NBL_EPRO_NO_DPORT_REDIRECT_WIDTH (32) +#define NBL_EPRO_NO_DPORT_REDIRECT_DWLEN (1) +union epro_no_dport_redirect_u { + struct epro_no_dport_redirect { + u32 dport:16; /* [15:0] Default:0x0 RW */ + u32 dqueue:11; /* [26:16] Default:0x0 RW */ + u32 dqueue_en:1; /* [27] Default:0x0 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_NO_DPORT_REDIRECT_DWLEN]; +} __packed; + +#define NBL_EPRO_SCH_COS_MAP_ETH0_ADDR (0xe74640) +#define NBL_EPRO_SCH_COS_MAP_ETH0_DEPTH (8) +#define NBL_EPRO_SCH_COS_MAP_ETH0_WIDTH (32) +#define NBL_EPRO_SCH_COS_MAP_ETH0_DWLEN (1) +union epro_sch_cos_map_eth0_u { + struct epro_sch_cos_map_eth0 { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 dscp:6; /* [8:3] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_SCH_COS_MAP_ETH0_DWLEN]; +} __packed; +#define NBL_EPRO_SCH_COS_MAP_ETH0_REG(r) (NBL_EPRO_SCH_COS_MAP_ETH0_ADDR + \ + (NBL_EPRO_SCH_COS_MAP_ETH0_DWLEN * 4) * (r)) + +#define NBL_EPRO_SCH_COS_MAP_ETH1_ADDR (0xe74660) +#define NBL_EPRO_SCH_COS_MAP_ETH1_DEPTH (8) +#define NBL_EPRO_SCH_COS_MAP_ETH1_WIDTH (32) +#define NBL_EPRO_SCH_COS_MAP_ETH1_DWLEN (1) +union epro_sch_cos_map_eth1_u { + struct epro_sch_cos_map_eth1 { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 dscp:6; /* [8:3] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_SCH_COS_MAP_ETH1_DWLEN]; +} __packed; +#define NBL_EPRO_SCH_COS_MAP_ETH1_REG(r) (NBL_EPRO_SCH_COS_MAP_ETH1_ADDR + \ + (NBL_EPRO_SCH_COS_MAP_ETH1_DWLEN * 4) * (r)) + +#define NBL_EPRO_SCH_COS_MAP_ETH2_ADDR (0xe74680) +#define NBL_EPRO_SCH_COS_MAP_ETH2_DEPTH (8) +#define NBL_EPRO_SCH_COS_MAP_ETH2_WIDTH (32) +#define NBL_EPRO_SCH_COS_MAP_ETH2_DWLEN (1) +union epro_sch_cos_map_eth2_u { + struct epro_sch_cos_map_eth2 { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 
dscp:6; /* [8:3] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_SCH_COS_MAP_ETH2_DWLEN]; +} __packed; +#define NBL_EPRO_SCH_COS_MAP_ETH2_REG(r) (NBL_EPRO_SCH_COS_MAP_ETH2_ADDR + \ + (NBL_EPRO_SCH_COS_MAP_ETH2_DWLEN * 4) * (r)) + +#define NBL_EPRO_SCH_COS_MAP_ETH3_ADDR (0xe746a0) +#define NBL_EPRO_SCH_COS_MAP_ETH3_DEPTH (8) +#define NBL_EPRO_SCH_COS_MAP_ETH3_WIDTH (32) +#define NBL_EPRO_SCH_COS_MAP_ETH3_DWLEN (1) +union epro_sch_cos_map_eth3_u { + struct epro_sch_cos_map_eth3 { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 dscp:6; /* [8:3] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_SCH_COS_MAP_ETH3_DWLEN]; +} __packed; +#define NBL_EPRO_SCH_COS_MAP_ETH3_REG(r) (NBL_EPRO_SCH_COS_MAP_ETH3_ADDR + \ + (NBL_EPRO_SCH_COS_MAP_ETH3_DWLEN * 4) * (r)) + +#define NBL_EPRO_SCH_COS_MAP_LOOP_ADDR (0xe746c0) +#define NBL_EPRO_SCH_COS_MAP_LOOP_DEPTH (8) +#define NBL_EPRO_SCH_COS_MAP_LOOP_WIDTH (32) +#define NBL_EPRO_SCH_COS_MAP_LOOP_DWLEN (1) +union epro_sch_cos_map_loop_u { + struct epro_sch_cos_map_loop { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 dscp:6; /* [8:3] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_SCH_COS_MAP_LOOP_DWLEN]; +} __packed; +#define NBL_EPRO_SCH_COS_MAP_LOOP_REG(r) (NBL_EPRO_SCH_COS_MAP_LOOP_ADDR + \ + (NBL_EPRO_SCH_COS_MAP_LOOP_DWLEN * 4) * (r)) + +#define NBL_EPRO_PORT_PRI_MDF_EN_ADDR (0xe746e0) +#define NBL_EPRO_PORT_PRI_MDF_EN_DEPTH (1) +#define NBL_EPRO_PORT_PRI_MDF_EN_WIDTH (32) +#define NBL_EPRO_PORT_PRI_MDF_EN_DWLEN (1) +union epro_port_pri_mdf_en_u { + struct epro_port_pri_mdf_en { + u32 eth0:1; /* [0] Default:0x0 RW */ + u32 eth1:1; /* [1] Default:0x0 RW */ + u32 eth2:1; /* [2] Default:0x0 RW */ + u32 eth3:1; /* [3] Default:0x0 RW */ + u32 loop:1; /* [4] Default:0x0 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_PORT_PRI_MDF_EN_DWLEN]; +} __packed; + +#define NBL_EPRO_CFG_TEST_ADDR (0xe7480c) +#define NBL_EPRO_CFG_TEST_DEPTH (1) +#define NBL_EPRO_CFG_TEST_WIDTH (32) +#define NBL_EPRO_CFG_TEST_DWLEN (1) +union epro_cfg_test_u { + struct epro_cfg_test { + u32 test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_EPRO_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_EPRO_BP_STATE_ADDR (0xe74b00) +#define NBL_EPRO_BP_STATE_DEPTH (1) +#define NBL_EPRO_BP_STATE_WIDTH (32) +#define NBL_EPRO_BP_STATE_DWLEN (1) +union epro_bp_state_u { + struct epro_bp_state { + u32 in_bp:1; /* [0] Default:0x0 RO */ + u32 out_bp:1; /* [1] Default:0x0 RO */ + u32 inter_bp:1; /* [2] Default:0x0 RO */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_BP_STATE_DWLEN]; +} __packed; + +#define NBL_EPRO_BP_HISTORY_ADDR (0xe74b04) +#define NBL_EPRO_BP_HISTORY_DEPTH (1) +#define NBL_EPRO_BP_HISTORY_WIDTH (32) +#define NBL_EPRO_BP_HISTORY_DWLEN (1) +union epro_bp_history_u { + struct epro_bp_history { + u32 in_bp:1; /* [0] Default:0x0 RC */ + u32 out_bp:1; /* [1] Default:0x0 RC */ + u32 inter_bp:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_EPRO_MT_ADDR (0xe75400) +#define NBL_EPRO_MT_DEPTH (16) +#define NBL_EPRO_MT_WIDTH (64) +#define NBL_EPRO_MT_DWLEN (2) +#define NBL_EPRO_MT_MAX (8) +union epro_mt_u { + struct epro_mt { + u32 dport:16; /* [15:0] Default:0x0 RW */ + u32 dqueue:11; /* [26:16] Default:0x0 RW */ + u32 car_en:1; /* [27] Default:0x0 
RW */ + u32 car_id:10; /* [37:28] Default:0x0 RW */ + u32 vld:1; /* [38] Default:0x0 RW */ + u32 rsv:25; /* [63:39] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_MT_DWLEN]; +} __packed; +#define NBL_EPRO_MT_REG(r) (NBL_EPRO_MT_ADDR + \ + (NBL_EPRO_MT_DWLEN * 4) * (r)) + +#define NBL_EPRO_KG_TCAM_ADDR (0xe75480) +#define NBL_EPRO_KG_TCAM_DEPTH (16) +#define NBL_EPRO_KG_TCAM_WIDTH (64) +#define NBL_EPRO_KG_TCAM_DWLEN (2) +union epro_kg_tcam_u { + struct epro_kg_tcam { + u32 mask:16; /* [15:0] Default:0x0 RW */ + u32 data:16; /* [31:16] Default:0x0 RW */ + u32 valid_bit:1; /* [32] Default:0x0 RW */ + u32 rsv:31; /* [63:33] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_KG_TCAM_DWLEN]; +} __packed; +#define NBL_EPRO_KG_TCAM_REG(r) (NBL_EPRO_KG_TCAM_ADDR + \ + (NBL_EPRO_KG_TCAM_DWLEN * 4) * (r)) + +#define NBL_EPRO_VPT_ADDR (0xe78000) +#define NBL_EPRO_VPT_DEPTH (1024) +#define NBL_EPRO_VPT_WIDTH (64) +#define NBL_EPRO_VPT_DWLEN (2) +union epro_vpt_u { + struct epro_vpt { + u32 cvlan:16; /* [15:0] Default:0x0 RW */ + u32 svlan:16; /* [31:16] Default:0x0 RW */ + u32 fwd:1; /* [32] Default:0x0 RW */ + u32 mirror_en:1; /* [33] Default:0x0 RW */ + u32 mirror_id:4; /* [37:34] Default:0x0 RW */ + u32 car_en:1; /* [38] Default:0x0 RW */ + u32 car_id:10; /* [48:39] Default:0x0 RW */ + u32 pop_vlan:2; /* [50:49] Default:0x0 RW */ + u32 push_vlan:2; /* [52:51] Default:0x0 RW */ + u32 replace_vlan:2; /* [54:53] Default:0x0 RW */ + u32 rss_alg_sel:1; /* [55] Default:0x0 RW */ + u32 rss_key_type_btm:2; /* [57:56] Default:0x0 RW */ + u32 vld:1; /* [58] Default:0x0 RW */ + u32 rsv:5; /* [63:59] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_VPT_DWLEN]; +} __packed; +#define NBL_EPRO_VPT_REG(r) (NBL_EPRO_VPT_ADDR + \ + (NBL_EPRO_VPT_DWLEN * 4) * (r)) + +#define NBL_EPRO_EPT_ADDR (0xe75800) +#define NBL_EPRO_EPT_DEPTH (8) +#define NBL_EPRO_EPT_WIDTH (64) +#define NBL_EPRO_EPT_DWLEN (2) +union epro_ept_u { + struct epro_ept { + u32 cvlan:16; /* [15:0] Default:0x0 RW */ + u32 svlan:16; /* [31:16] Default:0x0 RW */ + u32 fwd:1; /* [32] Default:0x0 RW */ + u32 mirror_en:1; /* [33] Default:0x0 RW */ + u32 mirror_id:4; /* [37:34] Default:0x0 RW */ + u32 pop_vlan:2; /* [39:38] Default:0x0 RW */ + u32 push_vlan:2; /* [41:40] Default:0x0 RW */ + u32 replace_vlan:2; /* [43:42] Default:0x0 RW */ + u32 lag_alg_sel:2; /* [45:44] Default:0x0 RW */ + u32 lag_port_btm:4; /* [49:46] Default:0x0 RW */ + u32 lag_l2_protect_en:1; /* [50] Default:0x0 RW */ + u32 pfc_sch_cos_default:3; /* [53:51] Default:0x0 RW */ + u32 pfc_mode:1; /* [54] Default:0x0 RW */ + u32 vld:1; /* [55] Default:0x0 RW */ + u32 rsv:8; /* [63:56] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_EPT_DWLEN]; +} __packed; +#define NBL_EPRO_EPT_REG(r) (NBL_EPRO_EPT_ADDR + \ + (NBL_EPRO_EPT_DWLEN * 4) * (r)) + +#define NBL_EPRO_AFT_ADDR (0xe75900) +#define NBL_EPRO_AFT_DEPTH (16) +#define NBL_EPRO_AFT_WIDTH (64) +#define NBL_EPRO_AFT_DWLEN (2) +union epro_aft_u { + struct epro_aft { + u32 action_filter_btm_arr[2]; /* [63:0] Default:0x0 RW */ + } __packed info; + u64 data; +} __packed; +#define NBL_EPRO_AFT_REG(r) (NBL_EPRO_AFT_ADDR + \ + (NBL_EPRO_AFT_DWLEN * 4) * (r)) + +#define NBL_EPRO_RSS_PT_ADDR (0xe76000) +#define NBL_EPRO_RSS_PT_DEPTH (1024) +#define NBL_EPRO_RSS_PT_WIDTH (64) +#define NBL_EPRO_RSS_PT_DWLEN (2) +union epro_rss_pt_u { + struct epro_rss_pt { + u32 entry_size:3; /* [2:0] Default:0x0 RW */ + u32 offset1:14; /* [16:3] Default:0x0 RW */ + u32 offset1_vld:1; /* [17:17] Default:0x0 RW */ + u32 offset0:14; /* 
[31:18] Default:0x0 RW */ + u32 offset0_vld:1; /* [32] Default:0x0 RW */ + u32 vld:1; /* [33] Default:0x0 RW */ + u32 rsv:30; /* [63:34] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_RSS_PT_DWLEN]; +} __packed; +#define NBL_EPRO_RSS_PT_REG(r) (NBL_EPRO_RSS_PT_ADDR + \ + (NBL_EPRO_RSS_PT_DWLEN * 4) * (r)) + +#define NBL_EPRO_ECPVPT_ADDR (0xe7a000) +#define NBL_EPRO_ECPVPT_DEPTH (256) +#define NBL_EPRO_ECPVPT_WIDTH (32) +#define NBL_EPRO_ECPVPT_DWLEN (1) +union epro_ecpvpt_u { + struct epro_ecpvpt { + u32 encap_cvlan_vld0:1; /* [0] Default:0x0 RW */ + u32 encap_svlan_vld0:1; /* [1] Default:0x0 RW */ + u32 encap_vlan_vld1_15:30; /* [31:2] Default:0x0 RW */ + } __packed info; + u32 data[NBL_EPRO_ECPVPT_DWLEN]; +} __packed; +#define NBL_EPRO_ECPVPT_REG(r) (NBL_EPRO_ECPVPT_ADDR + \ + (NBL_EPRO_ECPVPT_DWLEN * 4) * (r)) + +#define NBL_EPRO_ECPIPT_ADDR (0xe7b000) +#define NBL_EPRO_ECPIPT_DEPTH (128) +#define NBL_EPRO_ECPIPT_WIDTH (32) +#define NBL_EPRO_ECPIPT_DWLEN (1) +union epro_ecpipt_u { + struct epro_ecpipt { + u32 encap_ip_type0:1; /* [0] Default:0x0 RW */ + u32 encap_ip_type1_31:31; /* [31:1] Default:0x0 RW */ + } __packed info; + u32 data[NBL_EPRO_ECPIPT_DWLEN]; +} __packed; +#define NBL_EPRO_ECPIPT_REG(r) (NBL_EPRO_ECPIPT_ADDR + \ + (NBL_EPRO_ECPIPT_DWLEN * 4) * (r)) + +#define NBL_EPRO_RSS_RET_ADDR (0xe7c000) +#define NBL_EPRO_RSS_RET_DEPTH (8192) +#define NBL_EPRO_RSS_RET_WIDTH (32) +#define NBL_EPRO_RSS_RET_DWLEN (1) +union epro_rss_ret_u { + struct epro_rss_ret { + u32 dqueue0:11; /* [10:0] Default:0x0 RW */ + u32 vld0:1; /* [11] Default:0x0 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 dqueue1:11; /* [26:16] Default:0x0 RW */ + u32 vld1:1; /* [27] Default:0x0 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_RSS_RET_DWLEN]; +} __packed; +#define NBL_EPRO_RSS_RET_REG(r) (NBL_EPRO_RSS_RET_ADDR + \ + (NBL_EPRO_RSS_RET_DWLEN * 4) * (r)) + +#endif
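Every generated layout in these headers follows one convention: a union overlays a bit-field view (info), used to compose an entry, on the raw dwords (data) that actually cross the bus; *_DWLEN is the entry size in 32-bit words, *_DEPTH the number of entries, and *_REG(r) resolves entry r to *_ADDR + (*_DWLEN * 4) * r. A minimal sketch of a table write built on that convention, using the EPRO mapping table above as the example; the helper name and the hw cookie (assumed to be the ioremapped BAR with register address 0 at offset 0, with <linux/io.h> available) are illustrative, not part of the patch:

/* Illustrative sketch, not part of the patch. */
static void nbl_epro_write_mt_entry(u8 __iomem *hw, u32 idx,
				    u16 dport, u16 dqueue)
{
	union epro_mt_u mt = {};
	int i;

	if (idx >= NBL_EPRO_MT_DEPTH)	/* the MT holds 16 entries */
		return;

	mt.info.dport = dport;		/* [15:0]  destination port  */
	mt.info.dqueue = dqueue;	/* [26:16] destination queue */
	mt.info.vld = 1;		/* [38]    entry valid       */

	/* Entry idx starts at NBL_EPRO_MT_ADDR + 4 * NBL_EPRO_MT_DWLEN * idx. */
	for (i = 0; i < NBL_EPRO_MT_DWLEN; i++)
		writel(mt.data[i], hw + NBL_EPRO_MT_REG(idx) + 4 * i);
}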
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_fem.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_fem.h new file mode 100644 index 0000000000000000000000000000000000000000..37fe59d6ad8a3f16598802c650e66ee1478b958f --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_fem.h @@ -0,0 +1,1485 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_FEM_H +#define NBL_FEM_H 1 + +#include <linux/types.h> + +#define NBL_FEM_BASE (0x00A04000) + +#define NBL_FEM_INT_STATUS_ADDR (0xa04000) +#define NBL_FEM_INT_STATUS_DEPTH (1) +#define NBL_FEM_INT_STATUS_WIDTH (32) +#define NBL_FEM_INT_STATUS_DWLEN (1) +union fem_int_status_u { + struct fem_int_status { + u32 rsv3:2; /* [01:00] Default:0x0 RO */ + u32 fifo_ovf_err:1; /* [02:02] Default:0x0 RWC */ + u32 fifo_udf_err:1; /* [03:03] Default:0x0 RWC */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 bank_cflt_err:1; /* [08:08] Default:0x0 RWC */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_FEM_INT_MASK_ADDR (0xa04004) +#define NBL_FEM_INT_MASK_DEPTH (1) +#define NBL_FEM_INT_MASK_WIDTH (32) +#define NBL_FEM_INT_MASK_DWLEN (1) +union fem_int_mask_u { + struct fem_int_mask { + u32 rsv3:2; /* [01:00] Default:0x0 RO */ + u32 fifo_ovf_err:1; /* [02:02] Default:0x0 RW */ + u32 fifo_udf_err:1; /* [03:03] Default:0x0 RW */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RW */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 bank_cflt_err:1; /* [08:08] Default:0x0 RW */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_INT_MASK_DWLEN]; +} __packed; + +#define NBL_FEM_INT_SET_ADDR (0xa04008) +#define NBL_FEM_INT_SET_DEPTH (1) +#define NBL_FEM_INT_SET_WIDTH (32) +#define NBL_FEM_INT_SET_DWLEN (1) +union fem_int_set_u { + struct fem_int_set { + u32 rsv3:2; /* [01:00] Default:0x0 RO */ + u32 fifo_ovf_err:1; /* [02:02] Default:0x0 WO */ + u32 fifo_udf_err:1; /* [03:03] Default:0x0 WO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 bank_cflt_err:1; /* [08:08] Default:0x0 WO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_INT_SET_DWLEN]; +} __packed; + +#define NBL_FEM_INIT_DONE_ADDR (0xa0400c) +#define NBL_FEM_INIT_DONE_DEPTH (1) +#define NBL_FEM_INIT_DONE_WIDTH (32) +#define NBL_FEM_INIT_DONE_DWLEN (1) +union fem_init_done_u { + struct fem_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_FEM_CIF_ERR_INFO_ADDR (0xa04040) +#define NBL_FEM_CIF_ERR_INFO_DEPTH (1) +#define NBL_FEM_CIF_ERR_INFO_WIDTH (32) +#define NBL_FEM_CIF_ERR_INFO_DWLEN (1) +union fem_cif_err_info_u { + struct fem_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_FEM_CFG_ERR_INFO_ADDR (0xa04068) +#define NBL_FEM_CFG_ERR_INFO_DEPTH (1) +#define NBL_FEM_CFG_ERR_INFO_WIDTH (32) +#define NBL_FEM_CFG_ERR_INFO_DWLEN (1) +union fem_cfg_err_info_u { + struct fem_cfg_err_info { + u32 addr:24; /* [23:00] Default:0x0 RO */ + u32 id:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define
NBL_FEM_BANK_CFLT_ERR_INFO0_ADDR (0xa04074) +#define NBL_FEM_BANK_CFLT_ERR_INFO0_DEPTH (1) +#define NBL_FEM_BANK_CFLT_ERR_INFO0_WIDTH (32) +#define NBL_FEM_BANK_CFLT_ERR_INFO0_DWLEN (1) +union fem_bank_cflt_err_info0_u { + struct fem_bank_cflt_err_info0 { + u32 addr0:24; /* [23:00] Default:0x0 RO */ + u32 id:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_BANK_CFLT_ERR_INFO0_DWLEN]; +} __packed; + +#define NBL_FEM_BANK_CFLT_ERR_INFO1_ADDR (0xa04078) +#define NBL_FEM_BANK_CFLT_ERR_INFO1_DEPTH (1) +#define NBL_FEM_BANK_CFLT_ERR_INFO1_WIDTH (32) +#define NBL_FEM_BANK_CFLT_ERR_INFO1_DWLEN (1) +union fem_bank_cflt_err_info1_u { + struct fem_bank_cflt_err_info1 { + u32 addr1:24; /* [23:00] Default:0x0 RO */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_BANK_CFLT_ERR_INFO1_DWLEN]; +} __packed; + +#define NBL_FEM_CAR_CTRL_ADDR (0xa04100) +#define NBL_FEM_CAR_CTRL_DEPTH (1) +#define NBL_FEM_CAR_CTRL_WIDTH (32) +#define NBL_FEM_CAR_CTRL_DWLEN (1) +union fem_car_ctrl_u { + struct fem_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_BP_TH_ADDR (0xa04118) +#define NBL_FEM_BP_TH_DEPTH (1) +#define NBL_FEM_BP_TH_WIDTH (32) +#define NBL_FEM_BP_TH_DWLEN (1) +union fem_bp_th_u { + struct fem_bp_th { + u32 th:12; /* [11:00] Default:0xf RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_BP_TH_DWLEN]; +} __packed; + +#define NBL_FEM_HT_BANK_SEL_BTM_ADDR (0xa0411c) +#define NBL_FEM_HT_BANK_SEL_BTM_DEPTH (1) +#define NBL_FEM_HT_BANK_SEL_BTM_WIDTH (32) +#define NBL_FEM_HT_BANK_SEL_BTM_DWLEN (1) +union fem_ht_bank_sel_btm_u { + struct fem_ht_bank_sel_btm { + u32 port0_ht_depth:5; /* [04:00] Default:0x8 RW */ + u32 rsv2:3; /* [07:05] Default:0x0 RO */ + u32 port1_ht_depth:5; /* [12:08] Default:0x8 RW */ + u32 rsv1:3; /* [15:13] Default:0x0 RO */ + u32 port2_ht_depth:5; /* [20:16] Default:0x8 RW */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_HT_BANK_SEL_BTM_DWLEN]; +} __packed; + +#define NBL_FEM_INIT_START_ADDR (0xa04180) +#define NBL_FEM_INIT_START_DEPTH (1) +#define NBL_FEM_INIT_START_WIDTH (32) +#define NBL_FEM_INIT_START_DWLEN (1) +union fem_init_start_u { + struct fem_init_start { + u32 start:1; /* [00:00] Default:0x0 WO */ + u32 ht_bank_init:7; /* [07:01] Default:0x0 WO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_INIT_START_DWLEN]; +} __packed; + +#define NBL_FEM_MHASH_ADDR (0xa04188) +#define NBL_FEM_MHASH_DEPTH (1) +#define NBL_FEM_MHASH_WIDTH (32) +#define NBL_FEM_MHASH_DWLEN (1) +union fem_mhash_u { + struct fem_mhash { + u32 mod_action_id:6; /* [05:00] Default:0x12 RW */ + u32 hash0_action_id:6; /* [11:06] Default:0x13 RW */ + u32 hash1_action_id:6; /* [17:12] Default:0x14 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_MHASH_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_ACCESS_CFG_ADDR (0xa04190) +#define NBL_FEM_CPU_ACCESS_CFG_DEPTH (1) +#define NBL_FEM_CPU_ACCESS_CFG_WIDTH (32) +#define NBL_FEM_CPU_ACCESS_CFG_DWLEN (1) +union fem_cpu_access_cfg_u { + struct fem_cpu_access_cfg { + u32 cpu_access_bp_th:8; /* [7:0] Default:0xf RW */ + u32 rsv1:8; /* [15:8] Default:0x0 RO */ + u32 cpu_access_timeout_th:10; /* [25:16] 
Default:0x50 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CPU_ACCESS_CFG_DWLEN]; +} __packed; + +#define NBL_FEM_HT_BANK_SEL_BITMAP_ADDR (0xa04200) +#define NBL_FEM_HT_BANK_SEL_BITMAP_DEPTH (1) +#define NBL_FEM_HT_BANK_SEL_BITMAP_WIDTH (32) +#define NBL_FEM_HT_BANK_SEL_BITMAP_DWLEN (1) +union fem_ht_bank_sel_bitmap_u { + struct fem_ht_bank_sel_bitmap { + u32 port0_bank_sel:8; /* [7:0] Default:0x1 RW */ + u32 port1_bank_sel:8; /* [15:8] Default:0x6 RW */ + u32 port2_bank_sel:8; /* [23:16] Default:0x78 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_HT_BANK_SEL_BITMAP_DWLEN]; +} __packed; + +#define NBL_FEM_KT_BANK_SEL_BITMAP_ADDR (0xa04204) +#define NBL_FEM_KT_BANK_SEL_BITMAP_DEPTH (1) +#define NBL_FEM_KT_BANK_SEL_BITMAP_WIDTH (32) +#define NBL_FEM_KT_BANK_SEL_BITMAP_DWLEN (1) +union fem_kt_bank_sel_bitmap_u { + struct fem_kt_bank_sel_bitmap { + u32 port0_bank_sel:8; /* [7:0] Default:0x1 RW */ + u32 port1_bank_sel:8; /* [15:8] Default:0x6 RW */ + u32 port2_bank_sel:8; /* [23:16] Default:0xF8 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_KT_BANK_SEL_BITMAP_DWLEN]; +} __packed; + +#define NBL_FEM_AT_BANK_SEL_BITMAP_ADDR (0xa04208) +#define NBL_FEM_AT_BANK_SEL_BITMAP_DEPTH (1) +#define NBL_FEM_AT_BANK_SEL_BITMAP_WIDTH (32) +#define NBL_FEM_AT_BANK_SEL_BITMAP_DWLEN (1) +union fem_at_bank_sel_bitmap_u { + struct fem_at_bank_sel_bitmap { + u32 port0_bank_sel:12; /* [11:0] Default:0x3 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 port1_bank_sel:12; /* [27:16] Default:0x1C RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AT_BANK_SEL_BITMAP_DWLEN]; +} __packed; + +#define NBL_FEM_AT_BANK_SEL_BITMAP2_ADDR (0xa0420c) +#define NBL_FEM_AT_BANK_SEL_BITMAP2_DEPTH (1) +#define NBL_FEM_AT_BANK_SEL_BITMAP2_WIDTH (32) +#define NBL_FEM_AT_BANK_SEL_BITMAP2_DWLEN (1) +union fem_at_bank_sel_bitmap2_u { + struct fem_at_bank_sel_bitmap2 { + u32 port2_bank_sel:12; /* [11:0] Default:0xFE0 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AT_BANK_SEL_BITMAP2_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_EN_ADDR (0xa04210) +#define NBL_FEM_AGE_EN_DEPTH (1) +#define NBL_FEM_AGE_EN_WIDTH (32) +#define NBL_FEM_AGE_EN_DWLEN (1) +union fem_age_en_u { + struct fem_age_en { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AGE_EN_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_HARD_STEP_ADDR (0xa04214) +#define NBL_FEM_AGE_HARD_STEP_DEPTH (1) +#define NBL_FEM_AGE_HARD_STEP_WIDTH (32) +#define NBL_FEM_AGE_HARD_STEP_DWLEN (1) +union fem_age_hard_step_u { + struct fem_age_hard_step { + u32 data:3; /* [2:0] Default:0x6 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AGE_HARD_STEP_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_TIME_UNIT_ADDR (0xa04218) +#define NBL_FEM_AGE_TIME_UNIT_DEPTH (1) +#define NBL_FEM_AGE_TIME_UNIT_WIDTH (32) +#define NBL_FEM_AGE_TIME_UNIT_DWLEN (1) +union fem_age_time_unit_u { + struct fem_age_time_unit { + u32 data:32; /* [31:0] Default:0x17CB5 RW */ + } __packed info; + u32 data[NBL_FEM_AGE_TIME_UNIT_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_INFO_HEAD_ADDR (0xa04220) +#define NBL_FEM_AGE_INFO_HEAD_DEPTH (1) +#define NBL_FEM_AGE_INFO_HEAD_WIDTH (32) +#define NBL_FEM_AGE_INFO_HEAD_DWLEN (1) +union fem_age_info_head_u { + struct fem_age_info_head { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + 
u32 data[NBL_FEM_AGE_INFO_HEAD_DWLEN]; +} __packed; + +#define NBL_FEM_KEY_IN_ADDR (0xa04240) +#define NBL_FEM_KEY_IN_DEPTH (1) +#define NBL_FEM_KEY_IN_WIDTH (32) +#define NBL_FEM_KEY_IN_DWLEN (1) +union fem_key_in_u { + struct fem_key_in { + u32 em0_cap_mode:1; /* [0:0] Default:0x1 RW */ + u32 em1_cap_mode:1; /* [01:01] Default:0x1 RW */ + u32 em2_cap_mode:1; /* [02:02] Default:0x1 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_KEY_IN_DWLEN]; +} __packed; + +#define NBL_FEM_CAP_ADDR (0xa04244) +#define NBL_FEM_CAP_DEPTH (1) +#define NBL_FEM_CAP_WIDTH (32) +#define NBL_FEM_CAP_DWLEN (1) +union fem_cap_u { + struct fem_cap { + u32 em0_cap_start:1; /* [0:0] Default:0x0 WO */ + u32 em1_cap_start:1; /* [01:01] Default:0x0 WO */ + u32 em2_cap_start:1; /* [02:02] Default:0x0 WO */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CAP_DWLEN]; +} __packed; + +#define NBL_FEM_HT_ACCESS_CTRL_ADDR (0xa04300) +#define NBL_FEM_HT_ACCESS_CTRL_DEPTH (1) +#define NBL_FEM_HT_ACCESS_CTRL_WIDTH (32) +#define NBL_FEM_HT_ACCESS_CTRL_DWLEN (1) +union fem_ht_access_ctrl_u { + struct fem_ht_access_ctrl { + u32 addr:17; /* [16:00] Default:0x0 RW */ + u32 port:2; /* [18:17] Default:0x0 RW */ + u32 rsv:10; /* [28:19] Default:0x0 RO */ + u32 access_size:1; /* [29:29] Default:0x0 RW */ + u32 rw:1; /* [30:30] Default:0x0 RW */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_HT_ACCESS_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_HT_ACCESS_ACK_ADDR (0xa04304) +#define NBL_FEM_HT_ACCESS_ACK_DEPTH (1) +#define NBL_FEM_HT_ACCESS_ACK_WIDTH (32) +#define NBL_FEM_HT_ACCESS_ACK_DWLEN (1) +union fem_ht_access_ack_u { + struct fem_ht_access_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:1; /* [01:01] Default:0x0 RWW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_HT_ACCESS_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_HT_ACCESS_DATA_ADDR (0xa04308) +#define NBL_FEM_HT_ACCESS_DATA_DEPTH (4) +#define NBL_FEM_HT_ACCESS_DATA_WIDTH (32) +#define NBL_FEM_HT_ACCESS_DATA_DWLEN (1) +union fem_ht_access_data_u { + struct fem_ht_access_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_HT_ACCESS_DATA_DWLEN]; +} __packed; +#define NBL_FEM_HT_ACCESS_DATA_REG(r) (NBL_FEM_HT_ACCESS_DATA_ADDR + \ + (NBL_FEM_HT_ACCESS_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_KT_ACCESS_CTRL_ADDR (0xa04340) +#define NBL_FEM_KT_ACCESS_CTRL_DEPTH (1) +#define NBL_FEM_KT_ACCESS_CTRL_WIDTH (32) +#define NBL_FEM_KT_ACCESS_CTRL_DWLEN (1) +union fem_kt_access_ctrl_u { + struct fem_kt_access_ctrl { + u32 addr:17; /* [16:00] Default:0x0 RW */ + u32 rsv:12; /* [28:17] Default:0x0 RO */ + u32 access_size:1; /* [29:29] Default:0x0 RW */ + u32 rw:1; /* [30:30] Default:0x0 RW */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_KT_ACCESS_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_KT_ACCESS_ACK_ADDR (0xa04344) +#define NBL_FEM_KT_ACCESS_ACK_DEPTH (1) +#define NBL_FEM_KT_ACCESS_ACK_WIDTH (32) +#define NBL_FEM_KT_ACCESS_ACK_DWLEN (1) +union fem_kt_access_ack_u { + struct fem_kt_access_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:1; /* [01:01] Default:0x0 RWW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_KT_ACCESS_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_KT_ACCESS_DATA_ADDR (0xa04348) +#define NBL_FEM_KT_ACCESS_DATA_DEPTH (10) +#define NBL_FEM_KT_ACCESS_DATA_WIDTH (32) +#define 
NBL_FEM_KT_ACCESS_DATA_DWLEN (1) +union fem_kt_access_data_u { + struct fem_kt_access_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_KT_ACCESS_DATA_DWLEN]; +} __packed; +#define NBL_FEM_KT_ACCESS_DATA_REG(r) (NBL_FEM_KT_ACCESS_DATA_ADDR + \ + (NBL_FEM_KT_ACCESS_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_AT_ACCESS_CTRL_ADDR (0xa04390) +#define NBL_FEM_AT_ACCESS_CTRL_DEPTH (1) +#define NBL_FEM_AT_ACCESS_CTRL_WIDTH (32) +#define NBL_FEM_AT_ACCESS_CTRL_DWLEN (1) +union fem_at_access_ctrl_u { + struct fem_at_access_ctrl { + u32 addr:17; /* [16:00] Default:0x0 RW */ + u32 rsv:12; /* [28:17] Default:0x0 RO */ + u32 access_size:1; /* [29:29] Default:0x0 RW */ + u32 rw:1; /* [30:30] Default:0x0 RW */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_AT_ACCESS_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_AT_ACCESS_ACK_ADDR (0xa04394) +#define NBL_FEM_AT_ACCESS_ACK_DEPTH (1) +#define NBL_FEM_AT_ACCESS_ACK_WIDTH (32) +#define NBL_FEM_AT_ACCESS_ACK_DWLEN (1) +union fem_at_access_ack_u { + struct fem_at_access_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:1; /* [01:01] Default:0x0 RWW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AT_ACCESS_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_AT_ACCESS_DATA_ADDR (0xa04398) +#define NBL_FEM_AT_ACCESS_DATA_DEPTH (6) +#define NBL_FEM_AT_ACCESS_DATA_WIDTH (32) +#define NBL_FEM_AT_ACCESS_DATA_DWLEN (1) +union fem_at_access_data_u { + struct fem_at_access_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_AT_ACCESS_DATA_DWLEN]; +} __packed; +#define NBL_FEM_AT_ACCESS_DATA_REG(r) (NBL_FEM_AT_ACCESS_DATA_ADDR + \ + (NBL_FEM_AT_ACCESS_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_AGE_TBL_ACCESS_CTRL_ADDR (0xa04400) +#define NBL_FEM_AGE_TBL_ACCESS_CTRL_DEPTH (1) +#define NBL_FEM_AGE_TBL_ACCESS_CTRL_WIDTH (32) +#define NBL_FEM_AGE_TBL_ACCESS_CTRL_DWLEN (1) +union fem_age_tbl_access_ctrl_u { + struct fem_age_tbl_access_ctrl { + u32 addr:17; /* [16:0] Default:0x0 RW */ + u32 rsv:13; /* [29:17] Default:0x0 RO */ + u32 rw:1; /* [30:30] Default:0x0 RW */ + u32 start:1; /* [31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_AGE_TBL_ACCESS_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_TBL_ACCESS_ACK_ADDR (0xa04404) +#define NBL_FEM_AGE_TBL_ACCESS_ACK_DEPTH (1) +#define NBL_FEM_AGE_TBL_ACCESS_ACK_WIDTH (32) +#define NBL_FEM_AGE_TBL_ACCESS_ACK_DWLEN (1) +union fem_age_tbl_access_ack_u { + struct fem_age_tbl_access_ack { + u32 done:1; /* [0] Default:0x0 RC */ + u32 status:1; /* [1] Default:0x0 RWW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AGE_TBL_ACCESS_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_TBL_ACCESS_DATA_ADDR (0xa04408) +#define NBL_FEM_AGE_TBL_ACCESS_DATA_DEPTH (12) +#define NBL_FEM_AGE_TBL_ACCESS_DATA_WIDTH (32) +#define NBL_FEM_AGE_TBL_ACCESS_DATA_DWLEN (1) +union fem_age_tbl_access_data_u { + struct fem_age_tbl_access_data { + u32 data:32; /* [31:0] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_AGE_TBL_ACCESS_DATA_DWLEN]; +} __packed; +#define NBL_FEM_AGE_TBL_ACCESS_DATA_REG(r) (NBL_FEM_AGE_TBL_ACCESS_DATA_ADDR + \ + (NBL_FEM_AGE_TBL_ACCESS_DATA_DWLEN * 4) * (r)) +
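The *_ACCESS_CTRL / *_ACCESS_ACK / *_ACCESS_DATA triplets above (HT, KT, AT and the age table) form an indirect access window: software stages an entry in the DATA dwords, kicks CTRL with the row address, direction and a start pulse, then polls ACK until done (read-clear, RC) reports and checks status. A sketch of that sequence against the key table; the helper, the timeout policy and the rw polarity (1 taken to mean write) are assumptions, not taken from the patch, and <linux/io.h>, <linux/delay.h> and <linux/errno.h> are assumed available:

/* Illustrative sketch, not part of the patch. */
static int nbl_fem_kt_write(u8 __iomem *hw, u32 row, const u32 *entry, int dws)
{
	union fem_kt_access_ctrl_u ctrl = {};
	union fem_kt_access_ack_u ack;
	int i, retry = 1000;

	if (dws > NBL_FEM_KT_ACCESS_DATA_DEPTH)
		return -EINVAL;

	/* 1. Stage the entry in the data window. */
	for (i = 0; i < dws; i++)
		writel(entry[i], hw + NBL_FEM_KT_ACCESS_DATA_REG(i));

	/* 2. Kick the access: row address, direction, start pulse. */
	ctrl.info.addr = row;
	ctrl.info.rw = 1;		/* assumed: 1 = write */
	ctrl.info.start = 1;
	writel(ctrl.data[0], hw + NBL_FEM_KT_ACCESS_CTRL_ADDR);

	/* 3. Wait for completion; 'done' is read-clear (RC). */
	do {
		ack.data[0] = readl(hw + NBL_FEM_KT_ACCESS_ACK_ADDR);
		if (ack.info.done)
			return ack.info.status ? -EIO : 0;
		udelay(10);
	} while (--retry);

	return -ETIMEDOUT;
}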
+#define NBL_FEM_CPU_INSERT_SEARCH0_CTRL_ADDR (0xa04500) +#define NBL_FEM_CPU_INSERT_SEARCH0_CTRL_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH0_CTRL_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH0_CTRL_DWLEN (1) +union fem_cpu_insert_search0_ctrl_u { + struct fem_cpu_insert_search0_ctrl { + u32 rsv:31; /* [30:00] Default:0x0 RO */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH0_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH0_ACK_ADDR (0xa04504) +#define NBL_FEM_CPU_INSERT_SEARCH0_ACK_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH0_ACK_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH0_ACK_DWLEN (1) +union fem_cpu_insert_search0_ack_u { + struct fem_cpu_insert_search0_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:2; /* [02:01] Default:0x0 RWW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH0_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH0_DATA_ADDR (0xa04508) +#define NBL_FEM_CPU_INSERT_SEARCH0_DATA_DEPTH (11) +#define NBL_FEM_CPU_INSERT_SEARCH0_DATA_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH0_DATA_DWLEN (1) +union fem_cpu_insert_search0_data_u { + struct fem_cpu_insert_search0_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH0_DATA_DWLEN]; +} __packed; +#define NBL_FEM_CPU_INSERT_SEARCH0_DATA_REG(r) (NBL_FEM_CPU_INSERT_SEARCH0_DATA_ADDR + \ + (NBL_FEM_CPU_INSERT_SEARCH0_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_CPU_INSERT_SEARCH1_CTRL_ADDR (0xa04550) +#define NBL_FEM_CPU_INSERT_SEARCH1_CTRL_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH1_CTRL_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH1_CTRL_DWLEN (1) +union fem_cpu_insert_search1_ctrl_u { + struct fem_cpu_insert_search1_ctrl { + u32 rsv:31; /* [30:00] Default:0x0 RO */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH1_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH1_ACK_ADDR (0xa04554) +#define NBL_FEM_CPU_INSERT_SEARCH1_ACK_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH1_ACK_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH1_ACK_DWLEN (1) +union fem_cpu_insert_search1_ack_u { + struct fem_cpu_insert_search1_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:2; /* [02:01] Default:0x0 RWW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH1_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH1_DATA_ADDR (0xa04558) +#define NBL_FEM_CPU_INSERT_SEARCH1_DATA_DEPTH (11) +#define NBL_FEM_CPU_INSERT_SEARCH1_DATA_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH1_DATA_DWLEN (1) +union fem_cpu_insert_search1_data_u { + struct fem_cpu_insert_search1_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH1_DATA_DWLEN]; +} __packed; +#define NBL_FEM_CPU_INSERT_SEARCH1_DATA_REG(r) (NBL_FEM_CPU_INSERT_SEARCH1_DATA_ADDR + \ + (NBL_FEM_CPU_INSERT_SEARCH1_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_CPU_INSERT_SEARCH2_CTRL_ADDR (0xa045a0) +#define NBL_FEM_CPU_INSERT_SEARCH2_CTRL_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH2_CTRL_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH2_CTRL_DWLEN (1) +union fem_cpu_insert_search2_ctrl_u { + struct fem_cpu_insert_search2_ctrl { + u32 rsv:31; /* [30:00] Default:0x0 RO */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH2_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH2_ACK_ADDR (0xa045a4) +#define NBL_FEM_CPU_INSERT_SEARCH2_ACK_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH2_ACK_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH2_ACK_DWLEN (1) +union fem_cpu_insert_search2_ack_u { + struct fem_cpu_insert_search2_ack { + u32 done:1; /*
[00:00] Default:0x0 RC */ + u32 status:2; /* [02:01] Default:0x0 RWW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH2_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH2_DATA_ADDR (0xa045a8) +#define NBL_FEM_CPU_INSERT_SEARCH2_DATA_DEPTH (11) +#define NBL_FEM_CPU_INSERT_SEARCH2_DATA_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH2_DATA_DWLEN (1) +union fem_cpu_insert_search2_data_u { + struct fem_cpu_insert_search2_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH2_DATA_DWLEN]; +} __packed; +#define NBL_FEM_CPU_INSERT_SEARCH2_DATA_REG(r) (NBL_FEM_CPU_INSERT_SEARCH2_DATA_ADDR + \ + (NBL_FEM_CPU_INSERT_SEARCH2_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_CFG_TEST_ADDR (0xa0480c) +#define NBL_FEM_CFG_TEST_DEPTH (1) +#define NBL_FEM_CFG_TEST_WIDTH (32) +#define NBL_FEM_CFG_TEST_DWLEN (1) +union fem_cfg_test_u { + struct fem_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_FEM_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_FEM_RCV_CMDQ_ADDR (0xa04818) +#define NBL_FEM_RCV_CMDQ_DEPTH (1) +#define NBL_FEM_RCV_CMDQ_WIDTH (32) +#define NBL_FEM_RCV_CMDQ_DWLEN (1) +union fem_rcv_cmdq_u { + struct fem_rcv_cmdq { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_RCV_CMDQ_DWLEN]; +} __packed; + +#define NBL_FEM_SND_CMDQ_ADDR (0xa0481c) +#define NBL_FEM_SND_CMDQ_DEPTH (1) +#define NBL_FEM_SND_CMDQ_WIDTH (32) +#define NBL_FEM_SND_CMDQ_DWLEN (1) +union fem_snd_cmdq_u { + struct fem_snd_cmdq { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_SND_CMDQ_DWLEN]; +} __packed; + +#define NBL_FEM_CMDQ_PRO_ADDR (0xa04820) +#define NBL_FEM_CMDQ_PRO_DEPTH (1) +#define NBL_FEM_CMDQ_PRO_WIDTH (32) +#define NBL_FEM_CMDQ_PRO_DWLEN (1) +union fem_cmdq_pro_u { + struct fem_cmdq_pro { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_CMDQ_PRO_DWLEN]; +} __packed; + +#define NBL_FEM_PP0_REQ_ADDR (0xa04850) +#define NBL_FEM_PP0_REQ_DEPTH (1) +#define NBL_FEM_PP0_REQ_WIDTH (32) +#define NBL_FEM_PP0_REQ_DWLEN (1) +union fem_pp0_req_u { + struct fem_pp0_req { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP0_REQ_DWLEN]; +} __packed; + +#define NBL_FEM_PP0_ALL_RSP_ADDR (0xa04854) +#define NBL_FEM_PP0_ALL_RSP_DEPTH (1) +#define NBL_FEM_PP0_ALL_RSP_WIDTH (32) +#define NBL_FEM_PP0_ALL_RSP_DWLEN (1) +union fem_pp0_all_rsp_u { + struct fem_pp0_all_rsp { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP0_ALL_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_PP0_RSP_ADDR (0xa04858) +#define NBL_FEM_PP0_RSP_DEPTH (1) +#define NBL_FEM_PP0_RSP_WIDTH (32) +#define NBL_FEM_PP0_RSP_DWLEN (1) +union fem_pp0_rsp_u { + struct fem_pp0_rsp { + u32 miss_cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 err_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP0_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_HT_LOOKUP_ADDR (0xa04878) +#define NBL_FEM_EM0_HT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM0_HT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM0_HT_LOOKUP_DWLEN (1) +union fem_em0_ht_lookup_u { + struct fem_em0_ht_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_HT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_HT_HIT_ADDR (0xa0487c) +#define NBL_FEM_EM0_HT_HIT_DEPTH (1) +#define NBL_FEM_EM0_HT_HIT_WIDTH (32) +#define 
NBL_FEM_EM0_HT_HIT_DWLEN (1) +union fem_em0_ht_hit_u { + struct fem_em0_ht_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_HT_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_TCAM_LOOKUP_ADDR (0xa04880) +#define NBL_FEM_EM0_TCAM_LOOKUP_DEPTH (1) +#define NBL_FEM_EM0_TCAM_LOOKUP_WIDTH (32) +#define NBL_FEM_EM0_TCAM_LOOKUP_DWLEN (1) +union fem_em0_tcam_lookup_u { + struct fem_em0_tcam_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_TCAM_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_TCAM_HIT_ADDR (0xa04884) +#define NBL_FEM_EM0_TCAM_HIT_DEPTH (1) +#define NBL_FEM_EM0_TCAM_HIT_WIDTH (32) +#define NBL_FEM_EM0_TCAM_HIT_DWLEN (1) +union fem_em0_tcam_hit_u { + struct fem_em0_tcam_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_KT_LOOKUP_ADDR (0xa04888) +#define NBL_FEM_EM0_KT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM0_KT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM0_KT_LOOKUP_DWLEN (1) +union fem_em0_kt_lookup_u { + struct fem_em0_kt_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_KT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_KT_HIT_ADDR (0xa0488c) +#define NBL_FEM_EM0_KT_HIT_DEPTH (1) +#define NBL_FEM_EM0_KT_HIT_WIDTH (32) +#define NBL_FEM_EM0_KT_HIT_DWLEN (1) +union fem_em0_kt_hit_u { + struct fem_em0_kt_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_KT_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_PP1_REQ_ADDR (0xa048b0) +#define NBL_FEM_PP1_REQ_DEPTH (1) +#define NBL_FEM_PP1_REQ_WIDTH (32) +#define NBL_FEM_PP1_REQ_DWLEN (1) +union fem_pp1_req_u { + struct fem_pp1_req { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP1_REQ_DWLEN]; +} __packed; + +#define NBL_FEM_PP1_ALL_RSP_ADDR (0xa048b4) +#define NBL_FEM_PP1_ALL_RSP_DEPTH (1) +#define NBL_FEM_PP1_ALL_RSP_WIDTH (32) +#define NBL_FEM_PP1_ALL_RSP_DWLEN (1) +union fem_pp1_all_rsp_u { + struct fem_pp1_all_rsp { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP1_ALL_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_PP1_RSP_ADDR (0xa048b8) +#define NBL_FEM_PP1_RSP_DEPTH (1) +#define NBL_FEM_PP1_RSP_WIDTH (32) +#define NBL_FEM_PP1_RSP_DWLEN (1) +union fem_pp1_rsp_u { + struct fem_pp1_rsp { + u32 miss_cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 err_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP1_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_HT_LOOKUP_ADDR (0xa048d8) +#define NBL_FEM_EM1_HT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM1_HT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM1_HT_LOOKUP_DWLEN (1) +union fem_em1_ht_lookup_u { + struct fem_em1_ht_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_HT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_HT_HIT_ADDR (0xa048dc) +#define NBL_FEM_EM1_HT_HIT_DEPTH (1) +#define NBL_FEM_EM1_HT_HIT_WIDTH (32) +#define NBL_FEM_EM1_HT_HIT_DWLEN (1) +union fem_em1_ht_hit_u { + struct fem_em1_ht_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_HT_HIT_DWLEN]; +} 
__packed; + +#define NBL_FEM_EM1_TCAM_LOOKUP_ADDR (0xa048e0) +#define NBL_FEM_EM1_TCAM_LOOKUP_DEPTH (1) +#define NBL_FEM_EM1_TCAM_LOOKUP_WIDTH (32) +#define NBL_FEM_EM1_TCAM_LOOKUP_DWLEN (1) +union fem_em1_tcam_lookup_u { + struct fem_em1_tcam_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_TCAM_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_TCAM_HIT_ADDR (0xa048e4) +#define NBL_FEM_EM1_TCAM_HIT_DEPTH (1) +#define NBL_FEM_EM1_TCAM_HIT_WIDTH (32) +#define NBL_FEM_EM1_TCAM_HIT_DWLEN (1) +union fem_em1_tcam_hit_u { + struct fem_em1_tcam_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_KT_LOOKUP_ADDR (0xa048e8) +#define NBL_FEM_EM1_KT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM1_KT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM1_KT_LOOKUP_DWLEN (1) +union fem_em1_kt_lookup_u { + struct fem_em1_kt_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_KT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_KT_HIT_ADDR (0xa048ec) +#define NBL_FEM_EM1_KT_HIT_DEPTH (1) +#define NBL_FEM_EM1_KT_HIT_WIDTH (32) +#define NBL_FEM_EM1_KT_HIT_DWLEN (1) +union fem_em1_kt_hit_u { + struct fem_em1_kt_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_KT_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_PP2_REQ_ADDR (0xa04910) +#define NBL_FEM_PP2_REQ_DEPTH (1) +#define NBL_FEM_PP2_REQ_WIDTH (32) +#define NBL_FEM_PP2_REQ_DWLEN (1) +union fem_pp2_req_u { + struct fem_pp2_req { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP2_REQ_DWLEN]; +} __packed; + +#define NBL_FEM_PP2_ALL_RSP_ADDR (0xa04914) +#define NBL_FEM_PP2_ALL_RSP_DEPTH (1) +#define NBL_FEM_PP2_ALL_RSP_WIDTH (32) +#define NBL_FEM_PP2_ALL_RSP_DWLEN (1) +union fem_pp2_all_rsp_u { + struct fem_pp2_all_rsp { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP2_ALL_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_PP2_RSP_ADDR (0xa04918) +#define NBL_FEM_PP2_RSP_DEPTH (1) +#define NBL_FEM_PP2_RSP_WIDTH (32) +#define NBL_FEM_PP2_RSP_DWLEN (1) +union fem_pp2_rsp_u { + struct fem_pp2_rsp { + u32 miss_cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 err_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP2_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_HT_LOOKUP_ADDR (0xa04938) +#define NBL_FEM_EM2_HT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM2_HT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM2_HT_LOOKUP_DWLEN (1) +union fem_em2_ht_lookup_u { + struct fem_em2_ht_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_HT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_HT_HIT_ADDR (0xa0493c) +#define NBL_FEM_EM2_HT_HIT_DEPTH (1) +#define NBL_FEM_EM2_HT_HIT_WIDTH (32) +#define NBL_FEM_EM2_HT_HIT_DWLEN (1) +union fem_em2_ht_hit_u { + struct fem_em2_ht_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_HT_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_TCAM_LOOKUP_ADDR (0xa04940) +#define NBL_FEM_EM2_TCAM_LOOKUP_DEPTH (1) +#define NBL_FEM_EM2_TCAM_LOOKUP_WIDTH (32) +#define NBL_FEM_EM2_TCAM_LOOKUP_DWLEN (1) +union fem_em2_tcam_lookup_u { + struct 
fem_em2_tcam_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_TCAM_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_TCAM_HIT_ADDR (0xa04944) +#define NBL_FEM_EM2_TCAM_HIT_DEPTH (1) +#define NBL_FEM_EM2_TCAM_HIT_WIDTH (32) +#define NBL_FEM_EM2_TCAM_HIT_DWLEN (1) +union fem_em2_tcam_hit_u { + struct fem_em2_tcam_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_KT_LOOKUP_ADDR (0xa04948) +#define NBL_FEM_EM2_KT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM2_KT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM2_KT_LOOKUP_DWLEN (1) +union fem_em2_kt_lookup_u { + struct fem_em2_kt_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_KT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_KT_HIT_ADDR (0xa0494c) +#define NBL_FEM_EM2_KT_HIT_DEPTH (1) +#define NBL_FEM_EM2_KT_HIT_WIDTH (32) +#define NBL_FEM_EM2_KT_HIT_DWLEN (1) +union fem_em2_kt_hit_u { + struct fem_em2_kt_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_KT_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_INFO_DROP_ADDR (0xa04950) +#define NBL_FEM_AGE_INFO_DROP_DEPTH (1) +#define NBL_FEM_AGE_INFO_DROP_WIDTH (32) +#define NBL_FEM_AGE_INFO_DROP_DWLEN (1) +union fem_age_info_drop_u { + struct fem_age_info_drop { + u32 cnt:32; /* [31:00] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_FEM_AGE_INFO_DROP_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_PP_KEY_CHANGE_ADDR (0xa04954) +#define NBL_FEM_EM0_PP_KEY_CHANGE_DEPTH (1) +#define NBL_FEM_EM0_PP_KEY_CHANGE_WIDTH (32) +#define NBL_FEM_EM0_PP_KEY_CHANGE_DWLEN (1) +union fem_em0_pp_key_change_u { + struct fem_em0_pp_key_change { + u32 cnt:32; /* [31:00] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_FEM_EM0_PP_KEY_CHANGE_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_PP_KEY_CHANGE_ADDR (0xa04958) +#define NBL_FEM_EM1_PP_KEY_CHANGE_DEPTH (1) +#define NBL_FEM_EM1_PP_KEY_CHANGE_WIDTH (32) +#define NBL_FEM_EM1_PP_KEY_CHANGE_DWLEN (1) +union fem_em1_pp_key_change_u { + struct fem_em1_pp_key_change { + u32 cnt:32; /* [31:00] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_FEM_EM1_PP_KEY_CHANGE_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_PP_KEY_CHANGE_ADDR (0xa0495c) +#define NBL_FEM_EM2_PP_KEY_CHANGE_DEPTH (1) +#define NBL_FEM_EM2_PP_KEY_CHANGE_WIDTH (32) +#define NBL_FEM_EM2_PP_KEY_CHANGE_DWLEN (1) +union fem_em2_pp_key_change_u { + struct fem_em2_pp_key_change { + u32 cnt:32; /* [31:00] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_FEM_EM2_PP_KEY_CHANGE_DWLEN]; +} __packed; + +#define NBL_FEM_BP_STATE_ADDR (0xa04b00) +#define NBL_FEM_BP_STATE_DEPTH (1) +#define NBL_FEM_BP_STATE_WIDTH (32) +#define NBL_FEM_BP_STATE_DWLEN (1) +union fem_bp_state_u { + struct fem_bp_state { + u32 fem_pp0_bp:1; /* [00:00] Default:0x0 RO */ + u32 fem_pp1_bp:1; /* [01:01] Default:0x0 RO */ + u32 fem_pp2_bp:1; /* [02:02] Default:0x0 RO */ + u32 up_cmdq_bp:1; /* [03:03] Default:0x0 RO */ + u32 dn_acl_cmdq_bp:1; /* [04:04] Default:0x0 RO */ + u32 dn_age_msgq_bp:1; /* [05:05] Default:0x0 RO */ + u32 p0_ht0_cpu_acc_bp:1; /* [06:06] Default:0x0 RO */ + u32 p1_ht0_cpu_acc_bp:1; /* [07:07] Default:0x0 RO */ + u32 p2_ht0_cpu_acc_bp:1; /* [08:08] Default:0x0 RO */ + u32 p0_ht1_cpu_acc_bp:1; /* [09:09] Default:0x0 RO */ + u32 
p1_ht1_cpu_acc_bp:1; /* [10:10] Default:0x0 RO */ + u32 p2_ht1_cpu_acc_bp:1; /* [11:11] Default:0x0 RO */ + u32 p0_kt_cpu_acc_bp:1; /* [12:12] Default:0x0 RO */ + u32 p1_kt_cpu_acc_bp:1; /* [13:13] Default:0x0 RO */ + u32 p2_kt_cpu_acc_bp:1; /* [14:14] Default:0x0 RO */ + u32 p0_at_cpu_acc_bp:1; /* [15:15] Default:0x0 RO */ + u32 p1_at_cpu_acc_bp:1; /* [16:16] Default:0x0 RO */ + u32 p2_at_cpu_acc_bp:1; /* [17:17] Default:0x0 RO */ + u32 p0_age_cpu_acc_bp:1; /* [18:18] Default:0x0 RO */ + u32 p1_age_cpu_acc_bp:1; /* [19:19] Default:0x0 RO */ + u32 p2_age_cpu_acc_bp:1; /* [20:20] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_BP_STATE_DWLEN]; +} __packed; + +#define NBL_FEM_BP_HISTORY_ADDR (0xa04b04) +#define NBL_FEM_BP_HISTORY_DEPTH (1) +#define NBL_FEM_BP_HISTORY_WIDTH (32) +#define NBL_FEM_BP_HISTORY_DWLEN (1) +union fem_bp_history_u { + struct fem_bp_history { + u32 fem_pp0_bp:1; /* [00:00] Default:0x0 RC */ + u32 fem_pp1_bp:1; /* [01:01] Default:0x0 RC */ + u32 fem_pp2_bp:1; /* [02:02] Default:0x0 RC */ + u32 up_cmdq_bp:1; /* [03:03] Default:0x0 RC */ + u32 dn_acl_cmdq_bp:1; /* [04:04] Default:0x0 RC */ + u32 dn_age_msgq_bp:1; /* [05:05] Default:0x0 RC */ + u32 p0_ht0_cpu_acc_bp:1; /* [06:06] Default:0x0 RC */ + u32 p1_ht0_cpu_acc_bp:1; /* [07:07] Default:0x0 RC */ + u32 p2_ht0_cpu_acc_bp:1; /* [08:08] Default:0x0 RC */ + u32 p0_ht1_cpu_acc_bp:1; /* [09:09] Default:0x0 RC */ + u32 p1_ht1_cpu_acc_bp:1; /* [10:10] Default:0x0 RC */ + u32 p2_ht1_cpu_acc_bp:1; /* [11:11] Default:0x0 RC */ + u32 p0_kt_cpu_acc_bp:1; /* [12:12] Default:0x0 RC */ + u32 p1_kt_cpu_acc_bp:1; /* [13:13] Default:0x0 RC */ + u32 p2_kt_cpu_acc_bp:1; /* [14:14] Default:0x0 RC */ + u32 p0_at_cpu_acc_bp:1; /* [15:15] Default:0x0 RC */ + u32 p1_at_cpu_acc_bp:1; /* [16:16] Default:0x0 RC */ + u32 p2_at_cpu_acc_bp:1; /* [17:17] Default:0x0 RC */ + u32 p0_age_cpu_acc_bp:1; /* [18:18] Default:0x0 RC */ + u32 p1_age_cpu_acc_bp:1; /* [19:19] Default:0x0 RC */ + u32 p2_age_cpu_acc_bp:1; /* [20:20] Default:0x0 RC */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_LOCK_SEARCH_ADDR (0xa04c00) +#define NBL_FEM_EM0_LOCK_SEARCH_DEPTH (10) +#define NBL_FEM_EM0_LOCK_SEARCH_WIDTH (32) +#define NBL_FEM_EM0_LOCK_SEARCH_DWLEN (1) +union fem_em0_lock_search_u { + struct fem_em0_lock_search { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_LOCK_SEARCH_DWLEN]; +} __packed; +#define NBL_FEM_EM0_LOCK_SEARCH_REG(r) (NBL_FEM_EM0_LOCK_SEARCH_ADDR + \ + (NBL_FEM_EM0_LOCK_SEARCH_DWLEN * 4) * (r)) + +#define NBL_FEM_EM0_HT_VALUE_ADDR (0xa04c28) +#define NBL_FEM_EM0_HT_VALUE_DEPTH (1) +#define NBL_FEM_EM0_HT_VALUE_WIDTH (32) +#define NBL_FEM_EM0_HT_VALUE_DWLEN (1) +union fem_em0_ht_value_u { + struct fem_em0_ht_value { + u32 ht0_value:16; /* [15:00] Default:0x0 RO */ + u32 ht1_value:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_HT_VALUE_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_HT_INDEX_ADDR (0xa04c2c) +#define NBL_FEM_EM0_HT_INDEX_DEPTH (1) +#define NBL_FEM_EM0_HT_INDEX_WIDTH (32) +#define NBL_FEM_EM0_HT_INDEX_DWLEN (1) +union fem_em0_ht_index_u { + struct fem_em0_ht_index { + u32 ht0_idx:14; /* [13:00] Default:0x0 RO */ + u32 rsv1:2; /* [15:14] Default:0x0 RO */ + u32 ht1_idx:14; /* [29:16] Default:0x0 RO */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_HT_INDEX_DWLEN]; +} __packed; + +#define 
NBL_FEM_EM1_LOCK_SEARCH_ADDR (0xa04c30) +#define NBL_FEM_EM1_LOCK_SEARCH_DEPTH (10) +#define NBL_FEM_EM1_LOCK_SEARCH_WIDTH (32) +#define NBL_FEM_EM1_LOCK_SEARCH_DWLEN (1) +union fem_em1_lock_search_u { + struct fem_em1_lock_search { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_LOCK_SEARCH_DWLEN]; +} __packed; +#define NBL_FEM_EM1_LOCK_SEARCH_REG(r) (NBL_FEM_EM1_LOCK_SEARCH_ADDR + \ + (NBL_FEM_EM1_LOCK_SEARCH_DWLEN * 4) * (r)) + +#define NBL_FEM_EM1_HT_VALUE_ADDR (0xa04c58) +#define NBL_FEM_EM1_HT_VALUE_DEPTH (1) +#define NBL_FEM_EM1_HT_VALUE_WIDTH (32) +#define NBL_FEM_EM1_HT_VALUE_DWLEN (1) +union fem_em1_ht_value_u { + struct fem_em1_ht_value { + u32 ht0_value:16; /* [15:00] Default:0x0 RO */ + u32 ht1_value:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_HT_VALUE_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_HT_INDEX_ADDR (0xa04c5c) +#define NBL_FEM_EM1_HT_INDEX_DEPTH (1) +#define NBL_FEM_EM1_HT_INDEX_WIDTH (32) +#define NBL_FEM_EM1_HT_INDEX_DWLEN (1) +union fem_em1_ht_index_u { + struct fem_em1_ht_index { + u32 ht0_idx:14; /* [13:00] Default:0x0 RO */ + u32 rsv1:2; /* [15:14] Default:0x0 RO */ + u32 ht1_idx:14; /* [29:16] Default:0x0 RO */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_HT_INDEX_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_LOCK_SEARCH_ADDR (0xa04c60) +#define NBL_FEM_EM2_LOCK_SEARCH_DEPTH (10) +#define NBL_FEM_EM2_LOCK_SEARCH_WIDTH (32) +#define NBL_FEM_EM2_LOCK_SEARCH_DWLEN (1) +union fem_em2_lock_search_u { + struct fem_em2_lock_search { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_LOCK_SEARCH_DWLEN]; +} __packed; +#define NBL_FEM_EM2_LOCK_SEARCH_REG(r) (NBL_FEM_EM2_LOCK_SEARCH_ADDR + \ + (NBL_FEM_EM2_LOCK_SEARCH_DWLEN * 4) * (r)) + +#define NBL_FEM_EM2_HT_VALUE_ADDR (0xa04c88) +#define NBL_FEM_EM2_HT_VALUE_DEPTH (1) +#define NBL_FEM_EM2_HT_VALUE_WIDTH (32) +#define NBL_FEM_EM2_HT_VALUE_DWLEN (1) +union fem_em2_ht_value_u { + struct fem_em2_ht_value { + u32 ht0_value:16; /* [15:00] Default:0x0 RO */ + u32 ht1_value:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_HT_VALUE_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_HT_INDEX_ADDR (0xa04c8c) +#define NBL_FEM_EM2_HT_INDEX_DEPTH (1) +#define NBL_FEM_EM2_HT_INDEX_WIDTH (32) +#define NBL_FEM_EM2_HT_INDEX_DWLEN (1) +union fem_em2_ht_index_u { + struct fem_em2_ht_index { + u32 ht0_idx:14; /* [13:00] Default:0x0 RO */ + u32 rsv1:2; /* [15:14] Default:0x0 RO */ + u32 ht1_idx:14; /* [29:16] Default:0x0 RO */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_HT_INDEX_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_LOCK_MISS_ADDR (0xa04c90) +#define NBL_FEM_EM0_LOCK_MISS_DEPTH (10) +#define NBL_FEM_EM0_LOCK_MISS_WIDTH (32) +#define NBL_FEM_EM0_LOCK_MISS_DWLEN (1) +union fem_em0_lock_miss_u { + struct fem_em0_lock_miss { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_LOCK_MISS_DWLEN]; +} __packed; +#define NBL_FEM_EM0_LOCK_MISS_REG(r) (NBL_FEM_EM0_LOCK_MISS_ADDR + \ + (NBL_FEM_EM0_LOCK_MISS_DWLEN * 4) * (r)) + +#define NBL_FEM_EM1_LOCK_MISS_ADDR (0xa04cb8) +#define NBL_FEM_EM1_LOCK_MISS_DEPTH (10) +#define NBL_FEM_EM1_LOCK_MISS_WIDTH (32) +#define NBL_FEM_EM1_LOCK_MISS_DWLEN (1) +union fem_em1_lock_miss_u { + struct fem_em1_lock_miss { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_LOCK_MISS_DWLEN]; +} __packed; +#define NBL_FEM_EM1_LOCK_MISS_REG(r) 
(NBL_FEM_EM1_LOCK_MISS_ADDR + \ + (NBL_FEM_EM1_LOCK_MISS_DWLEN * 4) * (r)) + +#define NBL_FEM_EM2_LOCK_MISS_ADDR (0xa04ce0) +#define NBL_FEM_EM2_LOCK_MISS_DEPTH (10) +#define NBL_FEM_EM2_LOCK_MISS_WIDTH (32) +#define NBL_FEM_EM2_LOCK_MISS_DWLEN (1) +union fem_em2_lock_miss_u { + struct fem_em2_lock_miss { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_LOCK_MISS_DWLEN]; +} __packed; +#define NBL_FEM_EM2_LOCK_MISS_REG(r) (NBL_FEM_EM2_LOCK_MISS_ADDR + \ + (NBL_FEM_EM2_LOCK_MISS_DWLEN * 4) * (r)) + +#define NBL_FEM_EM0_PROFILE_TABLE_ADDR (0xa05000) +#define NBL_FEM_EM0_PROFILE_TABLE_DEPTH (16) +#define NBL_FEM_EM0_PROFILE_TABLE_WIDTH (512) +#define NBL_FEM_EM0_PROFILE_TABLE_DWLEN (16) +union fem_em0_profile_table_u { + struct fem_em0_profile_table { + u32 cmd:1; /* [0] Default:0x0 RW */ + u32 key_size:1; /* [1] Default:0x0 RW */ + u32 mask_btm:16; /* [81:2] Default:0x0 RW */ + u32 mask_btm_arr[2]; /* [81:2] Default:0x0 RW */ + u32 hash_sel0:2; /* [83:82] Default:0x0 RW */ + u32 hash_sel1:2; /* [85:84] Default:0x0 RW */ + u32 action0:22; /* [107:86] Default:0x0 RW */ + u32 action1:22; /* [129:108] Default:0x0 RW */ + u32 action2:22; /* [151:130] Default:0x0 RW */ + u32 action3:22; /* [173:152] Default:0x0 RW */ + u32 action4:22; /* [195:174] Default:0x0 RW */ + u32 action5:22; /* [217:196] Default:0x0 RW */ + u32 action6:22; /* [239:218] Default:0x0 RW */ + u32 action7:22; /* [261:240] Default:0x0 RW */ + u32 act_num:4; /* [265:262] Default:0x0 RW */ + u32 vld:1; /* [266] Default:0x0 RW */ + u32 rsv_l:32; /* [511:267] Default:0x0 RO */ + u32 rsv_h:21; /* [511:267] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:267] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_PROFILE_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM0_PROFILE_TABLE_REG(r) (NBL_FEM_EM0_PROFILE_TABLE_ADDR + \ + (NBL_FEM_EM0_PROFILE_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM1_PROFILE_TABLE_ADDR (0xa06000) +#define NBL_FEM_EM1_PROFILE_TABLE_DEPTH (16) +#define NBL_FEM_EM1_PROFILE_TABLE_WIDTH (512) +#define NBL_FEM_EM1_PROFILE_TABLE_DWLEN (16) +union fem_em1_profile_table_u { + struct fem_em1_profile_table { + u32 cmd:1; /* [0] Default:0x0 RW */ + u32 key_size:1; /* [1] Default:0x0 RW */ + u32 mask_btm:16; /* [81:2] Default:0x0 RW */ + u32 mask_btm_arr[2]; /* [81:2] Default:0x0 RW */ + u32 hash_sel0:2; /* [83:82] Default:0x0 RW */ + u32 hash_sel1:2; /* [85:84] Default:0x0 RW */ + u32 action0:22; /* [107:86] Default:0x0 RW */ + u32 action1:22; /* [129:108] Default:0x0 RW */ + u32 action2:22; /* [151:130] Default:0x0 RW */ + u32 action3:22; /* [173:152] Default:0x0 RW */ + u32 action4:22; /* [195:174] Default:0x0 RW */ + u32 action5:22; /* [217:196] Default:0x0 RW */ + u32 action6:22; /* [239:218] Default:0x0 RW */ + u32 action7:22; /* [261:240] Default:0x0 RW */ + u32 act_num:4; /* [265:262] Default:0x0 RW */ + u32 vld:1; /* [266] Default:0x0 RW */ + u32 rsv_l:32; /* [511:267] Default:0x0 RO */ + u32 rsv_h:21; /* [511:267] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:267] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_PROFILE_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM1_PROFILE_TABLE_REG(r) (NBL_FEM_EM1_PROFILE_TABLE_ADDR + \ + (NBL_FEM_EM1_PROFILE_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM2_PROFILE_TABLE_ADDR (0xa07000) +#define NBL_FEM_EM2_PROFILE_TABLE_DEPTH (16) +#define NBL_FEM_EM2_PROFILE_TABLE_WIDTH (512) +#define NBL_FEM_EM2_PROFILE_TABLE_DWLEN (16) +union fem_em2_profile_table_u { + struct fem_em2_profile_table { + u32 cmd:1; /* [0] Default:0x0 RW */ + u32 
key_size:1; /* [1] Default:0x0 RW */ + u32 mask_btm:16; /* [81:2] Default:0x0 RW */ + u32 mask_btm_arr[2]; /* [81:2] Default:0x0 RW */ + u32 hash_sel0:2; /* [83:82] Default:0x0 RW */ + u32 hash_sel1:2; /* [85:84] Default:0x0 RW */ + u32 action0:22; /* [107:86] Default:0x0 RW */ + u32 action1:22; /* [129:108] Default:0x0 RW */ + u32 action2:22; /* [151:130] Default:0x0 RW */ + u32 action3:22; /* [173:152] Default:0x0 RW */ + u32 action4:22; /* [195:174] Default:0x0 RW */ + u32 action5:22; /* [217:196] Default:0x0 RW */ + u32 action6:22; /* [239:218] Default:0x0 RW */ + u32 action7:22; /* [261:240] Default:0x0 RW */ + u32 act_num:4; /* [265:262] Default:0x0 RW */ + u32 vld:1; /* [266] Default:0x0 RW */ + u32 rsv_l:32; /* [511:267] Default:0x0 RO */ + u32 rsv_h:21; /* [511:267] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:267] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_PROFILE_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM2_PROFILE_TABLE_REG(r) (NBL_FEM_EM2_PROFILE_TABLE_ADDR + \ + (NBL_FEM_EM2_PROFILE_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM0_AD_TABLE_ADDR (0xa08000) +#define NBL_FEM_EM0_AD_TABLE_DEPTH (64) +#define NBL_FEM_EM0_AD_TABLE_WIDTH (512) +#define NBL_FEM_EM0_AD_TABLE_DWLEN (16) +union fem_em0_ad_table_u { + struct fem_em0_ad_table { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 action8:22; /* [197:176] Default:0x0 RW */ + u32 action9:22; /* [219:198] Default:0x0 RW */ + u32 action10:22; /* [241:220] Default:0x0 RW */ + u32 action11:22; /* [263:242] Default:0x0 RW */ + u32 action12:22; /* [285:264] Default:0x0 RW */ + u32 action13:22; /* [307:286] Default:0x0 RW */ + u32 action14:22; /* [329:308] Default:0x0 RW */ + u32 action15:22; /* [351:330] Default:0x0 RW */ + u32 rsv:32; /* [511:352] Default:0x0 RO */ + u32 rsv_arr[4]; /* [511:352] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_AD_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM0_AD_TABLE_REG(r) (NBL_FEM_EM0_AD_TABLE_ADDR + \ + (NBL_FEM_EM0_AD_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM1_AD_TABLE_ADDR (0xa09000) +#define NBL_FEM_EM1_AD_TABLE_DEPTH (64) +#define NBL_FEM_EM1_AD_TABLE_WIDTH (512) +#define NBL_FEM_EM1_AD_TABLE_DWLEN (16) +union fem_em1_ad_table_u { + struct fem_em1_ad_table { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 action8:22; /* [197:176] Default:0x0 RW */ + u32 action9:22; /* [219:198] Default:0x0 RW */ + u32 action10:22; /* [241:220] Default:0x0 RW */ + u32 action11:22; /* [263:242] Default:0x0 RW */ + u32 action12:22; /* [285:264] Default:0x0 RW */ + u32 action13:22; /* [307:286] Default:0x0 RW */ + u32 action14:22; /* [329:308] Default:0x0 RW */ + u32 action15:22; /* [351:330] Default:0x0 RW */ + u32 rsv:32; /* [511:352] Default:0x0 RO */ + u32 rsv_arr[4]; /* [511:352] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_AD_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM1_AD_TABLE_REG(r) 
(NBL_FEM_EM1_AD_TABLE_ADDR + \ + (NBL_FEM_EM1_AD_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM2_AD_TABLE_ADDR (0xa0a000) +#define NBL_FEM_EM2_AD_TABLE_DEPTH (64) +#define NBL_FEM_EM2_AD_TABLE_WIDTH (512) +#define NBL_FEM_EM2_AD_TABLE_DWLEN (16) +union fem_em2_ad_table_u { + struct fem_em2_ad_table { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 action8:22; /* [197:176] Default:0x0 RW */ + u32 action9:22; /* [219:198] Default:0x0 RW */ + u32 action10:22; /* [241:220] Default:0x0 RW */ + u32 action11:22; /* [263:242] Default:0x0 RW */ + u32 action12:22; /* [285:264] Default:0x0 RW */ + u32 action13:22; /* [307:286] Default:0x0 RW */ + u32 action14:22; /* [329:308] Default:0x0 RW */ + u32 action15:22; /* [351:330] Default:0x0 RW */ + u32 rsv:32; /* [511:352] Default:0x0 RO */ + u32 rsv_arr[4]; /* [511:352] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_AD_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM2_AD_TABLE_REG(r) (NBL_FEM_EM2_AD_TABLE_ADDR + \ + (NBL_FEM_EM2_AD_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM0_TCAM_TABLE_ADDR (0xa0b000) +#define NBL_FEM_EM0_TCAM_TABLE_DEPTH (64) +#define NBL_FEM_EM0_TCAM_TABLE_WIDTH (256) +#define NBL_FEM_EM0_TCAM_TABLE_DWLEN (8) +union fem_em0_tcam_table_u { + struct fem_em0_tcam_table { + u32 key:32; /* [159:0] Default:0x0 RW */ + u32 key_arr[4]; /* [159:0] Default:0x0 RW */ + u32 key_vld:1; /* [160] Default:0x0 RW */ + u32 key_size:1; /* [161] Default:0x0 RW */ + u32 rsv:30; /* [255:162] Default:0x0 RO */ + u32 rsv_arr[2]; /* [255:162] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_TCAM_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM0_TCAM_TABLE_REG(r) (NBL_FEM_EM0_TCAM_TABLE_ADDR + \ + (NBL_FEM_EM0_TCAM_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM1_TCAM_TABLE_ADDR (0xa0c000) +#define NBL_FEM_EM1_TCAM_TABLE_DEPTH (64) +#define NBL_FEM_EM1_TCAM_TABLE_WIDTH (256) +#define NBL_FEM_EM1_TCAM_TABLE_DWLEN (8) +union fem_em1_tcam_table_u { + struct fem_em1_tcam_table { + u32 key:32; /* [159:0] Default:0x0 RW */ + u32 key_arr[4]; /* [159:0] Default:0x0 RW */ + u32 key_vld:1; /* [160] Default:0x0 RW */ + u32 key_size:1; /* [161] Default:0x0 RW */ + u32 rsv:30; /* [255:162] Default:0x0 RO */ + u32 rsv_arr[2]; /* [255:162] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_TCAM_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM1_TCAM_TABLE_REG(r) (NBL_FEM_EM1_TCAM_TABLE_ADDR + \ + (NBL_FEM_EM1_TCAM_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM2_TCAM_TABLE_ADDR (0xa0d000) +#define NBL_FEM_EM2_TCAM_TABLE_DEPTH (64) +#define NBL_FEM_EM2_TCAM_TABLE_WIDTH (256) +#define NBL_FEM_EM2_TCAM_TABLE_DWLEN (8) +union fem_em2_tcam_table_u { + struct fem_em2_tcam_table { + u32 key:32; /* [159:0] Default:0x0 RW */ + u32 key_arr[4]; /* [159:0] Default:0x0 RW */ + u32 key_vld:1; /* [160] Default:0x0 RW */ + u32 key_size:1; /* [161] Default:0x0 RW */ + u32 rsv:30; /* [255:162] Default:0x0 RO */ + u32 rsv_arr[2]; /* [255:162] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_TCAM_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM2_TCAM_TABLE_REG(r) (NBL_FEM_EM2_TCAM_TABLE_ADDR + \ + (NBL_FEM_EM2_TCAM_TABLE_DWLEN * 4) * (r)) + +#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_ipro.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_ipro.h new file mode 100644 index 0000000000000000000000000000000000000000..416df1273597ab3d3481ca5637f82a81d6941421 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_ipro.h @@ -0,0 +1,1392 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_IPRO_H +#define NBL_IPRO_H 1 + +#include <linux/types.h> + +#define NBL_IPRO_BASE (0x00B04000) + +#define NBL_IPRO_INT_STATUS_ADDR (0xb04000) +#define NBL_IPRO_INT_STATUS_DEPTH (1) +#define NBL_IPRO_INT_STATUS_WIDTH (32) +#define NBL_IPRO_INT_STATUS_DWLEN (1) +union ipro_int_status_u { + struct ipro_int_status { + u32 fatal_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RWC */ + u32 cif_err:1; /* [3] Default:0x0 RWC */ + u32 input_err:1; /* [4] Default:0x0 RWC */ + u32 cfg_err:1; /* [5] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [6] Default:0x0 RWC */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_IPRO_INT_MASK_ADDR (0xb04004) +#define NBL_IPRO_INT_MASK_DEPTH (1) +#define NBL_IPRO_INT_MASK_WIDTH (32) +#define NBL_IPRO_INT_MASK_DWLEN (1) +union ipro_int_mask_u { + struct ipro_int_mask { + u32 fatal_err:1; /* [0] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RW */ + u32 cif_err:1; /* [3] Default:0x0 RW */ + u32 input_err:1; /* [4] Default:0x0 RW */ + u32 cfg_err:1; /* [5] Default:0x0 RW */ + u32 data_ucor_err:1; /* [6] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INT_MASK_DWLEN]; +} __packed; + +#define NBL_IPRO_INT_SET_ADDR (0xb04008) +#define NBL_IPRO_INT_SET_DEPTH (1) +#define NBL_IPRO_INT_SET_WIDTH (32) +#define NBL_IPRO_INT_SET_DWLEN (1) +union ipro_int_set_u { + struct ipro_int_set { + u32 fatal_err:1; /* [0] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 WO */ + u32 cif_err:1; /* [3] Default:0x0 WO */ + u32 input_err:1; /* [4] Default:0x0 WO */ + u32 cfg_err:1; /* [5] Default:0x0 WO */ + u32 data_ucor_err:1; /* [6] Default:0x0 WO */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INT_SET_DWLEN]; +} __packed; + +#define NBL_IPRO_INIT_DONE_ADDR (0xb0400c) +#define NBL_IPRO_INIT_DONE_DEPTH (1) +#define NBL_IPRO_INIT_DONE_WIDTH (32) +#define NBL_IPRO_INIT_DONE_DWLEN (1) +union ipro_init_done_u { + struct ipro_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_IPRO_CIF_ERR_INFO_ADDR (0xb04040) +#define NBL_IPRO_CIF_ERR_INFO_DEPTH (1) +#define NBL_IPRO_CIF_ERR_INFO_WIDTH (32) +#define NBL_IPRO_CIF_ERR_INFO_DWLEN (1) +union ipro_cif_err_info_u { + struct ipro_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_IPRO_INPUT_ERR_INFO_ADDR (0xb04048) +#define NBL_IPRO_INPUT_ERR_INFO_DEPTH (1) +#define NBL_IPRO_INPUT_ERR_INFO_WIDTH (32) +#define NBL_IPRO_INPUT_ERR_INFO_DWLEN (1) +union ipro_input_err_info_u { + struct ipro_input_err_info { + u32 id:2; /* [1:0]
Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INPUT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_IPRO_CFG_ERR_INFO_ADDR (0xb04050) +#define NBL_IPRO_CFG_ERR_INFO_DEPTH (1) +#define NBL_IPRO_CFG_ERR_INFO_WIDTH (32) +#define NBL_IPRO_CFG_ERR_INFO_DWLEN (1) +union ipro_cfg_err_info_u { + struct ipro_cfg_err_info { + u32 id:2; /* [1:0] Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_IPRO_CAR_CTRL_ADDR (0xb04100) +#define NBL_IPRO_CAR_CTRL_DEPTH (1) +#define NBL_IPRO_CAR_CTRL_WIDTH (32) +#define NBL_IPRO_CAR_CTRL_DWLEN (1) +union ipro_car_ctrl_u { + struct ipro_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_IPRO_INIT_START_ADDR (0xb04180) +#define NBL_IPRO_INIT_START_DEPTH (1) +#define NBL_IPRO_INIT_START_WIDTH (32) +#define NBL_IPRO_INIT_START_DWLEN (1) +union ipro_init_start_u { + struct ipro_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INIT_START_DWLEN]; +} __packed; + +#define NBL_IPRO_CREDIT_TOKEN_ADDR (0xb041c0) +#define NBL_IPRO_CREDIT_TOKEN_DEPTH (1) +#define NBL_IPRO_CREDIT_TOKEN_WIDTH (32) +#define NBL_IPRO_CREDIT_TOKEN_DWLEN (1) +union ipro_credit_token_u { + struct ipro_credit_token { + u32 up_token_num:8; /* [7:0] Default:0x80 RW */ + u32 down_token_num:8; /* [15:8] Default:0x80 RW */ + u32 up_init_vld:1; /* [16] Default:0x0 WO */ + u32 down_init_vld:1; /* [17] Default:0x0 WO */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_CREDIT_TOKEN_DWLEN]; +} __packed; + +#define NBL_IPRO_AM_SET_FLAG_ADDR (0xb041e0) +#define NBL_IPRO_AM_SET_FLAG_DEPTH (1) +#define NBL_IPRO_AM_SET_FLAG_WIDTH (32) +#define NBL_IPRO_AM_SET_FLAG_DWLEN (1) +union ipro_am_set_flag_u { + struct ipro_am_set_flag { + u32 set_flag:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_AM_SET_FLAG_DWLEN]; +} __packed; + +#define NBL_IPRO_AM_CLEAR_FLAG_ADDR (0xb041e4) +#define NBL_IPRO_AM_CLEAR_FLAG_DEPTH (1) +#define NBL_IPRO_AM_CLEAR_FLAG_WIDTH (32) +#define NBL_IPRO_AM_CLEAR_FLAG_DWLEN (1) +union ipro_am_clear_flag_u { + struct ipro_am_clear_flag { + u32 clear_flag:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_AM_CLEAR_FLAG_DWLEN]; +} __packed; + +#define NBL_IPRO_FLAG_OFFSET_0_ADDR (0xb04200) +#define NBL_IPRO_FLAG_OFFSET_0_DEPTH (1) +#define NBL_IPRO_FLAG_OFFSET_0_WIDTH (32) +#define NBL_IPRO_FLAG_OFFSET_0_DWLEN (1) +union ipro_flag_offset_0_u { + struct ipro_flag_offset_0 { + u32 dir_offset_en:1; /* [0] Default:0x1 RW */ + u32 dir_offset:5; /* [5:1] Default:0x00 RW */ + u32 rsv1:2; /* [7:6] Default:0x0 RO */ + u32 phy_flow_offset_en:1; /* [8] Default:0x1 RW */ + u32 phy_flow_offset:5; /* [13:9] Default:0xb RW */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_FLAG_OFFSET_0_DWLEN]; +} __packed; + +#define NBL_IPRO_DROP_NXT_STAGE_ADDR (0xb04210) +#define NBL_IPRO_DROP_NXT_STAGE_DEPTH (1) +#define NBL_IPRO_DROP_NXT_STAGE_WIDTH (32) +#define NBL_IPRO_DROP_NXT_STAGE_DWLEN (1) +union ipro_drop_nxt_stage_u { + struct ipro_drop_nxt_stage { + u32 stage:4; /* [3:0] Default:0xf RW */ + u32 rsv:28; /* [31:4] Default:0x0 
RO */ + } __packed info; + u32 data[NBL_IPRO_DROP_NXT_STAGE_DWLEN]; +} __packed; + +#define NBL_IPRO_FWD_ACTION_PRI_ADDR (0xb04220) +#define NBL_IPRO_FWD_ACTION_PRI_DEPTH (1) +#define NBL_IPRO_FWD_ACTION_PRI_WIDTH (32) +#define NBL_IPRO_FWD_ACTION_PRI_DWLEN (1) +union ipro_fwd_action_pri_u { + struct ipro_fwd_action_pri { + u32 dqueue:2; /* [1:0] Default:0x0 RW */ + u32 set_dport:2; /* [3:2] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_FWD_ACTION_PRI_DWLEN]; +} __packed; + +#define NBL_IPRO_MTU_CHECK_CTRL_ADDR (0xb0427c) +#define NBL_IPRO_MTU_CHECK_CTRL_DEPTH (1) +#define NBL_IPRO_MTU_CHECK_CTRL_WIDTH (32) +#define NBL_IPRO_MTU_CHECK_CTRL_DWLEN (1) +union ipro_mtu_check_ctrl_u { + struct ipro_mtu_check_ctrl { + u32 set_dport:16; /* [15:0] Default:0xFFFF RW */ + u32 set_dport_pri:2; /* [17:16] Default:0x3 RW */ + u32 proc_done:1; /* [18] Default:0x1 RW */ + u32 rsv:13; /* [31:19] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MTU_CHECK_CTRL_DWLEN]; +} __packed; + +#define NBL_IPRO_MTU_SEL_ADDR (0xb04280) +#define NBL_IPRO_MTU_SEL_DEPTH (8) +#define NBL_IPRO_MTU_SEL_WIDTH (32) +#define NBL_IPRO_MTU_SEL_DWLEN (1) +union ipro_mtu_sel_u { + struct ipro_mtu_sel { + u32 mtu_1:16; /* [15:0] Default:0x0 RW */ + u32 mtu_0:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MTU_SEL_DWLEN]; +} __packed; +#define NBL_IPRO_MTU_SEL_REG(r) (NBL_IPRO_MTU_SEL_ADDR + \ + (NBL_IPRO_MTU_SEL_DWLEN * 4) * (r)) + +#define NBL_IPRO_UDL_PKT_FLT_DMAC_ADDR (0xb04300) +#define NBL_IPRO_UDL_PKT_FLT_DMAC_DEPTH (16) +#define NBL_IPRO_UDL_PKT_FLT_DMAC_WIDTH (64) +#define NBL_IPRO_UDL_PKT_FLT_DMAC_DWLEN (2) +union ipro_udl_pkt_flt_dmac_u { + struct ipro_udl_pkt_flt_dmac { + u32 dmac_l:32; /* [47:0] Default:0x0 RW */ + u32 dmac_h:16; /* [47:0] Default:0x0 RW */ + u32 rsv:16; /* [63:48] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_UDL_PKT_FLT_DMAC_DWLEN]; +} __packed; +#define NBL_IPRO_UDL_PKT_FLT_DMAC_REG(r) (NBL_IPRO_UDL_PKT_FLT_DMAC_ADDR + \ + (NBL_IPRO_UDL_PKT_FLT_DMAC_DWLEN * 4) * (r)) + +#define NBL_IPRO_UDL_PKT_FLT_VLAN_ADDR (0xb04380) +#define NBL_IPRO_UDL_PKT_FLT_VLAN_DEPTH (16) +#define NBL_IPRO_UDL_PKT_FLT_VLAN_WIDTH (32) +#define NBL_IPRO_UDL_PKT_FLT_VLAN_DWLEN (1) +union ipro_udl_pkt_flt_vlan_u { + struct ipro_udl_pkt_flt_vlan { + u32 vlan_0:12; /* [11:0] Default:0x0 RW */ + u32 vlan_1:12; /* [23:12] Default:0x0 RW */ + u32 vlan_layer:2; /* [25:24] Default:0x0 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_UDL_PKT_FLT_VLAN_DWLEN]; +} __packed; +#define NBL_IPRO_UDL_PKT_FLT_VLAN_REG(r) (NBL_IPRO_UDL_PKT_FLT_VLAN_ADDR + \ + (NBL_IPRO_UDL_PKT_FLT_VLAN_DWLEN * 4) * (r)) + +#define NBL_IPRO_UDL_PKT_FLT_CTRL_ADDR (0xb043c0) +#define NBL_IPRO_UDL_PKT_FLT_CTRL_DEPTH (1) +#define NBL_IPRO_UDL_PKT_FLT_CTRL_WIDTH (32) +#define NBL_IPRO_UDL_PKT_FLT_CTRL_DWLEN (1) +union ipro_udl_pkt_flt_ctrl_u { + struct ipro_udl_pkt_flt_ctrl { + u32 vld:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_UDL_PKT_FLT_CTRL_DWLEN]; +} __packed; + +#define NBL_IPRO_UDL_PKT_FLT_ACTION_ADDR (0xb043c4) +#define NBL_IPRO_UDL_PKT_FLT_ACTION_DEPTH (1) +#define NBL_IPRO_UDL_PKT_FLT_ACTION_WIDTH (32) +#define NBL_IPRO_UDL_PKT_FLT_ACTION_DWLEN (1) +union ipro_udl_pkt_flt_action_u { + struct ipro_udl_pkt_flt_action { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 rsv:2; /* [13:12] Default:0x0 RO */ + u32 
proc_done:1; /* [14] Default:0x0 RW */ + u32 set_dport_en:1; /* [15] Default:0x0 RW */ + u32 set_dport:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_UDL_PKT_FLT_ACTION_DWLEN]; +} __packed; + +#define NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_ADDR (0xb043e0) +#define NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_DEPTH (1) +#define NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_WIDTH (32) +#define NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_DWLEN (1) +union ipro_anti_fake_addr_errcode_u { + struct ipro_anti_fake_addr_errcode { + u32 num:4; /* [3:0] Default:0xA RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_DWLEN]; +} __packed; + +#define NBL_IPRO_ANTI_FAKE_ADDR_ACTION_ADDR (0xb043e4) +#define NBL_IPRO_ANTI_FAKE_ADDR_ACTION_DEPTH (1) +#define NBL_IPRO_ANTI_FAKE_ADDR_ACTION_WIDTH (32) +#define NBL_IPRO_ANTI_FAKE_ADDR_ACTION_DWLEN (1) +union ipro_anti_fake_addr_action_u { + struct ipro_anti_fake_addr_action { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 rsv:2; /* [13:12] Default:0x0 RO */ + u32 proc_done:1; /* [14] Default:0x1 RW */ + u32 set_dport_en:1; /* [15] Default:0x1 RW */ + u32 set_dport:16; /* [31:16] Default:0xFFFF RW */ + } __packed info; + u32 data[NBL_IPRO_ANTI_FAKE_ADDR_ACTION_DWLEN]; +} __packed; + +#define NBL_IPRO_VLAN_NUM_CHK_ERRCODE_ADDR (0xb043f0) +#define NBL_IPRO_VLAN_NUM_CHK_ERRCODE_DEPTH (1) +#define NBL_IPRO_VLAN_NUM_CHK_ERRCODE_WIDTH (32) +#define NBL_IPRO_VLAN_NUM_CHK_ERRCODE_DWLEN (1) +union ipro_vlan_num_chk_errcode_u { + struct ipro_vlan_num_chk_errcode { + u32 num:4; /* [3:0] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_VLAN_NUM_CHK_ERRCODE_DWLEN]; +} __packed; + +#define NBL_IPRO_VLAN_NUM_CHK_ACTION_ADDR (0xb043f4) +#define NBL_IPRO_VLAN_NUM_CHK_ACTION_DEPTH (1) +#define NBL_IPRO_VLAN_NUM_CHK_ACTION_WIDTH (32) +#define NBL_IPRO_VLAN_NUM_CHK_ACTION_DWLEN (1) +union ipro_vlan_num_chk_action_u { + struct ipro_vlan_num_chk_action { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 rsv:2; /* [13:12] Default:0x0 RO */ + u32 proc_done:1; /* [14] Default:0x1 RW */ + u32 set_dport_en:1; /* [15] Default:0x1 RW */ + u32 set_dport:16; /* [31:16] Default:0xFFFF RW */ + } __packed info; + u32 data[NBL_IPRO_VLAN_NUM_CHK_ACTION_DWLEN]; +} __packed; + +#define NBL_IPRO_TCP_STATE_PROBE_ADDR (0xb04400) +#define NBL_IPRO_TCP_STATE_PROBE_DEPTH (1) +#define NBL_IPRO_TCP_STATE_PROBE_WIDTH (32) +#define NBL_IPRO_TCP_STATE_PROBE_DWLEN (1) +union ipro_tcp_state_probe_u { + struct ipro_tcp_state_probe { + u32 up_chk_en:1; /* [0] Default:0x0 RW */ + u32 dn_chk_en:1; /* [1] Default:0x0 RW */ + u32 rsv:14; /* [15:2] Default:0x0 RO */ + u32 up_bitmap:8; /* [23:16] Default:0x0 RW */ + u32 dn_bitmap:8; /* [31:24] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_TCP_STATE_PROBE_DWLEN]; +} __packed; + +#define NBL_IPRO_TCP_STATE_UP_ACTION_ADDR (0xb04404) +#define NBL_IPRO_TCP_STATE_UP_ACTION_DEPTH (1) +#define NBL_IPRO_TCP_STATE_UP_ACTION_WIDTH (32) +#define NBL_IPRO_TCP_STATE_UP_ACTION_DWLEN (1) +union ipro_tcp_state_up_action_u { + struct ipro_tcp_state_up_action { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 rsv:2; /* [13:12] Default:0x0 RO */ + u32 proc_done:1; /* [14] Default:0x0 RW */ + u32 set_dport_en:1; /* [15] Default:0x0 RW */ + u32 set_dport:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_TCP_STATE_UP_ACTION_DWLEN]; +} 
__packed; + +#define NBL_IPRO_TCP_STATE_DN_ACTION_ADDR (0xb04408) +#define NBL_IPRO_TCP_STATE_DN_ACTION_DEPTH (1) +#define NBL_IPRO_TCP_STATE_DN_ACTION_WIDTH (32) +#define NBL_IPRO_TCP_STATE_DN_ACTION_DWLEN (1) +union ipro_tcp_state_dn_action_u { + struct ipro_tcp_state_dn_action { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 rsv:2; /* [13:12] Default:0x0 RO */ + u32 proc_done:1; /* [14] Default:0x0 RW */ + u32 set_dport_en:1; /* [15] Default:0x0 RW */ + u32 set_dport:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_TCP_STATE_DN_ACTION_DWLEN]; +} __packed; + +#define NBL_IPRO_FWD_ACTION_ID_ADDR (0xb04440) +#define NBL_IPRO_FWD_ACTION_ID_DEPTH (1) +#define NBL_IPRO_FWD_ACTION_ID_WIDTH (32) +#define NBL_IPRO_FWD_ACTION_ID_DWLEN (1) +union ipro_fwd_action_id_u { + struct ipro_fwd_action_id { + u32 mirror_index:6; /* [5:0] Default:0x8 RW */ + u32 dport:6; /* [11:6] Default:0x9 RW */ + u32 dqueue:6; /* [17:12] Default:0xA RW */ + u32 car:6; /* [23:18] Default:0x5 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_FWD_ACTION_ID_DWLEN]; +} __packed; + +#define NBL_IPRO_PED_ACTION_ID_ADDR (0xb04448) +#define NBL_IPRO_PED_ACTION_ID_DEPTH (1) +#define NBL_IPRO_PED_ACTION_ID_WIDTH (32) +#define NBL_IPRO_PED_ACTION_ID_DWLEN (1) +union ipro_ped_action_id_u { + struct ipro_ped_action_id { + u32 encap:6; /* [5:0] Default:0x2E RW */ + u32 decap:6; /* [11:6] Default:0x2F RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_PED_ACTION_ID_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_HIT_ACTION_ADDR (0xb04510) +#define NBL_IPRO_MNG_HIT_ACTION_DEPTH (8) +#define NBL_IPRO_MNG_HIT_ACTION_WIDTH (32) +#define NBL_IPRO_MNG_HIT_ACTION_DWLEN (1) +union ipro_mng_hit_action_u { + struct ipro_mng_hit_action { + u32 data:24; /* [23:0] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_HIT_ACTION_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_HIT_ACTION_REG(r) (NBL_IPRO_MNG_HIT_ACTION_ADDR + \ + (NBL_IPRO_MNG_HIT_ACTION_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_DECISION_FLT_0_ADDR (0xb04530) +#define NBL_IPRO_MNG_DECISION_FLT_0_DEPTH (4) +#define NBL_IPRO_MNG_DECISION_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_DECISION_FLT_0_DWLEN (1) +union ipro_mng_decision_flt_0_u { + struct ipro_mng_decision_flt_0 { + u32 en:1; /* [0] Default:0x0 RW */ + u32 pkt_len_and:1; /* [1] Default:0x0 RW */ + u32 flow_ctrl_and:1; /* [2] Default:0x0 RW */ + u32 ncsi_and:1; /* [3] Default:0x0 RW */ + u32 eth_id:2; /* [5:4] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_DECISION_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_DECISION_FLT_0_REG(r) (NBL_IPRO_MNG_DECISION_FLT_0_ADDR + \ + (NBL_IPRO_MNG_DECISION_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_DECISION_FLT_1_ADDR (0xb04540) +#define NBL_IPRO_MNG_DECISION_FLT_1_DEPTH (4) +#define NBL_IPRO_MNG_DECISION_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_DECISION_FLT_1_DWLEN (1) +union ipro_mng_decision_flt_1_u { + struct ipro_mng_decision_flt_1 { + u32 dmac_and:4; /* [3:0] Default:0x0 RW */ + u32 brcast_and:1; /* [4] Default:0x0 RW */ + u32 mulcast_and:1; /* [5] Default:0x0 RW */ + u32 vlan_and:8; /* [13:6] Default:0x0 RW */ + u32 ipv4_dip_and:4; /* [17:14] Default:0x0 RW */ + u32 ipv6_dip_and:4; /* [21:18] Default:0x0 RW */ + u32 ethertype_and:4; /* [25:22] Default:0x0 RW */ + u32 brcast_or:1; /* [26] Default:0x0 RW */ + u32 icmpv4_or:1; /* [27] Default:0x0 
RW */ + u32 mld_or:4; /* [31:28] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_DECISION_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_DECISION_FLT_1_REG(r) (NBL_IPRO_MNG_DECISION_FLT_1_ADDR + \ + (NBL_IPRO_MNG_DECISION_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_DECISION_FLT_2_ADDR (0xb04550) +#define NBL_IPRO_MNG_DECISION_FLT_2_DEPTH (4) +#define NBL_IPRO_MNG_DECISION_FLT_2_WIDTH (32) +#define NBL_IPRO_MNG_DECISION_FLT_2_DWLEN (1) +union ipro_mng_decision_flt_2_u { + struct ipro_mng_decision_flt_2 { + u32 neighbor_or:4; /* [3:0] Default:0x0 RW */ + u32 port_or:16; /* [19:4] Default:0x0 RW */ + u32 ethertype_or:4; /* [23:20] Default:0x0 RW */ + u32 arp_rsp_or:2; /* [25:24] Default:0x0 RW */ + u32 arp_req_or:2; /* [27:26] Default:0x0 RW */ + u32 dmac_or:4; /* [31:28] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_DECISION_FLT_2_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_DECISION_FLT_2_REG(r) (NBL_IPRO_MNG_DECISION_FLT_2_ADDR + \ + (NBL_IPRO_MNG_DECISION_FLT_2_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_DMAC_FLT_0_ADDR (0xb04560) +#define NBL_IPRO_MNG_DMAC_FLT_0_DEPTH (4) +#define NBL_IPRO_MNG_DMAC_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_DMAC_FLT_0_DWLEN (1) +union ipro_mng_dmac_flt_0_u { + struct ipro_mng_dmac_flt_0 { + u32 data:16; /* [15:0] Default:0x0 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_DMAC_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_DMAC_FLT_0_REG(r) (NBL_IPRO_MNG_DMAC_FLT_0_ADDR + \ + (NBL_IPRO_MNG_DMAC_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_DMAC_FLT_1_ADDR (0xb04570) +#define NBL_IPRO_MNG_DMAC_FLT_1_DEPTH (4) +#define NBL_IPRO_MNG_DMAC_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_DMAC_FLT_1_DWLEN (1) +union ipro_mng_dmac_flt_1_u { + struct ipro_mng_dmac_flt_1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_DMAC_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_DMAC_FLT_1_REG(r) (NBL_IPRO_MNG_DMAC_FLT_1_ADDR + \ + (NBL_IPRO_MNG_DMAC_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_VLAN_FLT_ADDR (0xb04580) +#define NBL_IPRO_MNG_VLAN_FLT_DEPTH (8) +#define NBL_IPRO_MNG_VLAN_FLT_WIDTH (32) +#define NBL_IPRO_MNG_VLAN_FLT_DWLEN (1) +union ipro_mng_vlan_flt_u { + struct ipro_mng_vlan_flt { + u32 data:12; /* [11:0] Default:0x0 RW */ + u32 sel:1; /* [12] Default:0x0 RW */ + u32 nontag:1; /* [13] Default:0x0 RW */ + u32 en:1; /* [14] Default:0x0 RW */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_VLAN_FLT_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_VLAN_FLT_REG(r) (NBL_IPRO_MNG_VLAN_FLT_ADDR + \ + (NBL_IPRO_MNG_VLAN_FLT_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_ETHERTYPE_FLT_ADDR (0xb045a0) +#define NBL_IPRO_MNG_ETHERTYPE_FLT_DEPTH (4) +#define NBL_IPRO_MNG_ETHERTYPE_FLT_WIDTH (32) +#define NBL_IPRO_MNG_ETHERTYPE_FLT_DWLEN (1) +union ipro_mng_ethertype_flt_u { + struct ipro_mng_ethertype_flt { + u32 data:16; /* [15:0] Default:0x0 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_ETHERTYPE_FLT_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_ETHERTYPE_FLT_REG(r) (NBL_IPRO_MNG_ETHERTYPE_FLT_ADDR + \ + (NBL_IPRO_MNG_ETHERTYPE_FLT_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV4_FLT_0_ADDR (0xb045b0) +#define NBL_IPRO_MNG_IPV4_FLT_0_DEPTH (4) +#define NBL_IPRO_MNG_IPV4_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_IPV4_FLT_0_DWLEN (1) +union ipro_mng_ipv4_flt_0_u { + struct ipro_mng_ipv4_flt_0 { + u32 en:1; /* [0] Default:0x0 
RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV4_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV4_FLT_0_REG(r) (NBL_IPRO_MNG_IPV4_FLT_0_ADDR + \ + (NBL_IPRO_MNG_IPV4_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV4_FLT_1_ADDR (0xb045c0) +#define NBL_IPRO_MNG_IPV4_FLT_1_DEPTH (4) +#define NBL_IPRO_MNG_IPV4_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_IPV4_FLT_1_DWLEN (1) +union ipro_mng_ipv4_flt_1_u { + struct ipro_mng_ipv4_flt_1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV4_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV4_FLT_1_REG(r) (NBL_IPRO_MNG_IPV4_FLT_1_ADDR + \ + (NBL_IPRO_MNG_IPV4_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV6_FLT_0_ADDR (0xb04600) +#define NBL_IPRO_MNG_IPV6_FLT_0_DEPTH (4) +#define NBL_IPRO_MNG_IPV6_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_IPV6_FLT_0_DWLEN (1) +union ipro_mng_ipv6_flt_0_u { + struct ipro_mng_ipv6_flt_0 { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:15; /* [15:1] Default:0x0 RO */ + u32 mask:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV6_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV6_FLT_0_REG(r) (NBL_IPRO_MNG_IPV6_FLT_0_ADDR + \ + (NBL_IPRO_MNG_IPV6_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV6_FLT_1_ADDR (0xb04610) +#define NBL_IPRO_MNG_IPV6_FLT_1_DEPTH (4) +#define NBL_IPRO_MNG_IPV6_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_IPV6_FLT_1_DWLEN (1) +union ipro_mng_ipv6_flt_1_u { + struct ipro_mng_ipv6_flt_1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV6_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV6_FLT_1_REG(r) (NBL_IPRO_MNG_IPV6_FLT_1_ADDR + \ + (NBL_IPRO_MNG_IPV6_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV6_FLT_2_ADDR (0xb04620) +#define NBL_IPRO_MNG_IPV6_FLT_2_DEPTH (4) +#define NBL_IPRO_MNG_IPV6_FLT_2_WIDTH (32) +#define NBL_IPRO_MNG_IPV6_FLT_2_DWLEN (1) +union ipro_mng_ipv6_flt_2_u { + struct ipro_mng_ipv6_flt_2 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV6_FLT_2_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV6_FLT_2_REG(r) (NBL_IPRO_MNG_IPV6_FLT_2_ADDR + \ + (NBL_IPRO_MNG_IPV6_FLT_2_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV6_FLT_3_ADDR (0xb04630) +#define NBL_IPRO_MNG_IPV6_FLT_3_DEPTH (4) +#define NBL_IPRO_MNG_IPV6_FLT_3_WIDTH (32) +#define NBL_IPRO_MNG_IPV6_FLT_3_DWLEN (1) +union ipro_mng_ipv6_flt_3_u { + struct ipro_mng_ipv6_flt_3 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV6_FLT_3_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV6_FLT_3_REG(r) (NBL_IPRO_MNG_IPV6_FLT_3_ADDR + \ + (NBL_IPRO_MNG_IPV6_FLT_3_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV6_FLT_4_ADDR (0xb04640) +#define NBL_IPRO_MNG_IPV6_FLT_4_DEPTH (4) +#define NBL_IPRO_MNG_IPV6_FLT_4_WIDTH (32) +#define NBL_IPRO_MNG_IPV6_FLT_4_DWLEN (1) +union ipro_mng_ipv6_flt_4_u { + struct ipro_mng_ipv6_flt_4 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV6_FLT_4_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV6_FLT_4_REG(r) (NBL_IPRO_MNG_IPV6_FLT_4_ADDR + \ + (NBL_IPRO_MNG_IPV6_FLT_4_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_PORT_FLT_ADDR (0xb04650) +#define NBL_IPRO_MNG_PORT_FLT_DEPTH (16) +#define NBL_IPRO_MNG_PORT_FLT_WIDTH (32) +#define NBL_IPRO_MNG_PORT_FLT_DWLEN (1) +union ipro_mng_port_flt_u { + struct ipro_mng_port_flt { + u32 data:16; /* [15:0] Default:0x0 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 mode:1; /* [17] Default:0x0 RW */ 
+ u32 tcp:1; /* [18] Default:0x0 RW */ + u32 udp:1; /* [19] Default:0x0 RW */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_PORT_FLT_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_PORT_FLT_REG(r) (NBL_IPRO_MNG_PORT_FLT_ADDR + \ + (NBL_IPRO_MNG_PORT_FLT_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_ARP_REQ_FLT_0_ADDR (0xb04690) +#define NBL_IPRO_MNG_ARP_REQ_FLT_0_DEPTH (2) +#define NBL_IPRO_MNG_ARP_REQ_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_ARP_REQ_FLT_0_DWLEN (1) +union ipro_mng_arp_req_flt_0_u { + struct ipro_mng_arp_req_flt_0 { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:15; /* [15:1] Default:0x0 RO */ + u32 op:16; /* [31:16] Default:0x1 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_ARP_REQ_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_ARP_REQ_FLT_0_REG(r) (NBL_IPRO_MNG_ARP_REQ_FLT_0_ADDR + \ + (NBL_IPRO_MNG_ARP_REQ_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_ARP_REQ_FLT_1_ADDR (0xb046a0) +#define NBL_IPRO_MNG_ARP_REQ_FLT_1_DEPTH (2) +#define NBL_IPRO_MNG_ARP_REQ_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_ARP_REQ_FLT_1_DWLEN (1) +union ipro_mng_arp_req_flt_1_u { + struct ipro_mng_arp_req_flt_1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_ARP_REQ_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_ARP_REQ_FLT_1_REG(r) (NBL_IPRO_MNG_ARP_REQ_FLT_1_ADDR + \ + (NBL_IPRO_MNG_ARP_REQ_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_ARP_RSP_FLT_0_ADDR (0xb046b0) +#define NBL_IPRO_MNG_ARP_RSP_FLT_0_DEPTH (2) +#define NBL_IPRO_MNG_ARP_RSP_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_ARP_RSP_FLT_0_DWLEN (1) +union ipro_mng_arp_rsp_flt_0_u { + struct ipro_mng_arp_rsp_flt_0 { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:15; /* [15:1] Default:0x0 RO */ + u32 op:16; /* [31:16] Default:0x2 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_ARP_RSP_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_ARP_RSP_FLT_0_REG(r) (NBL_IPRO_MNG_ARP_RSP_FLT_0_ADDR + \ + (NBL_IPRO_MNG_ARP_RSP_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_ARP_RSP_FLT_1_ADDR (0xb046c0) +#define NBL_IPRO_MNG_ARP_RSP_FLT_1_DEPTH (2) +#define NBL_IPRO_MNG_ARP_RSP_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_ARP_RSP_FLT_1_DWLEN (1) +union ipro_mng_arp_rsp_flt_1_u { + struct ipro_mng_arp_rsp_flt_1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_ARP_RSP_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_ARP_RSP_FLT_1_REG(r) (NBL_IPRO_MNG_ARP_RSP_FLT_1_ADDR + \ + (NBL_IPRO_MNG_ARP_RSP_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_NEIGHBOR_FLT_86_ADDR (0xb046d0) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_86_DEPTH (1) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_86_WIDTH (32) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_86_DWLEN (1) +union ipro_mng_neighbor_flt_86_u { + struct ipro_mng_neighbor_flt_86 { + u32 data:8; /* [7:0] Default:0x86 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_NEIGHBOR_FLT_86_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_NEIGHBOR_FLT_87_ADDR (0xb046d4) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_87_DEPTH (1) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_87_WIDTH (32) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_87_DWLEN (1) +union ipro_mng_neighbor_flt_87_u { + struct ipro_mng_neighbor_flt_87 { + u32 data:8; /* [7:0] Default:0x87 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_NEIGHBOR_FLT_87_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_NEIGHBOR_FLT_88_ADDR (0xb046d8) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_88_DEPTH (1) 
+#define NBL_IPRO_MNG_NEIGHBOR_FLT_88_WIDTH (32) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_88_DWLEN (1) +union ipro_mng_neighbor_flt_88_u { + struct ipro_mng_neighbor_flt_88 { + u32 data:8; /* [7:0] Default:0x88 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_NEIGHBOR_FLT_88_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_NEIGHBOR_FLT_89_ADDR (0xb046dc) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_89_DEPTH (1) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_89_WIDTH (32) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_89_DWLEN (1) +union ipro_mng_neighbor_flt_89_u { + struct ipro_mng_neighbor_flt_89 { + u32 data:8; /* [7:0] Default:0x89 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_NEIGHBOR_FLT_89_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_MLD_FLT_82_ADDR (0xb046e0) +#define NBL_IPRO_MNG_MLD_FLT_82_DEPTH (1) +#define NBL_IPRO_MNG_MLD_FLT_82_WIDTH (32) +#define NBL_IPRO_MNG_MLD_FLT_82_DWLEN (1) +union ipro_mng_mld_flt_82_u { + struct ipro_mng_mld_flt_82 { + u32 data:8; /* [7:0] Default:0x82 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_MLD_FLT_82_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_MLD_FLT_83_ADDR (0xb046e4) +#define NBL_IPRO_MNG_MLD_FLT_83_DEPTH (1) +#define NBL_IPRO_MNG_MLD_FLT_83_WIDTH (32) +#define NBL_IPRO_MNG_MLD_FLT_83_DWLEN (1) +union ipro_mng_mld_flt_83_u { + struct ipro_mng_mld_flt_83 { + u32 data:8; /* [7:0] Default:0x83 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_MLD_FLT_83_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_MLD_FLT_84_ADDR (0xb046e8) +#define NBL_IPRO_MNG_MLD_FLT_84_DEPTH (1) +#define NBL_IPRO_MNG_MLD_FLT_84_WIDTH (32) +#define NBL_IPRO_MNG_MLD_FLT_84_DWLEN (1) +union ipro_mng_mld_flt_84_u { + struct ipro_mng_mld_flt_84 { + u32 data:8; /* [7:0] Default:0x84 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_MLD_FLT_84_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_MLD_FLT_8F_ADDR (0xb046ec) +#define NBL_IPRO_MNG_MLD_FLT_8F_DEPTH (1) +#define NBL_IPRO_MNG_MLD_FLT_8F_WIDTH (32) +#define NBL_IPRO_MNG_MLD_FLT_8F_DWLEN (1) +union ipro_mng_mld_flt_8f_u { + struct ipro_mng_mld_flt_8f { + u32 data:8; /* [7:0] Default:0x8f RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_MLD_FLT_8F_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_ICMPV4_FLT_ADDR (0xb046f0) +#define NBL_IPRO_MNG_ICMPV4_FLT_DEPTH (1) +#define NBL_IPRO_MNG_ICMPV4_FLT_WIDTH (32) +#define NBL_IPRO_MNG_ICMPV4_FLT_DWLEN (1) +union ipro_mng_icmpv4_flt_u { + struct ipro_mng_icmpv4_flt { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_ICMPV4_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_BRCAST_FLT_ADDR (0xb04700) +#define NBL_IPRO_MNG_BRCAST_FLT_DEPTH (1) +#define NBL_IPRO_MNG_BRCAST_FLT_WIDTH (32) +#define NBL_IPRO_MNG_BRCAST_FLT_DWLEN (1) +union ipro_mng_brcast_flt_u { + struct ipro_mng_brcast_flt { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_BRCAST_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_MULCAST_FLT_ADDR (0xb04704) +#define NBL_IPRO_MNG_MULCAST_FLT_DEPTH (1) +#define NBL_IPRO_MNG_MULCAST_FLT_WIDTH (32) +#define 
NBL_IPRO_MNG_MULCAST_FLT_DWLEN (1) +union ipro_mng_mulcast_flt_u { + struct ipro_mng_mulcast_flt { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_MULCAST_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_FLOW_CTRL_FLT_ADDR (0xb04710) +#define NBL_IPRO_MNG_FLOW_CTRL_FLT_DEPTH (1) +#define NBL_IPRO_MNG_FLOW_CTRL_FLT_WIDTH (32) +#define NBL_IPRO_MNG_FLOW_CTRL_FLT_DWLEN (1) +union ipro_mng_flow_ctrl_flt_u { + struct ipro_mng_flow_ctrl_flt { + u32 data:16; /* [15:0] Default:0x8808 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 bow:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_FLOW_CTRL_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_NCSI_FLT_ADDR (0xb04714) +#define NBL_IPRO_MNG_NCSI_FLT_DEPTH (1) +#define NBL_IPRO_MNG_NCSI_FLT_WIDTH (32) +#define NBL_IPRO_MNG_NCSI_FLT_DWLEN (1) +union ipro_mng_ncsi_flt_u { + struct ipro_mng_ncsi_flt { + u32 data:16; /* [15:0] Default:0x88F8 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 bow:1; /* [17] Default:0x1 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_NCSI_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_PKT_LEN_FLT_ADDR (0xb04720) +#define NBL_IPRO_MNG_PKT_LEN_FLT_DEPTH (1) +#define NBL_IPRO_MNG_PKT_LEN_FLT_WIDTH (32) +#define NBL_IPRO_MNG_PKT_LEN_FLT_DWLEN (1) +union ipro_mng_pkt_len_flt_u { + struct ipro_mng_pkt_len_flt { + u32 max:16; /* [15:0] Default:0x800 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_PKT_LEN_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_FLOW_STOP_ADDR (0xb04810) +#define NBL_IPRO_FLOW_STOP_DEPTH (1) +#define NBL_IPRO_FLOW_STOP_WIDTH (32) +#define NBL_IPRO_FLOW_STOP_DWLEN (1) +union ipro_flow_stop_u { + struct ipro_flow_stop { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_FLOW_STOP_DWLEN]; +} __packed; + +#define NBL_IPRO_TOKEN_NUM_ADDR (0xb04814) +#define NBL_IPRO_TOKEN_NUM_DEPTH (1) +#define NBL_IPRO_TOKEN_NUM_WIDTH (32) +#define NBL_IPRO_TOKEN_NUM_DWLEN (1) +union ipro_token_num_u { + struct ipro_token_num { + u32 dn_cnt:8; /* [7:0] Default:0x80 RO */ + u32 up_cnt:8; /* [15:8] Default:0x80 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_TOKEN_NUM_DWLEN]; +} __packed; + +#define NBL_IPRO_BYPASS_ADDR (0xb04818) +#define NBL_IPRO_BYPASS_DEPTH (1) +#define NBL_IPRO_BYPASS_WIDTH (32) +#define NBL_IPRO_BYPASS_DWLEN (1) +union ipro_bypass_u { + struct ipro_bypass { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_BYPASS_DWLEN]; +} __packed; + +#define NBL_IPRO_RR_REQ_MASK_ADDR (0xb0481c) +#define NBL_IPRO_RR_REQ_MASK_DEPTH (1) +#define NBL_IPRO_RR_REQ_MASK_WIDTH (32) +#define NBL_IPRO_RR_REQ_MASK_DWLEN (1) +union ipro_rr_req_mask_u { + struct ipro_rr_req_mask { + u32 dn:1; /* [0] Default:0x0 RW */ + u32 up:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_RR_REQ_MASK_DWLEN]; +} __packed; + +#define NBL_IPRO_BP_STATE_ADDR (0xb04828) +#define NBL_IPRO_BP_STATE_DEPTH (1) +#define NBL_IPRO_BP_STATE_WIDTH (32) +#define NBL_IPRO_BP_STATE_DWLEN (1) +union ipro_bp_state_u { + struct ipro_bp_state { + u32 pp_up_link_fc:1; /* [0] Default:0x0 RO */ + u32 pp_dn_link_fc:1; /* [1] Default:0x0 RO */ + u32 pp_up_creadit:1; /* [2] Default:0x0 
RO */ + u32 pp_dn_creadit:1; /* [3] Default:0x0 RO */ + u32 mcc_up_creadit:1; /* [4] Default:0x0 RO */ + u32 mcc_dn_creadit:1; /* [5] Default:0x0 RO */ + u32 pp_rdy:1; /* [6] Default:0x1 RO */ + u32 dn_rdy:1; /* [7] Default:0x1 RO */ + u32 up_rdy:1; /* [8] Default:0x1 RO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_BP_STATE_DWLEN]; +} __packed; + +#define NBL_IPRO_BP_HISTORY_ADDR (0xb0482c) +#define NBL_IPRO_BP_HISTORY_DEPTH (1) +#define NBL_IPRO_BP_HISTORY_WIDTH (32) +#define NBL_IPRO_BP_HISTORY_DWLEN (1) +union ipro_bp_history_u { + struct ipro_bp_history { + u32 pp_rdy:1; /* [0] Default:0x0 RC */ + u32 dn_rdy:1; /* [1] Default:0x0 RC */ + u32 up_rdy:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_IPRO_ERRCODE_TBL_DROP_ADDR (0xb0486c) +#define NBL_IPRO_ERRCODE_TBL_DROP_DEPTH (1) +#define NBL_IPRO_ERRCODE_TBL_DROP_WIDTH (32) +#define NBL_IPRO_ERRCODE_TBL_DROP_DWLEN (1) +union ipro_errcode_tbl_drop_u { + struct ipro_errcode_tbl_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_ERRCODE_TBL_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_SPORT_TBL_DROP_ADDR (0xb04870) +#define NBL_IPRO_SPORT_TBL_DROP_DEPTH (1) +#define NBL_IPRO_SPORT_TBL_DROP_WIDTH (32) +#define NBL_IPRO_SPORT_TBL_DROP_DWLEN (1) +union ipro_sport_tbl_drop_u { + struct ipro_sport_tbl_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_SPORT_TBL_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_PTYPE_TBL_DROP_ADDR (0xb04874) +#define NBL_IPRO_PTYPE_TBL_DROP_DEPTH (1) +#define NBL_IPRO_PTYPE_TBL_DROP_WIDTH (32) +#define NBL_IPRO_PTYPE_TBL_DROP_DWLEN (1) +union ipro_ptype_tbl_drop_u { + struct ipro_ptype_tbl_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_PTYPE_TBL_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_UDL_DROP_ADDR (0xb04878) +#define NBL_IPRO_UDL_DROP_DEPTH (1) +#define NBL_IPRO_UDL_DROP_WIDTH (32) +#define NBL_IPRO_UDL_DROP_DWLEN (1) +union ipro_udl_drop_u { + struct ipro_udl_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_UDL_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_ANTIFAKE_DROP_ADDR (0xb0487c) +#define NBL_IPRO_ANTIFAKE_DROP_DEPTH (1) +#define NBL_IPRO_ANTIFAKE_DROP_WIDTH (32) +#define NBL_IPRO_ANTIFAKE_DROP_DWLEN (1) +union ipro_antifake_drop_u { + struct ipro_antifake_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_ANTIFAKE_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_VLAN_NUM_DROP_ADDR (0xb04880) +#define NBL_IPRO_VLAN_NUM_DROP_DEPTH (1) +#define NBL_IPRO_VLAN_NUM_DROP_WIDTH (32) +#define NBL_IPRO_VLAN_NUM_DROP_DWLEN (1) +union ipro_vlan_num_drop_u { + struct ipro_vlan_num_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_VLAN_NUM_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_TCP_STATE_DROP_ADDR (0xb04884) +#define NBL_IPRO_TCP_STATE_DROP_DEPTH (1) +#define NBL_IPRO_TCP_STATE_DROP_WIDTH (32) +#define NBL_IPRO_TCP_STATE_DROP_DWLEN (1) +union ipro_tcp_state_drop_u { + struct ipro_tcp_state_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 
RO */ + } __packed info; + u32 data[NBL_IPRO_TCP_STATE_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_RAM_ERR_DROP_ADDR (0xb04888) +#define NBL_IPRO_RAM_ERR_DROP_DEPTH (1) +#define NBL_IPRO_RAM_ERR_DROP_WIDTH (32) +#define NBL_IPRO_RAM_ERR_DROP_DWLEN (1) +union ipro_ram_err_drop_u { + struct ipro_ram_err_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_RAM_ERR_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_KG_MISS_ADDR (0xb0488c) +#define NBL_IPRO_KG_MISS_DEPTH (1) +#define NBL_IPRO_KG_MISS_WIDTH (32) +#define NBL_IPRO_KG_MISS_DWLEN (1) +union ipro_kg_miss_u { + struct ipro_kg_miss { + u32 drop_cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 cnt:16; /* [31:16] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_IPRO_KG_MISS_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_DROP_ADDR (0xb04890) +#define NBL_IPRO_MNG_DROP_DEPTH (1) +#define NBL_IPRO_MNG_DROP_WIDTH (32) +#define NBL_IPRO_MNG_DROP_DWLEN (1) +union ipro_mng_drop_u { + struct ipro_mng_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_MTU_CHECK_DROP_ADDR (0xb04900) +#define NBL_IPRO_MTU_CHECK_DROP_DEPTH (256) +#define NBL_IPRO_MTU_CHECK_DROP_WIDTH (32) +#define NBL_IPRO_MTU_CHECK_DROP_DWLEN (1) +union ipro_mtu_check_drop_u { + struct ipro_mtu_check_drop { + u32 vsi_3:8; /* [7:0] Default:0x0 SCTR */ + u32 vsi_2:8; /* [15:8] Default:0x0 SCTR */ + u32 vsi_1:8; /* [23:16] Default:0x0 SCTR */ + u32 vsi_0:8; /* [31:24] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_IPRO_MTU_CHECK_DROP_DWLEN]; +} __packed; +#define NBL_IPRO_MTU_CHECK_DROP_REG(r) (NBL_IPRO_MTU_CHECK_DROP_ADDR + \ + (NBL_IPRO_MTU_CHECK_DROP_DWLEN * 4) * (r)) + +#define NBL_IPRO_LAST_QUEUE_RAM_ERR_ADDR (0xb04d08) +#define NBL_IPRO_LAST_QUEUE_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_QUEUE_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_QUEUE_RAM_ERR_DWLEN (1) +union ipro_last_queue_ram_err_u { + struct ipro_last_queue_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_QUEUE_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_DN_SRC_PORT_RAM_ERR_ADDR (0xb04d0c) +#define NBL_IPRO_LAST_DN_SRC_PORT_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_DN_SRC_PORT_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_DN_SRC_PORT_RAM_ERR_DWLEN (1) +union ipro_last_dn_src_port_ram_err_u { + struct ipro_last_dn_src_port_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_DN_SRC_PORT_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_UP_SRC_PORT_RAM_ERR_ADDR (0xb04d10) +#define NBL_IPRO_LAST_UP_SRC_PORT_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_UP_SRC_PORT_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_UP_SRC_PORT_RAM_ERR_DWLEN (1) +union ipro_last_up_src_port_ram_err_u { + struct ipro_last_up_src_port_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_UP_SRC_PORT_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_DN_PTYPE_RAM_ERR_ADDR (0xb04d14) +#define NBL_IPRO_LAST_DN_PTYPE_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_DN_PTYPE_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_DN_PTYPE_RAM_ERR_DWLEN (1) +union ipro_last_dn_ptype_ram_err_u { + struct ipro_last_dn_ptype_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_DN_PTYPE_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_UP_PTYPE_RAM_ERR_ADDR (0xb04d18) +#define 
NBL_IPRO_LAST_UP_PTYPE_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_UP_PTYPE_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_UP_PTYPE_RAM_ERR_DWLEN (1) +union ipro_last_up_ptype_ram_err_u { + struct ipro_last_up_ptype_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_UP_PTYPE_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_KG_PROF_RAM_ERR_ADDR (0xb04d20) +#define NBL_IPRO_LAST_KG_PROF_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_KG_PROF_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_KG_PROF_RAM_ERR_DWLEN (1) +union ipro_last_kg_prof_ram_err_u { + struct ipro_last_kg_prof_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_KG_PROF_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_ERRCODE_RAM_ERR_ADDR (0xb04d28) +#define NBL_IPRO_LAST_ERRCODE_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_ERRCODE_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_ERRCODE_RAM_ERR_DWLEN (1) +union ipro_last_errcode_ram_err_u { + struct ipro_last_errcode_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_ERRCODE_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_IN_PKT_CAP_EN_ADDR (0xb04dfc) +#define NBL_IPRO_IN_PKT_CAP_EN_DEPTH (1) +#define NBL_IPRO_IN_PKT_CAP_EN_WIDTH (32) +#define NBL_IPRO_IN_PKT_CAP_EN_DWLEN (1) +union ipro_in_pkt_cap_en_u { + struct ipro_in_pkt_cap_en { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_IN_PKT_CAP_EN_DWLEN]; +} __packed; + +#define NBL_IPRO_IN_PKT_CAP_ADDR (0xb04e00) +#define NBL_IPRO_IN_PKT_CAP_DEPTH (64) +#define NBL_IPRO_IN_PKT_CAP_WIDTH (32) +#define NBL_IPRO_IN_PKT_CAP_DWLEN (1) +union ipro_in_pkt_cap_u { + struct ipro_in_pkt_cap { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_IN_PKT_CAP_DWLEN]; +} __packed; +#define NBL_IPRO_IN_PKT_CAP_REG(r) (NBL_IPRO_IN_PKT_CAP_ADDR + \ + (NBL_IPRO_IN_PKT_CAP_DWLEN * 4) * (r)) + +#define NBL_IPRO_ERRCODE_TBL_ADDR (0xb05000) +#define NBL_IPRO_ERRCODE_TBL_DEPTH (16) +#define NBL_IPRO_ERRCODE_TBL_WIDTH (64) +#define NBL_IPRO_ERRCODE_TBL_DWLEN (2) +union ipro_errcode_tbl_u { + struct ipro_errcode_tbl { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 dqueue_pri:2; /* [13:12] Default:0x0 RW */ + u32 set_dport_pri:2; /* [15:14] Default:0x0 RW */ + u32 set_dport:16; /* [31:16] Default:0x0 RW */ + u32 set_dport_en:1; /* [32] Default:0x0 RW */ + u32 proc_done:1; /* [33] Default:0x0 RW */ + u32 vld:1; /* [34] Default:0x0 RW */ + u32 rsv:29; /* [63:35] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_ERRCODE_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_ERRCODE_TBL_REG(r) (NBL_IPRO_ERRCODE_TBL_ADDR + \ + (NBL_IPRO_ERRCODE_TBL_DWLEN * 4) * (r)) + +#define NBL_IPRO_DN_PTYPE_TBL_ADDR (0xb06000) +#define NBL_IPRO_DN_PTYPE_TBL_DEPTH (256) +#define NBL_IPRO_DN_PTYPE_TBL_WIDTH (64) +#define NBL_IPRO_DN_PTYPE_TBL_DWLEN (2) +union ipro_dn_ptype_tbl_u { + struct ipro_dn_ptype_tbl { + u32 dn_entry_vld:1; /* [0] Default:0x0 RW */ + u32 dn_mirror_en:1; /* [1] Default:0x0 RW */ + u32 dn_mirror_pri:2; /* [3:2] Default:0x0 RW */ + u32 dn_mirror_id:4; /* [7:4] Default:0x0 RW */ + u32 dn_encap_en:1; /* [8] Default:0x0 RW */ + u32 dn_encap_pri:2; /* [10:9] Default:0x0 RW */ + u32 dn_encap_index:13; /* [23:11] Default:0x0 RW */ + u32 not_used_0:6; /* [29:24] Default:0x0 RW */ + u32 proc_done:1; /* [30] Default:0x0 RW */ + u32 set_dport_en:1; /* [31] Default:0x0 RW */ + u32 set_dport:16; /* [47:32] 
Default:0x0 RW */ + u32 set_dport_pri:2; /* [49:48] Default:0x0 RW */ + u32 dqueue_pri:2; /* [51:50] Default:0x0 RW */ + u32 dqueue:11; /* [62:52] Default:0x0 RW */ + u32 dqueue_en:1; /* [63] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_DN_PTYPE_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_DN_PTYPE_TBL_REG(r) (NBL_IPRO_DN_PTYPE_TBL_ADDR + \ + (NBL_IPRO_DN_PTYPE_TBL_DWLEN * 4) * (r)) + +#define NBL_IPRO_UP_PTYPE_TBL_ADDR (0xb06800) +#define NBL_IPRO_UP_PTYPE_TBL_DEPTH (256) +#define NBL_IPRO_UP_PTYPE_TBL_WIDTH (64) +#define NBL_IPRO_UP_PTYPE_TBL_DWLEN (2) +union ipro_up_ptype_tbl_u { + struct ipro_up_ptype_tbl { + u32 up_entry_vld:1; /* [0] Default:0x0 RW */ + u32 up_mirror_en:1; /* [1] Default:0x0 RW */ + u32 up_mirror_pri:2; /* [3:2] Default:0x0 RW */ + u32 up_mirror_id:4; /* [7:4] Default:0x0 RW */ + u32 up_decap_en:1; /* [8] Default:0x0 RW */ + u32 up_decap_pri:2; /* [10:9] Default:0x0 RW */ + u32 not_used_1:19; /* [29:11] Default:0x0 RW */ + u32 proc_done:1; /* [30] Default:0x0 RW */ + u32 set_dport_en:1; /* [31] Default:0x0 RW */ + u32 set_dport:16; /* [47:32] Default:0x0 RW */ + u32 set_dport_pri:2; /* [49:48] Default:0x0 RW */ + u32 dqueue_pri:2; /* [51:50] Default:0x0 RW */ + u32 dqueue:11; /* [62:52] Default:0x0 RW */ + u32 dqueue_en:1; /* [63] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_UP_PTYPE_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_UP_PTYPE_TBL_REG(r) (NBL_IPRO_UP_PTYPE_TBL_ADDR + \ + (NBL_IPRO_UP_PTYPE_TBL_DWLEN * 4) * (r)) + +#define NBL_IPRO_QUEUE_TBL_ADDR (0xb08000) +#define NBL_IPRO_QUEUE_TBL_DEPTH (2048) +#define NBL_IPRO_QUEUE_TBL_WIDTH (32) +#define NBL_IPRO_QUEUE_TBL_DWLEN (1) +union ipro_queue_tbl_u { + struct ipro_queue_tbl { + u32 vsi:10; /* [9:0] Default:0x0 RW */ + u32 vsi_en:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_QUEUE_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_QUEUE_TBL_REG(r) (NBL_IPRO_QUEUE_TBL_ADDR + \ + (NBL_IPRO_QUEUE_TBL_DWLEN * 4) * (r)) + +#define NBL_IPRO_UP_SRC_PORT_TBL_ADDR (0xb0b000) +#define NBL_IPRO_UP_SRC_PORT_TBL_DEPTH (4) +#define NBL_IPRO_UP_SRC_PORT_TBL_WIDTH (64) +#define NBL_IPRO_UP_SRC_PORT_TBL_DWLEN (2) +union ipro_up_src_port_tbl_u { + struct ipro_up_src_port_tbl { + u32 entry_vld:1; /* [0] Default:0x0 RW */ + u32 vlan_layer_num_0:2; /* [2:1] Default:0x0 RW */ + u32 vlan_layer_num_1:2; /* [4:3] Default:0x0 RW */ + u32 lag_vld:1; /* [5] Default:0x0 RW */ + u32 lag_id:2; /* [7:6] Default:0x0 RW */ + u32 phy_flow:1; /* [8] Default:0x0 RW */ + u32 mirror_en:1; /* [9] Default:0x0 RW */ + u32 mirror_pr:2; /* [11:10] Default:0x0 RW */ + u32 mirror_id:4; /* [15:12] Default:0x0 RW */ + u32 dqueue_pri:2; /* [17:16] Default:0x0 RW */ + u32 set_dport_pri:2; /* [19:18] Default:0x0 RW */ + u32 dqueue:11; /* [30:20] Default:0x0 RW */ + u32 dqueue_en:1; /* [31] Default:0x0 RW */ + u32 set_dport:16; /* [47:32] Default:0x0 RW */ + u32 set_dport_en:1; /* [48] Default:0x0 RW */ + u32 proc_done:1; /* [49] Default:0x0 RW */ + u32 car_en:1; /* [50] Default:0x0 RW */ + u32 car_pr:2; /* [52:51] Default:0x0 RW */ + u32 car_id:10; /* [62:53] Default:0x0 RW */ + u32 rsv:1; /* [63] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_UP_SRC_PORT_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_UP_SRC_PORT_TBL_REG(r) (NBL_IPRO_UP_SRC_PORT_TBL_ADDR + \ + (NBL_IPRO_UP_SRC_PORT_TBL_DWLEN * 4) * (r)) + +#define NBL_IPRO_DN_SRC_PORT_TBL_ADDR (0xb0c000) +#define NBL_IPRO_DN_SRC_PORT_TBL_DEPTH (1024) +#define NBL_IPRO_DN_SRC_PORT_TBL_WIDTH (128) +#define 
NBL_IPRO_DN_SRC_PORT_TBL_DWLEN (4) +union ipro_dn_src_port_tbl_u { + struct ipro_dn_src_port_tbl { + u32 entry_vld:1; /* [0] Default:0x0 RW */ + u32 mirror_en:1; /* [1] Default:0x0 RW */ + u32 mirror_pr:2; /* [3:2] Default:0x0 RW */ + u32 mirror_id:4; /* [7:4] Default:0x0 RW */ + u32 vlan_layer_num_1:2; /* [9:8] Default:0x0 RW */ + u32 phy_flow:1; /* [10] Default:0x0 RW */ + u32 mtu_sel:4; /* [14:11] Default:0x0 RW */ + u32 addr_check_en:1; /* [15] Default:0x0 RW */ + u32 smac_l:32; /* [47:16] Default:0x0 RW */ + u32 smac_h:16; /* [63:48] Default:0x0 RW */ + u32 dqueue:11; /* [74:64] Default:0x0 RW */ + u32 dqueue_en:1; /* [75] Default:0x0 RW */ + u32 dqueue_pri:2; /* [77:76] Default:0x0 RW */ + u32 set_dport_pri:2; /* [79:78] Default:0x0 RW */ + u32 set_dport:16; /* [95:80] Default:0x0 RW */ + u32 set_dport_en:1; /* [96] Default:0x0 RW */ + u32 proc_done:1; /* [97] Default:0x0 RW */ + u32 not_used_1:2; /* [99:98] Default:0x0 RW */ + u32 rsv:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_DN_SRC_PORT_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_DN_SRC_PORT_TBL_REG(r) (NBL_IPRO_DN_SRC_PORT_TBL_ADDR + \ + (NBL_IPRO_DN_SRC_PORT_TBL_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_mcc.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_mcc.h new file mode 100644 index 0000000000000000000000000000000000000000..da3e1e6f87260200c3d016f99a5f03fc67dabcbe --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_mcc.h @@ -0,0 +1,407 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_MCC_H +#define NBL_MCC_H 1 + +#include <linux/types.h> + +#define NBL_MCC_BASE (0x00B44000) + +#define NBL_MCC_INT_STATUS_ADDR (0xb44000) +#define NBL_MCC_INT_STATUS_DEPTH (1) +#define NBL_MCC_INT_STATUS_WIDTH (32) +#define NBL_MCC_INT_STATUS_DWLEN (1) +union mcc_int_status_u { + struct mcc_int_status { + u32 fatal_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RWC */ + u32 fsm_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 cfg_err:1; /* [5] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [6] Default:0x0 RWC */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_MCC_INT_MASK_ADDR (0xb44004) +#define NBL_MCC_INT_MASK_DEPTH (1) +#define NBL_MCC_INT_MASK_WIDTH (32) +#define NBL_MCC_INT_MASK_DWLEN (1) +union mcc_int_mask_u { + struct mcc_int_mask { + u32 fatal_err:1; /* [0] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RW */ + u32 fsm_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 cfg_err:1; /* [5] Default:0x0 RW */ + u32 data_ucor_err:1; /* [6] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_INT_MASK_DWLEN]; +} __packed; + +#define NBL_MCC_INT_SET_ADDR (0xb44008) +#define NBL_MCC_INT_SET_DEPTH (1) +#define NBL_MCC_INT_SET_WIDTH (32) +#define NBL_MCC_INT_SET_DWLEN (1) +union mcc_int_set_u { + struct mcc_int_set { + u32 fatal_err:1; /* [0] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 WO */ + u32 fsm_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 cfg_err:1; /* [5] Default:0x0 WO */ + u32 data_ucor_err:1; /* [6] Default:0x0
WO */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_INT_SET_DWLEN]; +} __packed; + +#define NBL_MCC_INIT_DONE_ADDR (0xb4400c) +#define NBL_MCC_INIT_DONE_DEPTH (1) +#define NBL_MCC_INIT_DONE_WIDTH (32) +#define NBL_MCC_INIT_DONE_DWLEN (1) +union mcc_init_done_u { + struct mcc_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_MCC_CIF_ERR_INFO_ADDR (0xb44040) +#define NBL_MCC_CIF_ERR_INFO_DEPTH (1) +#define NBL_MCC_CIF_ERR_INFO_WIDTH (32) +#define NBL_MCC_CIF_ERR_INFO_DWLEN (1) +union mcc_cif_err_info_u { + struct mcc_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MCC_CFG_ERR_INFO_ADDR (0xb44050) +#define NBL_MCC_CFG_ERR_INFO_DEPTH (1) +#define NBL_MCC_CFG_ERR_INFO_WIDTH (32) +#define NBL_MCC_CFG_ERR_INFO_DWLEN (1) +union mcc_cfg_err_info_u { + struct mcc_cfg_err_info { + u32 id:8; /* [7:0] Default:0x0 RO */ + u32 addr:16; /* [23:8] Default:0x0 RO */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MCC_CAR_CTRL_ADDR (0xb44100) +#define NBL_MCC_CAR_CTRL_DEPTH (1) +#define NBL_MCC_CAR_CTRL_WIDTH (32) +#define NBL_MCC_CAR_CTRL_DWLEN (1) +union mcc_car_ctrl_u { + struct mcc_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_MCC_TIMEOUT_CFG_ADDR (0xb44140) +#define NBL_MCC_TIMEOUT_CFG_DEPTH (1) +#define NBL_MCC_TIMEOUT_CFG_WIDTH (32) +#define NBL_MCC_TIMEOUT_CFG_DWLEN (1) +union mcc_timeout_cfg_u { + struct mcc_timeout_cfg { + u32 fsm_max_num:32; /* [31:0] Default:0x0ffffffff RW */ + } __packed info; + u32 data[NBL_MCC_TIMEOUT_CFG_DWLEN]; +} __packed; + +#define NBL_MCC_INIT_START_ADDR (0xb44180) +#define NBL_MCC_INIT_START_DEPTH (1) +#define NBL_MCC_INIT_START_WIDTH (32) +#define NBL_MCC_INIT_START_DWLEN (1) +union mcc_init_start_u { + struct mcc_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_INIT_START_DWLEN]; +} __packed; + +#define NBL_MCC_RATE_CTRL_ADDR (0xb44300) +#define NBL_MCC_RATE_CTRL_DEPTH (1) +#define NBL_MCC_RATE_CTRL_WIDTH (32) +#define NBL_MCC_RATE_CTRL_DWLEN (1) +union mcc_rate_ctrl_u { + struct mcc_rate_ctrl { + u32 rate_ctrl_eth_bandwidth:3; /* [2:0] Default:0x0 RW */ + u32 rate_ctrl_eth_switch:2; /* [4:3] Default:0x0 RW */ + u32 rate_ctrl_gear:3; /* [7:5] Default:0x0 RW */ + u32 rate_ctrl_en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_RATE_CTRL_DWLEN]; +} __packed; + +#define NBL_MCC_CREDIT_ADDR (0xb44400) +#define NBL_MCC_CREDIT_DEPTH (1) +#define NBL_MCC_CREDIT_WIDTH (32) +#define NBL_MCC_CREDIT_DWLEN (1) +union mcc_credit_u { + struct mcc_credit { + u32 mcc_up_credit:5; /* [4:0] Default:0x1d RW */ + u32 mcc_up_vld:1; /* [5] Default:0x0 WO */ + u32 rsv1:10; /* [15:6] Default:0x0 RO */ + u32 mcc_dn_credit:5; /* [20:16] Default:0x1d RW */ + u32 mcc_dn_vld:1; /* [21] Default:0x0 WO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_MCC_CREDIT_DWLEN]; +} __packed; + +#define NBL_MCC_ACTION_PRIORITY_ADDR (0xb44500) +#define NBL_MCC_ACTION_PRIORITY_DEPTH (1) +#define NBL_MCC_ACTION_PRIORITY_WIDTH (32) +#define NBL_MCC_ACTION_PRIORITY_DWLEN (1) +union mcc_action_priority_u { + struct mcc_action_priority { + u32 statidx_act_pri:2; /* [1:0] Default:0x0 RW */ + u32 dport_act_pri:2; /* [3:2] Default:0x0 RW */ + u32 dqueue_act_pri:2; /* [5:4] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_ACTION_PRIORITY_DWLEN]; +} __packed; + +#define NBL_MCC_UU_WEIGHT_ADDR (0xb44600) +#define NBL_MCC_UU_WEIGHT_DEPTH (1) +#define NBL_MCC_UU_WEIGHT_WIDTH (32) +#define NBL_MCC_UU_WEIGHT_DWLEN (1) +union mcc_uu_weight_u { + struct mcc_uu_weight { + u32 uu_weight:8; /* [7:0] Default:0x2 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_UU_WEIGHT_DWLEN]; +} __packed; + +#define NBL_MCC_DU_WEIGHT_ADDR (0xb44604) +#define NBL_MCC_DU_WEIGHT_DEPTH (1) +#define NBL_MCC_DU_WEIGHT_WIDTH (32) +#define NBL_MCC_DU_WEIGHT_DWLEN (1) +union mcc_du_weight_u { + struct mcc_du_weight { + u32 du_weight:8; /* [7:0] Default:0x2 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_DU_WEIGHT_DWLEN]; +} __packed; + +#define NBL_MCC_UCH_WEIGHT_ADDR (0xb44608) +#define NBL_MCC_UCH_WEIGHT_DEPTH (1) +#define NBL_MCC_UCH_WEIGHT_WIDTH (32) +#define NBL_MCC_UCH_WEIGHT_DWLEN (1) +union mcc_uch_weight_u { + struct mcc_uch_weight { + u32 uch_weight:8; /* [7:0] Default:0x1 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_UCH_WEIGHT_DWLEN]; +} __packed; + +#define NBL_MCC_DCH_WEIGHT_ADDR (0xb4460c) +#define NBL_MCC_DCH_WEIGHT_DEPTH (1) +#define NBL_MCC_DCH_WEIGHT_WIDTH (32) +#define NBL_MCC_DCH_WEIGHT_DWLEN (1) +union mcc_dch_weight_u { + struct mcc_dch_weight { + u32 dch_weight:8; /* [7:0] Default:0x1 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_DCH_WEIGHT_DWLEN]; +} __packed; + +#define NBL_MCC_SPD_TIMEOUT_TH_ADDR (0xb44740) +#define NBL_MCC_SPD_TIMEOUT_TH_DEPTH (1) +#define NBL_MCC_SPD_TIMEOUT_TH_WIDTH (32) +#define NBL_MCC_SPD_TIMEOUT_TH_DWLEN (1) +union mcc_spd_timeout_th_u { + struct mcc_spd_timeout_th { + u32 timeout_th:8; /* [7:0] Default:0xff RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_SPD_TIMEOUT_TH_DWLEN]; +} __packed; + +#define NBL_MCC_EXT_FLAG_OFFSET_ADDR (0xb44800) +#define NBL_MCC_EXT_FLAG_OFFSET_DEPTH (1) +#define NBL_MCC_EXT_FLAG_OFFSET_WIDTH (32) +#define NBL_MCC_EXT_FLAG_OFFSET_DWLEN (1) +union mcc_ext_flag_offset_u { + struct mcc_ext_flag_offset { + u32 dir_offset:5; /* [4:0] Default:0x00 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_EXT_FLAG_OFFSET_DWLEN]; +} __packed; + +#define NBL_MCC_EXT_MCIDX_ADDR (0xb44804) +#define NBL_MCC_EXT_MCIDX_DEPTH (1) +#define NBL_MCC_EXT_MCIDX_WIDTH (32) +#define NBL_MCC_EXT_MCIDX_DWLEN (1) +union mcc_ext_mcidx_u { + struct mcc_ext_mcidx { + u32 mcidx_act_id:6; /* [5:0] Default:0x0d RW */ + u32 mcidx_vld:1; /* [6] Default:0x1 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_EXT_MCIDX_DWLEN]; +} __packed; + +#define NBL_MCC_MC_ORIGINAL_DPORT_ADDR (0xb44808) +#define NBL_MCC_MC_ORIGINAL_DPORT_DEPTH (1) +#define NBL_MCC_MC_ORIGINAL_DPORT_WIDTH (32) +#define NBL_MCC_MC_ORIGINAL_DPORT_DWLEN (1) +union mcc_mc_original_dport_u { + struct mcc_mc_original_dport { + u32 dport:16; /* [15:0] Default:0x2fef RW */ + u32
rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_MC_ORIGINAL_DPORT_DWLEN]; +} __packed; + +#define NBL_MCC_AM_SET_FLAGS_ADDR (0xb44900) +#define NBL_MCC_AM_SET_FLAGS_DEPTH (1) +#define NBL_MCC_AM_SET_FLAGS_WIDTH (32) +#define NBL_MCC_AM_SET_FLAGS_DWLEN (1) +union mcc_am_set_flags_u { + struct mcc_am_set_flags { + u32 set_flags:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_MCC_AM_SET_FLAGS_DWLEN]; +} __packed; + +#define NBL_MCC_AM_CLEAR_FLAGS_ADDR (0xb44904) +#define NBL_MCC_AM_CLEAR_FLAGS_DEPTH (1) +#define NBL_MCC_AM_CLEAR_FLAGS_WIDTH (32) +#define NBL_MCC_AM_CLEAR_FLAGS_DWLEN (1) +union mcc_am_clear_flags_u { + struct mcc_am_clear_flags { + u32 clear_flags:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_MCC_AM_CLEAR_FLAGS_DWLEN]; +} __packed; + +#define NBL_MCC_AM_ACT_ID_ADDR (0xb44a00) +#define NBL_MCC_AM_ACT_ID_DEPTH (1) +#define NBL_MCC_AM_ACT_ID_WIDTH (32) +#define NBL_MCC_AM_ACT_ID_DWLEN (1) +union mcc_am_act_id_u { + struct mcc_am_act_id { + u32 dport_act_id:6; /* [5:0] Default:0x9 RW */ + u32 rsv3:2; /* [7:6] Default:0x0 RO */ + u32 dqueue_act_id:6; /* [13:8] Default:0xa RW */ + u32 rsv2:2; /* [15:14] Default:0x0 RO */ + u32 statidx_act_id:6; /* [21:16] Default:0x10 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 mirroridx_act_id:6; /* [29:24] Default:0x08 RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_AM_ACT_ID_DWLEN]; +} __packed; + +#define NBL_MCC_QUEUE_EN_CTRL_ADDR (0xb44b00) +#define NBL_MCC_QUEUE_EN_CTRL_DEPTH (1) +#define NBL_MCC_QUEUE_EN_CTRL_WIDTH (32) +#define NBL_MCC_QUEUE_EN_CTRL_DWLEN (1) +union mcc_queue_en_ctrl_u { + struct mcc_queue_en_ctrl { + u32 uuq_en:1; /* [0] Default:0x1 RW */ + u32 duq_en:1; /* [1] Default:0x1 RW */ + u32 umhq_en:1; /* [2] Default:0x1 RW */ + u32 dmhq_en:1; /* [3] Default:0x1 RW */ + u32 umlq_en:1; /* [4] Default:0x1 RW */ + u32 dmlq_en:1; /* [5] Default:0x1 RW */ + u32 uchq_en:1; /* [6] Default:0x1 RW */ + u32 dchq_en:1; /* [7] Default:0x1 RW */ + u32 uclq_en:1; /* [8] Default:0x1 RW */ + u32 dclq_en:1; /* [9] Default:0x1 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_QUEUE_EN_CTRL_DWLEN]; +} __packed; + +#define NBL_MCC_CFG_TEST_ADDR (0xb44c00) +#define NBL_MCC_CFG_TEST_DEPTH (1) +#define NBL_MCC_CFG_TEST_WIDTH (32) +#define NBL_MCC_CFG_TEST_DWLEN (1) +union mcc_cfg_test_u { + struct mcc_cfg_test { + u32 test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_MCC_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_MCC_BP_STATE_ADDR (0xb44f00) +#define NBL_MCC_BP_STATE_DEPTH (1) +#define NBL_MCC_BP_STATE_WIDTH (32) +#define NBL_MCC_BP_STATE_DWLEN (1) +union mcc_bp_state_u { + struct mcc_bp_state { + u32 in_bp:1; /* [0] Default:0x0 RO */ + u32 out_bp:1; /* [1] Default:0x0 RO */ + u32 inter_bp:1; /* [2] Default:0x0 RO */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_BP_STATE_DWLEN]; +} __packed; + +#define NBL_MCC_BP_HISTORY_ADDR (0xb44f04) +#define NBL_MCC_BP_HISTORY_DEPTH (1) +#define NBL_MCC_BP_HISTORY_WIDTH (32) +#define NBL_MCC_BP_HISTORY_DWLEN (1) +union mcc_bp_history_u { + struct mcc_bp_history { + u32 in_bp:1; /* [0] Default:0x0 RC */ + u32 out_bp:1; /* [1] Default:0x0 RC */ + u32 inter_bp:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_MCC_TBL_ADDR (0xb54000) +#define NBL_MCC_TBL_DEPTH (8192) +#define NBL_MCC_TBL_WIDTH (64) +#define 
NBL_MCC_TBL_DWLEN (2) +union mcc_tbl_u { + struct mcc_tbl { + u32 dport_act:16; /* [15:0] Default:0x0 RW */ + u32 dqueue_act:11; /* [26:16] Default:0x0 RW */ + u32 dqueue_en:1; /* [27] Default:0x0 RW */ + u32 dqueue_rsv:4; /* [31:28] Default:0x0 RO */ + u32 statid_act:11; /* [42:32] Default:0x0 RW */ + u32 statid_filter:1; /* [43] Default:0x0 RW */ + u32 flowid_filter:1; /* [44] Default:0x0 RW */ + u32 stateid_rsv:3; /* [47:45] Default:0x0 RO */ + u32 next_pntr:13; /* [60:48] Default:0x0 RW */ + u32 tail:1; /* [61] Default:0x0 RW */ + u32 vld:1; /* [62] Default:0x0 RW */ + u32 rsv:1; /* [63] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_TBL_DWLEN]; +} __packed; +#define NBL_MCC_TBL_REG(r) (NBL_MCC_TBL_ADDR + \ + (NBL_MCC_TBL_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp0.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp0.h new file mode 100644 index 0000000000000000000000000000000000000000..690c6ce96d8462d765a4de31905e16fe9b328289 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp0.h @@ -0,0 +1,614 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_PP0_H +#define NBL_PP0_H 1 + +#include <linux/types.h> + +#define NBL_PP0_BASE (0x00B14000) + +#define NBL_PP0_INT_STATUS_ADDR (0xb14000) +#define NBL_PP0_INT_STATUS_DEPTH (1) +#define NBL_PP0_INT_STATUS_WIDTH (32) +#define NBL_PP0_INT_STATUS_DWLEN (1) +union pp0_int_status_u { + struct pp0_int_status { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_PP0_INT_MASK_ADDR (0xb14004) +#define NBL_PP0_INT_MASK_DEPTH (1) +#define NBL_PP0_INT_MASK_WIDTH (32) +#define NBL_PP0_INT_MASK_DWLEN (1) +union pp0_int_mask_u { + struct pp0_int_mask { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RW */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_INT_MASK_DWLEN]; +} __packed; + +#define NBL_PP0_INT_SET_ADDR (0xb14008) +#define NBL_PP0_INT_SET_DEPTH (1) +#define NBL_PP0_INT_SET_WIDTH (32) +#define NBL_PP0_INT_SET_DWLEN (1) +union pp0_int_set_u { + struct pp0_int_set { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /*
[09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_INT_SET_DWLEN]; +} __packed; + +#define NBL_PP0_INIT_DONE_ADDR (0xb1400c) +#define NBL_PP0_INIT_DONE_DEPTH (1) +#define NBL_PP0_INIT_DONE_WIDTH (32) +#define NBL_PP0_INIT_DONE_DWLEN (1) +union pp0_init_done_u { + struct pp0_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_PP0_CFG_ERR_INFO_ADDR (0xb14038) +#define NBL_PP0_CFG_ERR_INFO_DEPTH (1) +#define NBL_PP0_CFG_ERR_INFO_WIDTH (32) +#define NBL_PP0_CFG_ERR_INFO_DWLEN (1) +union pp0_cfg_err_info_u { + struct pp0_cfg_err_info { + u32 id:1; /* [0:0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PP0_CIF_ERR_INFO_ADDR (0xb14040) +#define NBL_PP0_CIF_ERR_INFO_DEPTH (1) +#define NBL_PP0_CIF_ERR_INFO_WIDTH (32) +#define NBL_PP0_CIF_ERR_INFO_DWLEN (1) +union pp0_cif_err_info_u { + struct pp0_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PP0_CAR_CTRL_ADDR (0xb14100) +#define NBL_PP0_CAR_CTRL_DEPTH (1) +#define NBL_PP0_CAR_CTRL_WIDTH (32) +#define NBL_PP0_CAR_CTRL_DWLEN (1) +union pp0_car_ctrl_u { + struct pp0_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_PP0_MODE_ADDR (0xb14104) +#define NBL_PP0_MODE_DEPTH (1) +#define NBL_PP0_MODE_WIDTH (32) +#define NBL_PP0_MODE_DWLEN (1) +union pp0_mode_u { + struct pp0_mode { + u32 bypass:1; /* [0] Default:0x0 RW */ + u32 internal_loopback_en:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_MODE_DWLEN]; +} __packed; + +#define NBL_PP0_SET_FLAGS0_ADDR (0xb14108) +#define NBL_PP0_SET_FLAGS0_DEPTH (1) +#define NBL_PP0_SET_FLAGS0_WIDTH (32) +#define NBL_PP0_SET_FLAGS0_DWLEN (1) +union pp0_set_flags0_u { + struct pp0_set_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_SET_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP0_SET_FLAGS1_ADDR (0xb1410c) +#define NBL_PP0_SET_FLAGS1_DEPTH (1) +#define NBL_PP0_SET_FLAGS1_WIDTH (32) +#define NBL_PP0_SET_FLAGS1_DWLEN (1) +union pp0_set_flags1_u { + struct pp0_set_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_SET_FLAGS1_DWLEN]; +} __packed; + +#define NBL_PP0_CLEAR_FLAGS0_ADDR (0xb14110) +#define NBL_PP0_CLEAR_FLAGS0_DEPTH (1) +#define NBL_PP0_CLEAR_FLAGS0_WIDTH (32) +#define NBL_PP0_CLEAR_FLAGS0_DWLEN (1) +union pp0_clear_flags0_u { + struct pp0_clear_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_CLEAR_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP0_CLEAR_FLAGS1_ADDR (0xb14114) +#define NBL_PP0_CLEAR_FLAGS1_DEPTH (1) +#define NBL_PP0_CLEAR_FLAGS1_WIDTH (32) +#define NBL_PP0_CLEAR_FLAGS1_DWLEN (1) +union pp0_clear_flags1_u { + struct pp0_clear_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_CLEAR_FLAGS1_DWLEN]; +} __packed; + +#define 
NBL_PP0_ACTION_PRIORITY0_ADDR (0xb14118) +#define NBL_PP0_ACTION_PRIORITY0_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY0_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY0_DWLEN (1) +union pp0_action_priority0_u { + struct pp0_action_priority0 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY0_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY1_ADDR (0xb1411c) +#define NBL_PP0_ACTION_PRIORITY1_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY1_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY1_DWLEN (1) +union pp0_action_priority1_u { + struct pp0_action_priority1 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY1_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY2_ADDR (0xb14120) +#define NBL_PP0_ACTION_PRIORITY2_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY2_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY2_DWLEN (1) +union pp0_action_priority2_u { + struct pp0_action_priority2 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY2_DWLEN]; +} __packed; + 
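+/*
+ * Illustrative sketch only, not part of the generated register dump: how
+ * one of these bitfield unions is typically used for a read-modify-write
+ * of a whole register dword. The ioremap'ed BAR pointer "hw_addr" and the
+ * helper name are hypothetical; the driver's real accessors live in the
+ * phy layer.
+ */
+static inline void pp0_mode_set_bypass(void __iomem *hw_addr, bool en)
+{
+	union pp0_mode_u mode;
+
+	/* Read the full 32-bit register, flip one field, write it back. */
+	mode.data[0] = readl(hw_addr + NBL_PP0_MODE_ADDR);
+	mode.info.bypass = en;
+	writel(mode.data[0], hw_addr + NBL_PP0_MODE_ADDR);
+}
+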
+#define NBL_PP0_ACTION_PRIORITY3_ADDR (0xb14124) +#define NBL_PP0_ACTION_PRIORITY3_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY3_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY3_DWLEN (1) +union pp0_action_priority3_u { + struct pp0_action_priority3 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY3_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY4_ADDR (0xb14128) +#define NBL_PP0_ACTION_PRIORITY4_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY4_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY4_DWLEN (1) +union pp0_action_priority4_u { + struct pp0_action_priority4 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY4_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY5_ADDR (0xb1412c) +#define NBL_PP0_ACTION_PRIORITY5_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY5_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY5_DWLEN (1) +union pp0_action_priority5_u { + struct pp0_action_priority5 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY5_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY6_ADDR (0xb14130) +#define NBL_PP0_ACTION_PRIORITY6_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY6_WIDTH (32) +#define 
NBL_PP0_ACTION_PRIORITY6_DWLEN (1) +union pp0_action_priority6_u { + struct pp0_action_priority6 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY6_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY7_ADDR (0xb14134) +#define NBL_PP0_ACTION_PRIORITY7_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY7_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY7_DWLEN (1) +union pp0_action_priority7_u { + struct pp0_action_priority7 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY7_DWLEN]; +} __packed; + +#define NBL_PP0_CPU_ACCESS_ADDR (0xb1416c) +#define NBL_PP0_CPU_ACCESS_DEPTH (1) +#define NBL_PP0_CPU_ACCESS_WIDTH (32) +#define NBL_PP0_CPU_ACCESS_DWLEN (1) +union pp0_cpu_access_u { + struct pp0_cpu_access { + u32 bp_th:10; /* [9:0] Default:0x34 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 timeout_th:10; /* [25:16] Default:0x100 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_CPU_ACCESS_DWLEN]; +} __packed; + +#define NBL_PP0_RDMA_BYPASS_ADDR (0xb14170) +#define NBL_PP0_RDMA_BYPASS_DEPTH (1) +#define NBL_PP0_RDMA_BYPASS_WIDTH (32) +#define NBL_PP0_RDMA_BYPASS_DWLEN (1) +union pp0_rdma_bypass_u { + struct pp0_rdma_bypass { + u32 rdma_flag_offset:5; /* [4:0] Default:0x0 RW */ + u32 dn_bypass_en:1; /* [5] Default:0x0 RW */ + u32 up_bypass_en:1; /* [6] Default:0x0 RW */ + u32 rsv1:1; /* [7] Default:0x0 RO */ + u32 dir_flag_offset:5; /* [12:8] Default:0x0 RW */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_RDMA_BYPASS_DWLEN]; +} __packed; + +#define NBL_PP0_INIT_START_ADDR (0xb141fc) +#define NBL_PP0_INIT_START_DEPTH (1) +#define NBL_PP0_INIT_START_WIDTH (32) +#define NBL_PP0_INIT_START_DWLEN (1) +union pp0_init_start_u { + struct pp0_init_start { + u32 en:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_INIT_START_DWLEN]; +} __packed; + +#define NBL_PP0_BP_SET_ADDR (0xb14200) +#define NBL_PP0_BP_SET_DEPTH (1) +#define 
NBL_PP0_BP_SET_WIDTH (32) +#define NBL_PP0_BP_SET_DWLEN (1) +union pp0_bp_set_u { + struct pp0_bp_set { + u32 pp_up:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_BP_SET_DWLEN]; +} __packed; + +#define NBL_PP0_BP_MASK_ADDR (0xb14204) +#define NBL_PP0_BP_MASK_DEPTH (1) +#define NBL_PP0_BP_MASK_WIDTH (32) +#define NBL_PP0_BP_MASK_DWLEN (1) +union pp0_bp_mask_u { + struct pp0_bp_mask { + u32 dn_pp:1; /* [00:00] Default:0x0 RW */ + u32 fem_pp:1; /* [01:01] Default:0x0 RW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_BP_MASK_DWLEN]; +} __packed; + +#define NBL_PP0_BP_STATE_ADDR (0xb14308) +#define NBL_PP0_BP_STATE_DEPTH (1) +#define NBL_PP0_BP_STATE_WIDTH (32) +#define NBL_PP0_BP_STATE_DWLEN (1) +union pp0_bp_state_u { + struct pp0_bp_state { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RO */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RO */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RO */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_BP_STATE_DWLEN]; +} __packed; + +#define NBL_PP0_BP_HISTORY_ADDR (0xb1430c) +#define NBL_PP0_BP_HISTORY_DEPTH (1) +#define NBL_PP0_BP_HISTORY_WIDTH (32) +#define NBL_PP0_BP_HISTORY_DWLEN (1) +union pp0_bp_history_u { + struct pp0_bp_history { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RC */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RC */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RC */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RC */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_PP0_CFG_TEST_ADDR (0xb1442c) +#define NBL_PP0_CFG_TEST_DEPTH (1) +#define NBL_PP0_CFG_TEST_WIDTH (32) +#define NBL_PP0_CFG_TEST_DWLEN (1) +union pp0_cfg_test_u { + struct pp0_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION0_ADDR (0xb14430) +#define NBL_PP0_ABNORMAL_ACTION0_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION0_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION0_DWLEN (1) +union pp0_abnormal_action0_u { + struct pp0_abnormal_action0 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION0_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION1_ADDR (0xb14434) +#define NBL_PP0_ABNORMAL_ACTION1_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION1_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION1_DWLEN (1) +union pp0_abnormal_action1_u { + struct pp0_abnormal_action1 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION1_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION2_ADDR (0xb14438) +#define NBL_PP0_ABNORMAL_ACTION2_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION2_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION2_DWLEN (1) +union pp0_abnormal_action2_u { + struct pp0_abnormal_action2 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION2_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION3_ADDR (0xb1443c) +#define NBL_PP0_ABNORMAL_ACTION3_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION3_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION3_DWLEN (1) +union pp0_abnormal_action3_u { + struct pp0_abnormal_action3 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO 
*/ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION3_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION4_ADDR (0xb14440) +#define NBL_PP0_ABNORMAL_ACTION4_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION4_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION4_DWLEN (1) +union pp0_abnormal_action4_u { + struct pp0_abnormal_action4 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION4_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION5_ADDR (0xb14444) +#define NBL_PP0_ABNORMAL_ACTION5_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION5_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION5_DWLEN (1) +union pp0_abnormal_action5_u { + struct pp0_abnormal_action5 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION5_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION6_ADDR (0xb14448) +#define NBL_PP0_ABNORMAL_ACTION6_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION6_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION6_DWLEN (1) +union pp0_abnormal_action6_u { + struct pp0_abnormal_action6 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION6_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION7_ADDR (0xb1444c) +#define NBL_PP0_ABNORMAL_ACTION7_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION7_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION7_DWLEN (1) +union pp0_abnormal_action7_u { + struct pp0_abnormal_action7 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION7_DWLEN]; +} __packed; + +#define NBL_PP0_FWD_DPORT_ACTION_ADDR (0xb14450) +#define NBL_PP0_FWD_DPORT_ACTION_DEPTH (1) +#define NBL_PP0_FWD_DPORT_ACTION_WIDTH (32) +#define NBL_PP0_FWD_DPORT_ACTION_DWLEN (1) +union pp0_fwd_dport_action_u { + struct pp0_fwd_dport_action { + u32 action_id:6; /* [05:00] Default:0x9 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_FWD_DPORT_ACTION_DWLEN]; +} __packed; + +#define NBL_PP0_RDMA_VSI_BTM_ADDR (0xb14454) +#define NBL_PP0_RDMA_VSI_BTM_DEPTH (32) +#define NBL_PP0_RDMA_VSI_BTM_WIDTH (32) +#define NBL_PP0_RDMA_VSI_BTM_DWLEN (1) +union pp0_rdma_vsi_btm_u { + struct pp0_rdma_vsi_btm { + u32 btm:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_RDMA_VSI_BTM_DWLEN]; +} __packed; +#define NBL_PP0_RDMA_VSI_BTM_REG(r) (NBL_PP0_RDMA_VSI_BTM_ADDR + \ + (NBL_PP0_RDMA_VSI_BTM_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp1.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp1.h new file mode 100644 index 0000000000000000000000000000000000000000..d909fd0df59ae29dafc8b92c997f5521e8b4928a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp1.h @@ -0,0 +1,696 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_PP1_H +#define NBL_PP1_H 1 + +#include <linux/types.h> + +#define NBL_PP1_BASE (0x00B24000) + +#define NBL_PP1_INT_STATUS_ADDR (0xb24000) +#define NBL_PP1_INT_STATUS_DEPTH (1) +#define NBL_PP1_INT_STATUS_WIDTH (32) +#define NBL_PP1_INT_STATUS_DWLEN (1) +union pp1_int_status_u { + struct pp1_int_status { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_PP1_INT_MASK_ADDR (0xb24004) +#define NBL_PP1_INT_MASK_DEPTH (1) +#define NBL_PP1_INT_MASK_WIDTH (32) +#define NBL_PP1_INT_MASK_DWLEN (1) +union pp1_int_mask_u { + struct pp1_int_mask { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RW */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_INT_MASK_DWLEN]; +} __packed; + +#define NBL_PP1_INT_SET_ADDR (0xb24008) +#define NBL_PP1_INT_SET_DEPTH (1) +#define NBL_PP1_INT_SET_WIDTH (32) +#define NBL_PP1_INT_SET_DWLEN (1) +union pp1_int_set_u { + struct pp1_int_set { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_INT_SET_DWLEN]; +} __packed; + +#define NBL_PP1_INIT_DONE_ADDR (0xb2400c) +#define NBL_PP1_INIT_DONE_DEPTH (1) +#define NBL_PP1_INIT_DONE_WIDTH (32) +#define NBL_PP1_INIT_DONE_DWLEN (1) +union pp1_init_done_u { + struct pp1_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_PP1_CFG_ERR_INFO_ADDR (0xb24038) +#define NBL_PP1_CFG_ERR_INFO_DEPTH (1) +#define NBL_PP1_CFG_ERR_INFO_WIDTH (32) +#define NBL_PP1_CFG_ERR_INFO_DWLEN (1) +union pp1_cfg_err_info_u { + struct pp1_cfg_err_info { + u32 id:1; /* [0:0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PP1_CIF_ERR_INFO_ADDR (0xb24040) +#define NBL_PP1_CIF_ERR_INFO_DEPTH (1) +#define NBL_PP1_CIF_ERR_INFO_WIDTH (32) +#define NBL_PP1_CIF_ERR_INFO_DWLEN (1) +union pp1_cif_err_info_u { + struct pp1_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31]
Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PP1_CAR_CTRL_ADDR (0xb24100) +#define NBL_PP1_CAR_CTRL_DEPTH (1) +#define NBL_PP1_CAR_CTRL_WIDTH (32) +#define NBL_PP1_CAR_CTRL_DWLEN (1) +union pp1_car_ctrl_u { + struct pp1_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_PP1_MODE_ADDR (0xb24104) +#define NBL_PP1_MODE_DEPTH (1) +#define NBL_PP1_MODE_WIDTH (32) +#define NBL_PP1_MODE_DWLEN (1) +union pp1_mode_u { + struct pp1_mode { + u32 bypass:1; /* [0] Default:0x0 RW */ + u32 internal_loopback_en:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_MODE_DWLEN]; +} __packed; + +#define NBL_PP1_SET_FLAGS0_ADDR (0xb24108) +#define NBL_PP1_SET_FLAGS0_DEPTH (1) +#define NBL_PP1_SET_FLAGS0_WIDTH (32) +#define NBL_PP1_SET_FLAGS0_DWLEN (1) +union pp1_set_flags0_u { + struct pp1_set_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_SET_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP1_SET_FLAGS1_ADDR (0xb2410c) +#define NBL_PP1_SET_FLAGS1_DEPTH (1) +#define NBL_PP1_SET_FLAGS1_WIDTH (32) +#define NBL_PP1_SET_FLAGS1_DWLEN (1) +union pp1_set_flags1_u { + struct pp1_set_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_SET_FLAGS1_DWLEN]; +} __packed; + +#define NBL_PP1_CLEAR_FLAGS0_ADDR (0xb24110) +#define NBL_PP1_CLEAR_FLAGS0_DEPTH (1) +#define NBL_PP1_CLEAR_FLAGS0_WIDTH (32) +#define NBL_PP1_CLEAR_FLAGS0_DWLEN (1) +union pp1_clear_flags0_u { + struct pp1_clear_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_CLEAR_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP1_CLEAR_FLAGS1_ADDR (0xb24114) +#define NBL_PP1_CLEAR_FLAGS1_DEPTH (1) +#define NBL_PP1_CLEAR_FLAGS1_WIDTH (32) +#define NBL_PP1_CLEAR_FLAGS1_DWLEN (1) +union pp1_clear_flags1_u { + struct pp1_clear_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_CLEAR_FLAGS1_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY0_ADDR (0xb24118) +#define NBL_PP1_ACTION_PRIORITY0_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY0_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY0_DWLEN (1) +union pp1_action_priority0_u { + struct pp1_action_priority0 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY0_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY1_ADDR (0xb2411c) +#define 
NBL_PP1_ACTION_PRIORITY1_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY1_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY1_DWLEN (1) +union pp1_action_priority1_u { + struct pp1_action_priority1 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY1_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY2_ADDR (0xb24120) +#define NBL_PP1_ACTION_PRIORITY2_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY2_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY2_DWLEN (1) +union pp1_action_priority2_u { + struct pp1_action_priority2 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY2_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY3_ADDR (0xb24124) +#define NBL_PP1_ACTION_PRIORITY3_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY3_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY3_DWLEN (1) +union pp1_action_priority3_u { + struct pp1_action_priority3 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY3_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY4_ADDR (0xb24128) +#define NBL_PP1_ACTION_PRIORITY4_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY4_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY4_DWLEN (1) +union pp1_action_priority4_u { + 
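+	/* Note: ACTION_PRIORITY4-7 (0xb24128-0xb24134) repeat the
+	 * action_id3..62 field layout of ACTION_PRIORITY0-3 above.
+	 */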
struct pp1_action_priority4 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY4_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY5_ADDR (0xb2412c) +#define NBL_PP1_ACTION_PRIORITY5_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY5_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY5_DWLEN (1) +union pp1_action_priority5_u { + struct pp1_action_priority5 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY5_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY6_ADDR (0xb24130) +#define NBL_PP1_ACTION_PRIORITY6_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY6_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY6_DWLEN (1) +union pp1_action_priority6_u { + struct pp1_action_priority6 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY6_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY7_ADDR (0xb24134) +#define NBL_PP1_ACTION_PRIORITY7_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY7_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY7_DWLEN (1) +union 
pp1_action_priority7_u { + struct pp1_action_priority7 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY7_DWLEN]; +} __packed; + +#define NBL_PP1_CPU_ACCESS_ADDR (0xb2416c) +#define NBL_PP1_CPU_ACCESS_DEPTH (1) +#define NBL_PP1_CPU_ACCESS_WIDTH (32) +#define NBL_PP1_CPU_ACCESS_DWLEN (1) +union pp1_cpu_access_u { + struct pp1_cpu_access { + u32 bp_th:10; /* [9:0] Default:0x34 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 timeout_th:10; /* [25:16] Default:0x100 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_CPU_ACCESS_DWLEN]; +} __packed; + +#define NBL_PP1_RDMA_BYPASS_ADDR (0xb24170) +#define NBL_PP1_RDMA_BYPASS_DEPTH (1) +#define NBL_PP1_RDMA_BYPASS_WIDTH (32) +#define NBL_PP1_RDMA_BYPASS_DWLEN (1) +union pp1_rdma_bypass_u { + struct pp1_rdma_bypass { + u32 rdma_flag_offset:5; /* [4:0] Default:0x0 RW */ + u32 dn_bypass_en:1; /* [5] Default:0x0 RW */ + u32 up_bypass_en:1; /* [6] Default:0x0 RW */ + u32 rsv1:1; /* [7] Default:0x0 RO */ + u32 dir_flag_offset:5; /* [12:8] Default:0x0 RW */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_RDMA_BYPASS_DWLEN]; +} __packed; + +#define NBL_PP1_INIT_START_ADDR (0xb241fc) +#define NBL_PP1_INIT_START_DEPTH (1) +#define NBL_PP1_INIT_START_WIDTH (32) +#define NBL_PP1_INIT_START_DWLEN (1) +union pp1_init_start_u { + struct pp1_init_start { + u32 en:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_INIT_START_DWLEN]; +} __packed; + +#define NBL_PP1_BP_SET_ADDR (0xb24200) +#define NBL_PP1_BP_SET_DEPTH (1) +#define NBL_PP1_BP_SET_WIDTH (32) +#define NBL_PP1_BP_SET_DWLEN (1) +union pp1_bp_set_u { + struct pp1_bp_set { + u32 pp_up:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_BP_SET_DWLEN]; +} __packed; + +#define NBL_PP1_BP_MASK_ADDR (0xb24204) +#define NBL_PP1_BP_MASK_DEPTH (1) +#define NBL_PP1_BP_MASK_WIDTH (32) +#define NBL_PP1_BP_MASK_DWLEN (1) +union pp1_bp_mask_u { + struct pp1_bp_mask { + u32 dn_pp:1; /* [00:00] Default:0x0 RW */ + u32 fem_pp:1; /* [01:01] Default:0x0 RW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_BP_MASK_DWLEN]; +} __packed; + +#define NBL_PP1_BP_STATE_ADDR (0xb24308) +#define NBL_PP1_BP_STATE_DEPTH (1) +#define NBL_PP1_BP_STATE_WIDTH (32) +#define NBL_PP1_BP_STATE_DWLEN (1) +union pp1_bp_state_u { + struct pp1_bp_state { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RO */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RO */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RO */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_BP_STATE_DWLEN]; +} __packed; + +#define NBL_PP1_BP_HISTORY_ADDR (0xb2430c) +#define NBL_PP1_BP_HISTORY_DEPTH (1) +#define 
NBL_PP1_BP_HISTORY_WIDTH (32) +#define NBL_PP1_BP_HISTORY_DWLEN (1) +union pp1_bp_history_u { + struct pp1_bp_history { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RC */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RC */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RC */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RC */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_PP1_CFG_TEST_ADDR (0xb2442c) +#define NBL_PP1_CFG_TEST_DEPTH (1) +#define NBL_PP1_CFG_TEST_WIDTH (32) +#define NBL_PP1_CFG_TEST_DWLEN (1) +union pp1_cfg_test_u { + struct pp1_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION0_ADDR (0xb24430) +#define NBL_PP1_ABNORMAL_ACTION0_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION0_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION0_DWLEN (1) +union pp1_abnormal_action0_u { + struct pp1_abnormal_action0 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION0_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION1_ADDR (0xb24434) +#define NBL_PP1_ABNORMAL_ACTION1_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION1_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION1_DWLEN (1) +union pp1_abnormal_action1_u { + struct pp1_abnormal_action1 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION1_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION2_ADDR (0xb24438) +#define NBL_PP1_ABNORMAL_ACTION2_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION2_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION2_DWLEN (1) +union pp1_abnormal_action2_u { + struct pp1_abnormal_action2 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION2_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION3_ADDR (0xb2443c) +#define NBL_PP1_ABNORMAL_ACTION3_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION3_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION3_DWLEN (1) +union pp1_abnormal_action3_u { + struct pp1_abnormal_action3 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION3_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION4_ADDR (0xb24440) +#define NBL_PP1_ABNORMAL_ACTION4_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION4_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION4_DWLEN (1) +union pp1_abnormal_action4_u { + struct pp1_abnormal_action4 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION4_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION5_ADDR (0xb24444) +#define NBL_PP1_ABNORMAL_ACTION5_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION5_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION5_DWLEN (1) +union pp1_abnormal_action5_u { + struct pp1_abnormal_action5 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION5_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION6_ADDR (0xb24448) +#define NBL_PP1_ABNORMAL_ACTION6_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION6_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION6_DWLEN (1) +union pp1_abnormal_action6_u { + struct pp1_abnormal_action6 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } 
__packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION6_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION7_ADDR (0xb2444c) +#define NBL_PP1_ABNORMAL_ACTION7_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION7_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION7_DWLEN (1) +union pp1_abnormal_action7_u { + struct pp1_abnormal_action7 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION7_DWLEN]; +} __packed; + +#define NBL_PP1_FWD_DPORT_ACTION_ADDR (0xb24450) +#define NBL_PP1_FWD_DPORT_ACTION_DEPTH (1) +#define NBL_PP1_FWD_DPORT_ACTION_WIDTH (32) +#define NBL_PP1_FWD_DPORT_ACTION_DWLEN (1) +union pp1_fwd_dport_action_u { + struct pp1_fwd_dport_action { + u32 action_id:6; /* [05:00] Default:0x9 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_FWD_DPORT_ACTION_DWLEN]; +} __packed; + +#define NBL_PP1_RDMA_VSI_BTM_ADDR (0xb24454) +#define NBL_PP1_RDMA_VSI_BTM_DEPTH (32) +#define NBL_PP1_RDMA_VSI_BTM_WIDTH (32) +#define NBL_PP1_RDMA_VSI_BTM_DWLEN (1) +union pp1_rdma_vsi_btm_u { + struct pp1_rdma_vsi_btm { + u32 btm:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_RDMA_VSI_BTM_DWLEN]; +} __packed; +#define NBL_PP1_RDMA_VSI_BTM_REG(r) (NBL_PP1_RDMA_VSI_BTM_ADDR + \ + (NBL_PP1_RDMA_VSI_BTM_DWLEN * 4) * (r)) + +#define NBL_PP1_KGEN_KEY_PRF_ADDR (0xb25000) +#define NBL_PP1_KGEN_KEY_PRF_DEPTH (16) +#define NBL_PP1_KGEN_KEY_PRF_WIDTH (512) +#define NBL_PP1_KGEN_KEY_PRF_DWLEN (16) +union pp1_kgen_key_prf_u { + struct pp1_kgen_key_prf { + u32 ext4_0_src:10; + u32 ext4_0_dst:7; + u32 ext4_1_src:10; + u32 ext4_1_dst:7; + u32 ext4_2_src:10; + u32 ext4_2_dst:7; + u32 ext4_3_src:10; + u32 ext4_3_dst:7; + u32 ext8_0_src:9; + u32 ext8_0_dst:6; + u32 ext8_1_src:9; + u32 ext8_1_dst:6; + u32 ext8_2_src:9; + u32 ext8_2_dst:6; + u32 ext8_3_src:9; + u32 ext8_3_dst:6; + u32 ext8_4_src:9; + u32 ext8_4_dst:6; + u32 ext8_5_src:9; + u32 ext8_5_dst:6; + u32 ext8_6_src:9; + u32 ext8_6_dst:6; + u32 ext8_7_src:9; + u32 ext8_7_dst:6; + u32 ext16_0_src:8; + u32 ext16_0_dst:5; + u32 ext16_1_src:8; + u32 ext16_1_dst:5; + u32 ext16_2_src:8; + u32 ext16_2_dst:5; + u32 ext16_3_src:8; + u32 ext16_3_dst:5; + u32 ext32_0_src:7; + u32 ext32_0_dst:4; + u32 ext32_1_src:7; + u32 ext32_1_dst:4; + u32 ext32_2_src:7; + u32 ext32_2_dst:4; + u32 ext32_3_src:7; + u32 ext32_3_dst:4; + u32 sp_2_en:1; + u32 sp_2_src_offset:3; + u32 sp_2_dst_offset:8; + u32 sp_4_en:1; + u32 sp_4_src_offset:2; + u32 sp_4_dst_offset:7; + u32 sp_8_en:1; + u32 sp_8_src_offset:1; + u32 sp_8_dst_offset:6; + u32 fwdact0_en:1; + u32 fwdact0_id:6; + u32 fwdact0_dst_offset:5; + u32 fwdact1_en:1; + u32 fwdact1_id:6; + u32 fwdact1_dst_offset:5; + u32 bts_en0:1; + u32 bts_data0:1; + u32 bts_des_offset0:9; + u32 bts_en1:1; + u32 bts_data1:1; + u32 bts_des_offset1:9; + u32 bts_en2:1; + u32 bts_data2:1; + u32 bts_des_offset2:9; + u32 bts_en3:1; + u32 bts_data3:1; + u32 bts_des_offset3:9; + u32 rsv1:2; + u32 rsv[4]; + } __packed info; + u32 data[NBL_PP1_KGEN_KEY_PRF_DWLEN]; +}; + +#define NBL_PP1_KGEN_KEY_PRF_REG(r) (NBL_PP1_KGEN_KEY_PRF_ADDR + \ + (NBL_PP1_KGEN_KEY_PRF_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp2.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp2.h new file mode 100644 index 0000000000000000000000000000000000000000..71e98b61584f57302c67f5d532e3d73551b21d5f --- /dev/null +++ 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp2.h
@@ -0,0 +1,614 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_PP2_H
+#define NBL_PP2_H 1
+
+#include <linux/types.h>
+
+#define NBL_PP2_BASE (0x00B34000)
+
+#define NBL_PP2_INT_STATUS_ADDR (0xb34000)
+#define NBL_PP2_INT_STATUS_DEPTH (1)
+#define NBL_PP2_INT_STATUS_WIDTH (32)
+#define NBL_PP2_INT_STATUS_DWLEN (1)
+union pp2_int_status_u {
+	struct pp2_int_status {
+		u32 rsv5:1; /* [00:00] Default:0x0 RO */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */
+		u32 rsv4:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RWC */
+		u32 rsv3:1; /* [05:05] Default:0x0 RO */
+		u32 cfg_err:1; /* [06:06] Default:0x0 RWC */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */
+		u32 rsv2:1; /* [08:08] Default:0x0 RO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_PP2_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_PP2_INT_MASK_ADDR (0xb34004)
+#define NBL_PP2_INT_MASK_DEPTH (1)
+#define NBL_PP2_INT_MASK_WIDTH (32)
+#define NBL_PP2_INT_MASK_DWLEN (1)
+union pp2_int_mask_u {
+	struct pp2_int_mask {
+		u32 rsv5:1; /* [00:00] Default:0x0 RO */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */
+		u32 rsv4:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RW */
+		u32 rsv3:1; /* [05:05] Default:0x0 RO */
+		u32 cfg_err:1; /* [06:06] Default:0x0 RW */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */
+		u32 rsv2:1; /* [08:08] Default:0x0 RO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_PP2_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_PP2_INT_SET_ADDR (0xb34008)
+#define NBL_PP2_INT_SET_DEPTH (1)
+#define NBL_PP2_INT_SET_WIDTH (32)
+#define NBL_PP2_INT_SET_DWLEN (1)
+union pp2_int_set_u {
+	struct pp2_int_set {
+		u32 rsv5:1; /* [00:00] Default:0x0 RO */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */
+		u32 rsv4:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 WO */
+		u32 rsv3:1; /* [05:05] Default:0x0 RO */
+		u32 cfg_err:1; /* [06:06] Default:0x0 WO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */
+		u32 rsv2:1; /* [08:08] Default:0x0 RO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_PP2_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_PP2_INIT_DONE_ADDR (0xb3400c)
+#define NBL_PP2_INIT_DONE_DEPTH (1)
+#define NBL_PP2_INIT_DONE_WIDTH (32)
+#define NBL_PP2_INIT_DONE_DWLEN (1)
+union pp2_init_done_u {
+	struct pp2_init_done {
+		u32 done:1; /* [00:00] Default:0x0 RO */
+		u32 rsv:31; /* [31:01] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_PP2_INIT_DONE_DWLEN];
+} __packed;
+
+#define NBL_PP2_CFG_ERR_INFO_ADDR (0xb34038)
+#define NBL_PP2_CFG_ERR_INFO_DEPTH (1)
+#define NBL_PP2_CFG_ERR_INFO_WIDTH (32)
+#define NBL_PP2_CFG_ERR_INFO_DWLEN (1)
+union pp2_cfg_err_info_u {
+	struct pp2_cfg_err_info {
+		u32 id:1; /* [0:0] Default:0x0 RO */
+		u32 rsv:31; /* [31:1] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_PP2_CFG_ERR_INFO_DWLEN];
+} __packed;
+
+#define NBL_PP2_CIF_ERR_INFO_ADDR (0xb34040)
+#define NBL_PP2_CIF_ERR_INFO_DEPTH (1)
+#define NBL_PP2_CIF_ERR_INFO_WIDTH (32)
+#define NBL_PP2_CIF_ERR_INFO_DWLEN (1)
+union pp2_cif_err_info_u {
+
struct pp2_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PP2_CAR_CTRL_ADDR (0xb34100) +#define NBL_PP2_CAR_CTRL_DEPTH (1) +#define NBL_PP2_CAR_CTRL_WIDTH (32) +#define NBL_PP2_CAR_CTRL_DWLEN (1) +union pp2_car_ctrl_u { + struct pp2_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_PP2_MODE_ADDR (0xb34104) +#define NBL_PP2_MODE_DEPTH (1) +#define NBL_PP2_MODE_WIDTH (32) +#define NBL_PP2_MODE_DWLEN (1) +union pp2_mode_u { + struct pp2_mode { + u32 bypass:1; /* [0] Default:0x0 RW */ + u32 internal_loopback_en:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_MODE_DWLEN]; +} __packed; + +#define NBL_PP2_SET_FLAGS0_ADDR (0xb34108) +#define NBL_PP2_SET_FLAGS0_DEPTH (1) +#define NBL_PP2_SET_FLAGS0_WIDTH (32) +#define NBL_PP2_SET_FLAGS0_DWLEN (1) +union pp2_set_flags0_u { + struct pp2_set_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_SET_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP2_SET_FLAGS1_ADDR (0xb3410c) +#define NBL_PP2_SET_FLAGS1_DEPTH (1) +#define NBL_PP2_SET_FLAGS1_WIDTH (32) +#define NBL_PP2_SET_FLAGS1_DWLEN (1) +union pp2_set_flags1_u { + struct pp2_set_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_SET_FLAGS1_DWLEN]; +} __packed; + +#define NBL_PP2_CLEAR_FLAGS0_ADDR (0xb34110) +#define NBL_PP2_CLEAR_FLAGS0_DEPTH (1) +#define NBL_PP2_CLEAR_FLAGS0_WIDTH (32) +#define NBL_PP2_CLEAR_FLAGS0_DWLEN (1) +union pp2_clear_flags0_u { + struct pp2_clear_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_CLEAR_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP2_CLEAR_FLAGS1_ADDR (0xb34114) +#define NBL_PP2_CLEAR_FLAGS1_DEPTH (1) +#define NBL_PP2_CLEAR_FLAGS1_WIDTH (32) +#define NBL_PP2_CLEAR_FLAGS1_DWLEN (1) +union pp2_clear_flags1_u { + struct pp2_clear_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_CLEAR_FLAGS1_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY0_ADDR (0xb34118) +#define NBL_PP2_ACTION_PRIORITY0_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY0_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY0_DWLEN (1) +union pp2_action_priority0_u { + struct pp2_action_priority0 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } 
__packed info; + u32 data[NBL_PP2_ACTION_PRIORITY0_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY1_ADDR (0xb3411c) +#define NBL_PP2_ACTION_PRIORITY1_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY1_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY1_DWLEN (1) +union pp2_action_priority1_u { + struct pp2_action_priority1 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY1_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY2_ADDR (0xb34120) +#define NBL_PP2_ACTION_PRIORITY2_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY2_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY2_DWLEN (1) +union pp2_action_priority2_u { + struct pp2_action_priority2 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY2_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY3_ADDR (0xb34124) +#define NBL_PP2_ACTION_PRIORITY3_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY3_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY3_DWLEN (1) +union pp2_action_priority3_u { + struct pp2_action_priority3 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY3_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY4_ADDR (0xb34128) +#define 
NBL_PP2_ACTION_PRIORITY4_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY4_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY4_DWLEN (1) +union pp2_action_priority4_u { + struct pp2_action_priority4 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY4_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY5_ADDR (0xb3412c) +#define NBL_PP2_ACTION_PRIORITY5_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY5_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY5_DWLEN (1) +union pp2_action_priority5_u { + struct pp2_action_priority5 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY5_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY6_ADDR (0xb34130) +#define NBL_PP2_ACTION_PRIORITY6_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY6_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY6_DWLEN (1) +union pp2_action_priority6_u { + struct pp2_action_priority6 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY6_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY7_ADDR (0xb34134) 
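+/* A minimal usage sketch for these generated register unions: the driver can
+ * move the raw dword through .data[] and pick individual fields through
+ * .info. The rd32()/wr32() MMIO helpers below are hypothetical stand-ins for
+ * the real phy_ops accessors, not part of this patch:
+ *
+ *	union pp2_action_priority7_u pri;
+ *
+ *	pri.data[0] = rd32(hw, NBL_PP2_ACTION_PRIORITY7_ADDR);
+ *	pri.info.action_id51_pri = 2;	// bump one action's 2-bit priority
+ *	wr32(hw, NBL_PP2_ACTION_PRIORITY7_ADDR, pri.data[0]);
+ */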
+#define NBL_PP2_ACTION_PRIORITY7_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY7_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY7_DWLEN (1) +union pp2_action_priority7_u { + struct pp2_action_priority7 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY7_DWLEN]; +} __packed; + +#define NBL_PP2_CPU_ACCESS_ADDR (0xb3416c) +#define NBL_PP2_CPU_ACCESS_DEPTH (1) +#define NBL_PP2_CPU_ACCESS_WIDTH (32) +#define NBL_PP2_CPU_ACCESS_DWLEN (1) +union pp2_cpu_access_u { + struct pp2_cpu_access { + u32 bp_th:10; /* [9:0] Default:0x34 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 timeout_th:10; /* [25:16] Default:0x100 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_CPU_ACCESS_DWLEN]; +} __packed; + +#define NBL_PP2_RDMA_BYPASS_ADDR (0xb34170) +#define NBL_PP2_RDMA_BYPASS_DEPTH (1) +#define NBL_PP2_RDMA_BYPASS_WIDTH (32) +#define NBL_PP2_RDMA_BYPASS_DWLEN (1) +union pp2_rdma_bypass_u { + struct pp2_rdma_bypass { + u32 rdma_flag_offset:5; /* [4:0] Default:0x0 RW */ + u32 dn_bypass_en:1; /* [5] Default:0x0 RW */ + u32 up_bypass_en:1; /* [6] Default:0x0 RW */ + u32 rsv1:1; /* [7] Default:0x0 RO */ + u32 dir_flag_offset:5; /* [12:8] Default:0x0 RW */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_RDMA_BYPASS_DWLEN]; +} __packed; + +#define NBL_PP2_INIT_START_ADDR (0xb341fc) +#define NBL_PP2_INIT_START_DEPTH (1) +#define NBL_PP2_INIT_START_WIDTH (32) +#define NBL_PP2_INIT_START_DWLEN (1) +union pp2_init_start_u { + struct pp2_init_start { + u32 en:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_INIT_START_DWLEN]; +} __packed; + +#define NBL_PP2_BP_SET_ADDR (0xb34200) +#define NBL_PP2_BP_SET_DEPTH (1) +#define NBL_PP2_BP_SET_WIDTH (32) +#define NBL_PP2_BP_SET_DWLEN (1) +union pp2_bp_set_u { + struct pp2_bp_set { + u32 pp_up:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_BP_SET_DWLEN]; +} __packed; + +#define NBL_PP2_BP_MASK_ADDR (0xb34204) +#define NBL_PP2_BP_MASK_DEPTH (1) +#define NBL_PP2_BP_MASK_WIDTH (32) +#define NBL_PP2_BP_MASK_DWLEN (1) +union pp2_bp_mask_u { + struct pp2_bp_mask { + u32 dn_pp:1; /* [00:00] Default:0x0 RW */ + u32 fem_pp:1; /* [01:01] Default:0x0 RW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_BP_MASK_DWLEN]; +} __packed; + +#define NBL_PP2_BP_STATE_ADDR (0xb34308) +#define NBL_PP2_BP_STATE_DEPTH (1) +#define NBL_PP2_BP_STATE_WIDTH (32) +#define NBL_PP2_BP_STATE_DWLEN (1) +union pp2_bp_state_u { + struct pp2_bp_state { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RO */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RO */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RO */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + 
u32 data[NBL_PP2_BP_STATE_DWLEN]; +} __packed; + +#define NBL_PP2_BP_HISTORY_ADDR (0xb3430c) +#define NBL_PP2_BP_HISTORY_DEPTH (1) +#define NBL_PP2_BP_HISTORY_WIDTH (32) +#define NBL_PP2_BP_HISTORY_DWLEN (1) +union pp2_bp_history_u { + struct pp2_bp_history { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RC */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RC */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RC */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RC */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_PP2_CFG_TEST_ADDR (0xb3442c) +#define NBL_PP2_CFG_TEST_DEPTH (1) +#define NBL_PP2_CFG_TEST_WIDTH (32) +#define NBL_PP2_CFG_TEST_DWLEN (1) +union pp2_cfg_test_u { + struct pp2_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION0_ADDR (0xb34430) +#define NBL_PP2_ABNORMAL_ACTION0_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION0_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION0_DWLEN (1) +union pp2_abnormal_action0_u { + struct pp2_abnormal_action0 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION0_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION1_ADDR (0xb34434) +#define NBL_PP2_ABNORMAL_ACTION1_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION1_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION1_DWLEN (1) +union pp2_abnormal_action1_u { + struct pp2_abnormal_action1 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION1_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION2_ADDR (0xb34438) +#define NBL_PP2_ABNORMAL_ACTION2_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION2_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION2_DWLEN (1) +union pp2_abnormal_action2_u { + struct pp2_abnormal_action2 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION2_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION3_ADDR (0xb3443c) +#define NBL_PP2_ABNORMAL_ACTION3_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION3_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION3_DWLEN (1) +union pp2_abnormal_action3_u { + struct pp2_abnormal_action3 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION3_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION4_ADDR (0xb34440) +#define NBL_PP2_ABNORMAL_ACTION4_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION4_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION4_DWLEN (1) +union pp2_abnormal_action4_u { + struct pp2_abnormal_action4 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION4_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION5_ADDR (0xb34444) +#define NBL_PP2_ABNORMAL_ACTION5_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION5_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION5_DWLEN (1) +union pp2_abnormal_action5_u { + struct pp2_abnormal_action5 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION5_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION6_ADDR (0xb34448) +#define NBL_PP2_ABNORMAL_ACTION6_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION6_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION6_DWLEN (1) +union 
pp2_abnormal_action6_u { + struct pp2_abnormal_action6 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION6_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION7_ADDR (0xb3444c) +#define NBL_PP2_ABNORMAL_ACTION7_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION7_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION7_DWLEN (1) +union pp2_abnormal_action7_u { + struct pp2_abnormal_action7 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION7_DWLEN]; +} __packed; + +#define NBL_PP2_FWD_DPORT_ACTION_ADDR (0xb34450) +#define NBL_PP2_FWD_DPORT_ACTION_DEPTH (1) +#define NBL_PP2_FWD_DPORT_ACTION_WIDTH (32) +#define NBL_PP2_FWD_DPORT_ACTION_DWLEN (1) +union pp2_fwd_dport_action_u { + struct pp2_fwd_dport_action { + u32 action_id:6; /* [05:00] Default:0x9 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_FWD_DPORT_ACTION_DWLEN]; +} __packed; + +#define NBL_PP2_RDMA_VSI_BTM_ADDR (0xb34454) +#define NBL_PP2_RDMA_VSI_BTM_DEPTH (32) +#define NBL_PP2_RDMA_VSI_BTM_WIDTH (32) +#define NBL_PP2_RDMA_VSI_BTM_DWLEN (1) +union pp2_rdma_vsi_btm_u { + struct pp2_rdma_vsi_btm { + u32 btm:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_RDMA_VSI_BTM_DWLEN]; +} __packed; +#define NBL_PP2_RDMA_VSI_BTM_REG(r) (NBL_PP2_RDMA_VSI_BTM_ADDR + \ + (NBL_PP2_RDMA_VSI_BTM_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.c new file mode 100644 index 0000000000000000000000000000000000000000..a1cb89a52d27ba493960a3058101f5c367e8f293 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author:
+ */
+
+#include "nbl_fc_leonis.h"
+#include "nbl_fc.h"
+
+static inline void nbl_fc_get_cmd_hdr(struct nbl_fc_mgt *mgt)
+{
+	static const struct nbl_cmd_hdr g_cmd_hdr[] = {
+		[NBL_ACL_STATID_READ] = {NBL_BLOCK_PPE, NBL_MODULE_ACL,
+					 NBL_TABLE_ACL_STATID, NBL_CMD_OP_READ},
+		[NBL_ACL_FLOWID_READ] = {NBL_BLOCK_PPE, NBL_MODULE_ACL,
+					 NBL_TABLE_ACL_FLOWID, NBL_CMD_OP_READ}
+	};
+
+	memcpy(mgt->cmd_hdr, g_cmd_hdr, sizeof(g_cmd_hdr));
+}
+
+static void nbl_fc_get_spec_sz(u16 *hit_sz, u16 *bytes_sz)
+{
+	*hit_sz = NBL_SPEC_STAT_HIT_SIZE;
+	*bytes_sz = NBL_SPEC_STAT_BYTES_SIZE;
+}
+
+static void nbl_fc_get_flow_sz(u16 *hit_sz, u16 *bytes_sz)
+{
+	*hit_sz = NBL_FLOW_STAT_HIT_SIZE;
+	*bytes_sz = NBL_FLOW_STAT_BYTES_SIZE;
+}
+
+static void nbl_fc_get_spec_stats(struct nbl_flow_counter *counter, u64 *pkts, u64 *bytes)
+{
+	NBL_GET_SPEC_STAT_HITS(counter->cache.packets, counter->lastpackets, pkts);
+	NBL_GET_SPEC_STAT_BYTES(counter->cache.bytes, counter->lastbytes, bytes);
+}
+
+static void nbl_fc_get_flow_stats(struct nbl_flow_counter *counter, u64 *pkts, u64 *bytes)
+{
+	NBL_GET_FLOW_STAT_HITS(counter->cache.packets, counter->lastpackets, pkts);
+	NBL_GET_FLOW_STAT_BYTES(counter->cache.bytes, counter->lastbytes, bytes);
+}
+
+static int nbl_fc_update_flow_stats(struct nbl_fc_mgt *mgt,
+				    struct nbl_flow_query_counter *counter_array,
+				    u32 flow_num, u32 clear, enum nbl_pp_fc_type fc_type)
+{
+	int ret = 0;
+	u32 idx = 0;
+	u16 hit_size;
+	u16 bytes_size;
+	union nbl_cmd_acl_flowid_u fquery_out;
+	union nbl_cmd_acl_statid_u squery_out;
+	struct nbl_stats_data data_info = { 0 };
+	struct nbl_cmd_content cmd = { 0 };
+	struct nbl_cmd_hdr hdr = mgt->cmd_hdr[NBL_ACL_FLOWID_READ];
+
+	memset(&fquery_out, 0, sizeof(fquery_out));
+	memset(&squery_out, 0, sizeof(squery_out));
+
+	cmd.out_va = &fquery_out;
+	if (fc_type == NBL_FC_SPEC_TYPE) {
+		hdr = mgt->cmd_hdr[NBL_ACL_STATID_READ];
+		cmd.out_va = &squery_out;
+		mgt->fc_ops.get_spec_stat_sz(&hit_size, &bytes_size);
+	} else {
+		mgt->fc_ops.get_flow_stat_sz(&hit_size, &bytes_size);
+	}
+
+	cmd.in_va = counter_array->counter_id;
+	cmd.in_params = (clear << NBL_FLOW_STAT_CLR_OFT) |
+			((flow_num - 1) & NBL_FLOW_STAT_NUM_MASK);
+	cmd.in_length = NBL_CMDQ_ACL_STAT_BASE_LEN;
+
+	ret = nbl_tc_call_inst_cmdq(mgt->common->tc_inst_id, (void *)&hdr, (void *)&cmd);
+	if (ret)
+		goto cmd_send_error;
+
+	/* a clear request only flushes the hardware stats; the cache needs no update */
+	if (clear) {
+		nbl_debug(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc flush hw-stats success");
+		return 0;
+	}
+
+	for (idx = 0; idx < flow_num; idx++) {
+		if (fc_type == NBL_FC_SPEC_TYPE) {
+			memcpy(&data_info.bytes, squery_out.info.all_data[idx].bytes, bytes_size);
+			memcpy(&data_info.packets, &squery_out.info.all_data[idx].hits, hit_size);
+		} else {
+			memcpy(&data_info.bytes, fquery_out.info.all_data[idx].bytes, bytes_size);
+			memcpy(&data_info.packets, &fquery_out.info.all_data[idx].hits, hit_size);
+		}
+		data_info.flow_id = counter_array->counter_id[idx];
+		nbl_debug(mgt->common, NBL_DEBUG_FLOW,
+			  "nbl flow fc get %u-%lu: packets:%llu-bytes:%llu\n",
+			  data_info.flow_id, counter_array->cookie[idx],
+			  data_info.packets, data_info.bytes);
+		ret = nbl_fc_set_stats(mgt, &data_info, counter_array->cookie[idx]);
+		if (ret)
+			goto set_stat_error;
+	}
+
+	return 0;
+
+cmd_send_error:
+	nbl_err(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc get hw stats failed. ret %d", ret);
+	return ret;
+
+set_stat_error:
+	nbl_debug(mgt->common, NBL_DEBUG_FLOW,
+		  "nbl flow fc set flow stats failed. count_id:%u, cookie: %lu, ret(%u): %d",
+		  counter_array->counter_id[idx], counter_array->cookie[idx], idx, ret);
+	return ret;
+}
+
+static void nbl_fc_init_ops_leonis(struct nbl_fc_mgt *mgt)
+{
+	mgt->fc_ops.get_spec_stat_sz = &nbl_fc_get_spec_sz;
+	mgt->fc_ops.get_flow_stat_sz = &nbl_fc_get_flow_sz;
+	mgt->fc_ops.get_spec_stats = &nbl_fc_get_spec_stats;
+	mgt->fc_ops.get_flow_stats = &nbl_fc_get_flow_stats;
+	mgt->fc_ops.update_stats = &nbl_fc_update_flow_stats;
+}
+
+int nbl_fc_add_stats_leonis(void *priv, enum nbl_pp_fc_type fc_type, unsigned long cookie)
+{
+	return nbl_fc_add_stats(priv, fc_type, cookie);
+}
+
+int nbl_fc_del_stats_leonis(void *priv, unsigned long cookie)
+{
+	return nbl_fc_del_stats(priv, cookie);
+}
+
+int nbl_fc_setup_ops_leonis(struct nbl_resource_ops *res_ops)
+{
+	return nbl_fc_setup_ops(res_ops);
+}
+
+void nbl_fc_remove_ops_leonis(struct nbl_resource_ops *res_ops)
+{
+	nbl_fc_remove_ops(res_ops);
+}
+
+int nbl_fc_mgt_start_leonis(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_fc_mgt **fc_mgt;
+	struct device *dev;
+	int ret = -ENOMEM;
+	struct nbl_fc_mgt *mgt;
+	struct nbl_phy_ops *phy_ops;
+	struct nbl_common_info *common;
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	fc_mgt = &NBL_RES_MGT_TO_COUNTER_MGT(res_mgt);
+	phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	ret = phy_ops->init_acl_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+	if (ret) {
+		nbl_info(common, NBL_DEBUG_FLOW, "nbl flow fc init phy-stats failed");
+		return ret;
+	}
+
+	ret = nbl_fc_setup_mgt(dev, fc_mgt);
+	if (ret) {
+		nbl_info(common, NBL_DEBUG_FLOW, "nbl flow fc init mgt failed");
+		return ret;
+	}
+
+	mgt = (*fc_mgt);
+	mgt->common = common;
+	nbl_fc_init_ops_leonis(mgt);
+	nbl_fc_get_cmd_hdr(mgt);
+	return nbl_fc_mgt_start(mgt);
+}
+
+void nbl_fc_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt)
+{
+	nbl_fc_mgt_stop(res_mgt);
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.h
new file mode 100644
index 0000000000000000000000000000000000000000..10103e325993d9f0e2d5c897db45637250594e8d
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author:
+ */
+
+#ifndef _NBL_FC_LEONIS_H_
+#define _NBL_FC_LEONIS_H_
+
+#include "nbl_resource.h"
+#include "nbl_core.h"
+#include "nbl_hw.h"
+
+#define NBL_FLOW_STAT_HIT_SIZE 5
+#define NBL_FLOW_STAT_BYTES_SIZE 6
+#define NBL_SPEC_STAT_HIT_SIZE 8
+#define NBL_SPEC_STAT_BYTES_SIZE 8
+
+/* The hardware counters wrap modulo (WIDE + 1), so when the current reading
+ * is below the previous one the delta is cur + WIDE + 1 - pre.
+ */
+#define NBL_FLOW_STATS_BYTES_WIDE (0xffffffffffff)
+#define NBL_FLOW_STATS_HITS_WIDE (0xffffffffff)
+#define NBL_GET_FLOW_STAT_BYTES(_cur_v, _pre_v, _v) do \
+{ \
+	typeof(_v) v = _v; \
+	typeof(_cur_v) cur_v = _cur_v; \
+	typeof(_pre_v) pre_v = _pre_v; \
+	if (cur_v >= pre_v) \
+		*v = cur_v - pre_v; \
+	else \
+		*v = NBL_FLOW_STATS_BYTES_WIDE - pre_v + cur_v + 1; \
+} while (0)
+
+#define NBL_GET_FLOW_STAT_HITS(_cur_v, _pre_v, _v) do \
+{ \
+	typeof(_v) v = _v; \
+	typeof(_cur_v) cur_v = _cur_v; \
+	typeof(_pre_v) pre_v = _pre_v; \
+	if (cur_v >= pre_v) \
+		*v = cur_v - pre_v; \
+	else \
+		*v = NBL_FLOW_STATS_HITS_WIDE - pre_v + cur_v + 1; \
+} while (0)
+
+#define NBL_SPEC_STATS_BYTES_WIDE (0xffffffffffffffff)
+#define NBL_SPEC_STATS_HITS_WIDE (0xffffffffffffffff)
+#define NBL_GET_SPEC_STAT_BYTES(_cur_v, _pre_v, _v) do \
+{ \
+	typeof(_v) v = _v; \
+	typeof(_cur_v) cur_v = _cur_v; \
+	typeof(_pre_v) pre_v = _pre_v; \
+	if (cur_v >= pre_v) \
+		*v = cur_v - pre_v; \
+	else \
+		*v = NBL_SPEC_STATS_BYTES_WIDE - pre_v + cur_v + 1; \
+} while (0)
+
+#define NBL_GET_SPEC_STAT_HITS(_cur_v, _pre_v, _v) do \
+{ \
+	typeof(_v) v = _v; \
+	typeof(_cur_v) cur_v = _cur_v; \
+	typeof(_pre_v) pre_v = _pre_v; \
+	if (cur_v >= pre_v) \
+		*v = cur_v - pre_v; \
+	else \
+		*v = NBL_SPEC_STATS_HITS_WIDE - pre_v + cur_v + 1; \
+} while (0)
+
+#pragma pack(1)
+/* CMDQ data content for ACL-FLOW ID */
+struct nbl_cmd_acl_stat_flowid_addr {
+	u32 addr:17;
+	u32 rsv:15;
+} __packed;
+
+struct nbl_cmd_acl_stat_flowid_data {
+	u8 bytes[NBL_FLOW_STAT_BYTES_SIZE];
+	u8 hits[NBL_FLOW_STAT_HIT_SIZE];
+	u8 rsv;
+} __packed;
+
+union nbl_cmd_acl_flowid_u {
+	struct nbl_cmd_acl_flowid {
+		struct nbl_cmd_acl_stat_flowid_addr all_addr[NBL_FLOW_COUNT_NUM];
+		struct nbl_cmd_acl_stat_flowid_data all_data[NBL_FLOW_COUNT_NUM];
+	} __packed info;
+#define NBL_CMD_ACL_FLOWID_TAB_WIDTH (sizeof(struct nbl_cmd_acl_flowid) \
+				      / sizeof(u32))
+	u32 data[NBL_CMD_ACL_FLOWID_TAB_WIDTH];
+};
+
+/* CMDQ data content for ACL-STAT ID */
+struct nbl_cmd_acl_stat_statid_addr {
+	u32 addr:11;
+	u32 rsv:21;
+} __packed;
+
+struct nbl_cmd_acl_stat_statid_data {
+	u8 bytes[NBL_SPEC_STAT_BYTES_SIZE];
+	u8 hits[NBL_SPEC_STAT_HIT_SIZE];
+} __packed;
+
+union nbl_cmd_acl_statid_u {
+	struct nbl_cmd_acl_statid {
+		struct nbl_cmd_acl_stat_statid_addr all_addr[NBL_FLOW_COUNT_NUM];
+		struct nbl_cmd_acl_stat_statid_data all_data[NBL_FLOW_COUNT_NUM];
+	} __packed info;
+#define NBL_CMD_ACL_STATID_TAB_WIDTH (sizeof(struct nbl_cmd_acl_statid) \
+				      / sizeof(u32))
+	u32 data[NBL_CMD_ACL_STATID_TAB_WIDTH];
+};
+
+#pragma pack()
+
+int nbl_fc_add_stats_leonis(void *priv, enum nbl_pp_fc_type fc_type, unsigned long cookie);
+int nbl_fc_del_stats_leonis(void *priv, unsigned long cookie);
+int nbl_fc_setup_ops_leonis(struct nbl_resource_ops *res_ops);
+void nbl_fc_remove_ops_leonis(struct nbl_resource_ops *res_ops);
+int nbl_fc_mgt_start_leonis(struct nbl_resource_mgt *res_mgt);
+void nbl_fc_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt);
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c
index 
83c9e99b68918736e00337f4832c6e94a5376123..0c4b9f599c55032da2cab21542b5d659262f7e7b 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c @@ -6,6 +6,27 @@ #include "nbl_flow_leonis.h" #include "nbl_p4_actions.h" +#include "nbl_resource_leonis.h" + +#define NBL_FLOW_LEONIS_VSI_NUM_PER_ETH 256 + +static bool nbl_flow_is_mirror_outputport(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + u16 func_id; + int i; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + if (func_id == U16_MAX) + return false; + + for (i = 0; i < NBL_MIRROR_OUTPUTPORT_MAX_FUNC; i++) { + if (func_id == flow_mgt->mirror_outputport_func[i]) + return true; + } + + return false; +} static u32 nbl_flow_cfg_action_set_dport(u16 upcall_flag, u16 port_type, u16 vsi, u16 next_stg_sel) { @@ -40,11 +61,23 @@ static u16 nbl_flow_cfg_action_set_dport_mcc_vsi(u16 vsi) set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_NML_FWD; set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; set_dport.dport.up.port_id = vsi; - set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO; + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_ACL_S0; return set_dport.data; } +static u32 nbl_flow_cfg_action_set_dport_mcc_bmc(void) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.up.port_type = SET_DPORT_TYPE_SP_PORT; + set_dport.dport.up.port_id = NBL_FLOW_MCC_BMC_DPORT; + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO; + + return set_dport.data + (NBL_ACT_SET_DPORT << 16); +} + static int nbl_flow_cfg_action_mcc(u16 mcc_id, u32 *action0, u32 *action1) { union nbl_action_data mcc_idx_act = {.data = 0}, set_aux_act = {.data = 0}; @@ -63,8 +96,12 @@ static int nbl_flow_cfg_action_mcc(u16 mcc_id, u32 *action0, u32 *action1) static int nbl_flow_cfg_action_up_tnl(struct nbl_flow_param param, u32 *action0, u32 *action1) { *action1 = 0; - *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, - param.vsi, NEXT_STG_SEL_EPRO); + if (param.mcc_id == NBL_MCC_ID_INVALID) + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, + SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_ACL_S0); + else + nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); return 0; } @@ -73,7 +110,7 @@ static int nbl_flow_cfg_action_lldp_lacp_up(struct nbl_flow_param param, u32 *ac { *action1 = 0; *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, - param.vsi, NEXT_STG_SEL_EPRO); + param.vsi, NEXT_STG_SEL_ACL_S0); return 0; } @@ -81,8 +118,12 @@ static int nbl_flow_cfg_action_lldp_lacp_up(struct nbl_flow_param param, u32 *ac static int nbl_flow_cfg_action_up(struct nbl_flow_param param, u32 *action0, u32 *action1) { *action1 = 0; - *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, - param.vsi, NEXT_STG_SEL_NONE); + if (param.mcc_id == NBL_MCC_ID_INVALID) + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, + SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_NONE); + else + nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); return 0; } @@ -90,32 +131,16 @@ static int nbl_flow_cfg_action_up(struct nbl_flow_param param, u32 *action0, u32 static int nbl_flow_cfg_action_down(struct nbl_flow_param param, u32 *action0, u32 *action1) { *action1 = 0; - *action0 = 
nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, - param.vsi, NEXT_STG_SEL_EPRO); + if (param.mcc_id == NBL_MCC_ID_INVALID) + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, + SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_ACL_S0); + else + nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); return 0; } -static int nbl_flow_cfg_action_l2_up(struct nbl_flow_param param, u32 *action0, u32 *action1) -{ - return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); -} - -static int nbl_flow_cfg_action_l2_down(struct nbl_flow_param param, u32 *action0, u32 *action1) -{ - return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); -} - -static int nbl_flow_cfg_action_l3_up(struct nbl_flow_param param, u32 *action0, u32 *action1) -{ - return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); -} - -static int nbl_flow_cfg_action_l3_down(struct nbl_flow_param param, u32 *action0, u32 *action1) -{ - return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); -} - static int nbl_flow_cfg_up_tnl_key_value(union nbl_common_data_u *data, struct nbl_flow_param param, u8 eth_mode) { @@ -131,7 +156,7 @@ static int nbl_flow_cfg_up_tnl_key_value(union nbl_common_data_u *data, kt_data->info.dst_mac = dst_mac; kt_data->info.svlan_id = param.vid; - kt_data->info.template = NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2; + kt_data->info.template = NBL_EM0_PT_PHY_UP_TUNNEL_L2; kt_data->info.padding = 0; sport = param.eth; @@ -171,7 +196,7 @@ static int nbl_flow_cfg_up_key_value(union nbl_common_data_u *data, kt_data->info.dst_mac = dst_mac; kt_data->info.svlan_id = param.vid; - kt_data->info.template = NBL_EM0_PT_PHY_UP_UNICAST_L2; + kt_data->info.template = NBL_EM0_PT_PHY_UP_L2; kt_data->info.padding = 0; sport = param.eth; @@ -195,144 +220,271 @@ static int nbl_flow_cfg_down_key_value(union nbl_common_data_u *data, kt_data->info.dst_mac = dst_mac; kt_data->info.svlan_id = param.vid; - kt_data->info.template = NBL_EM0_PT_PHY_DOWN_UNICAST_L2; + kt_data->info.template = NBL_EM0_PT_PHY_DOWN_L2; kt_data->info.padding = 0; sport = param.vsi >> 8; if (eth_mode == NBL_TWO_ETHERNET_PORT) sport &= 0xFE; + if (eth_mode == NBL_ONE_ETHERNET_PORT) + sport = 0; kt_data->info.sport = sport; return 0; } -static int nbl_flow_cfg_l2_up_key_value(union nbl_common_data_u *data, - struct nbl_flow_param param, u8 eth_mode) +static void nbl_flow_cfg_kt_action_up_tnl(union nbl_common_data_u *data, u32 action0, u32 action1) { - union nbl_l2_phy_up_multi_data_u *kt_data = (union nbl_l2_phy_up_multi_data_u *)data; - u8 sport; + union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; - kt_data->info.dst_mac = 0xFFFFFFFFFFFF; - kt_data->info.template = NBL_EM0_PT_PHY_UP_MULTICAST_L2; - kt_data->info.padding = 0; + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} - sport = param.eth; - kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; +static void nbl_flow_cfg_kt_action_lldp_lacp_up(union nbl_common_data_u *data, + u32 action0, u32 action1) +{ + union nbl_l2_phy_lldp_lacp_data_u *kt_data = (union nbl_l2_phy_lldp_lacp_data_u *)data; - return 0; + kt_data->info.act0 = action0; } -static int nbl_flow_cfg_l2_down_key_value(union nbl_common_data_u *data, - struct nbl_flow_param param, u8 eth_mode) +static void nbl_flow_cfg_kt_action_up(union nbl_common_data_u *data, u32 action0, u32 action1) { - union nbl_l2_phy_down_multi_data_u *kt_data = (union nbl_l2_phy_down_multi_data_u *)data; - u8 sport; + union nbl_l2_phy_up_data_u *kt_data = (union 
nbl_l2_phy_up_data_u *)data; - kt_data->info.dst_mac = 0xFFFFFFFFFFFF; - kt_data->info.template = NBL_EM0_PT_PHY_DOWN_MULTICAST_L2; - kt_data->info.padding = 0; + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} - sport = param.eth; - if (eth_mode == NBL_TWO_ETHERNET_PORT) - sport &= 0xFE; - kt_data->info.sport = sport; +static void nbl_flow_cfg_kt_action_down(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l2_phy_down_data_u *kt_data = (union nbl_l2_phy_down_data_u *)data; + + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} + +static int nbl_flow_cfg_action_tls_up(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + union nbl_action_data set_prbac_idx = {.data = 0}; + + set_prbac_idx.prbac_idx.prbac_id = (u16)param.index; + + *action0 = set_prbac_idx.data + (NBL_ACT_SET_PRBAC << 16); + + return 0; +} + +static int nbl_flow_cfg_tls_up_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_phy_ul4s_data_u *kt_data = (union nbl_phy_ul4s_data_u *)data; + u16 sport, dport; + + sport = param.eth + NBL_SPORT_ETH_OFFSET; + dport = (0x2 << 10) + param.vsi; + + if (param.type == NBL_KT_HALF_MODE) { + kt_data->ipv4_info.template = NBL_EM0_PT_PHY_UL4S_IPV4; + kt_data->ipv4_info.sip_high = param.data[1] >> 4; + kt_data->ipv4_info.sip_low = param.data[1]; + kt_data->ipv4_info.dip_high = param.data[5] >> 4; + kt_data->ipv4_info.dip_low = param.data[5]; + kt_data->ipv4_info.l4_sport = param.data[9]; + kt_data->ipv4_info.l4_dport = param.data[10]; + kt_data->ipv4_info.sport = sport; + } else { + kt_data->ipv6_info.template = NBL_EM0_PT_PHY_UL4S_IPV6; + kt_data->ipv6_info.sip1 = ((u64)param.data[1] << 28) + (param.data[2] >> 4); + kt_data->ipv6_info.sip2 = ((u64)param.data[2] << 60) + + ((u64)param.data[3] << 28) + (param.data[4] >> 4); + kt_data->ipv6_info.sip3 = param.data[4]; + kt_data->ipv6_info.l4_sport = param.data[9]; + kt_data->ipv6_info.l4_dport = param.data[10]; + kt_data->ipv6_info.dport = dport; + kt_data->ipv6_info.sport = sport; + } return 0; } -static int nbl_flow_cfg_l3_up_key_value(union nbl_common_data_u *data, - struct nbl_flow_param param, u8 eth_mode) +static void nbl_flow_cfg_kt_action_tls_up(union nbl_common_data_u *data, u32 action0, u32 action1) { - union nbl_l3_phy_up_multi_data_u *kt_data = (union nbl_l3_phy_up_multi_data_u *)data; - u8 sport; + union nbl_phy_ul4s_data_u *kt_data = (union nbl_phy_ul4s_data_u *)data; - kt_data->info.dst_mac = 0x3333; - kt_data->info.template = NBL_EM0_PT_PHY_UP_MULTICAST_L3; - kt_data->info.padding = 0; + kt_data->ipv4_info.act0 = action0; +} + +static int nbl_flow_cfg_action_ipsec_down(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + union nbl_action_data set_prbac_idx = {.data = 0}; + + set_prbac_idx.prbac_idx.prbac_id = (u16)param.index; + + *action0 = set_prbac_idx.data + (NBL_ACT_SET_PRBAC << 16); + + return 0; +} + +static int nbl_flow_cfg_ipsec_down_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_phy_dprbac_data_u *kt_data = (union nbl_phy_dprbac_data_u *)data; + u16 sport; sport = param.eth; - kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + if (param.type == NBL_KT_HALF_MODE) { + kt_data->ipv4_info.template = NBL_EM0_PT_PHY_DPRBAC_IPV4; + kt_data->ipv4_info.sip_high = param.data[1] >> 4; + kt_data->ipv4_info.sip_low = param.data[1]; + kt_data->ipv4_info.dip_high = param.data[5] >> 4; + kt_data->ipv4_info.dip_low = param.data[5]; + kt_data->ipv4_info.sport = 
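
The half-mode and full-mode branches above repack the caller's 32-bit `param.data[]` words into key-table bit-fields; pairs such as `sip_high`/`sip_low` split one value at a 4-bit boundary, with `>> 4` keeping the upper bits and the truncating assignment keeping the low nibble. A compilable sketch of that split-and-recombine pattern, using assumed 28/4-bit widths since the real `union nbl_phy_ul4s_data_u` layout is not part of this hunk:

#include <assert.h>
#include <stdint.h>

/* Assumed widths: 28 high bits + 4 low bits per 32-bit source word. */
struct demo_key {
	uint32_t sip_high : 28;
	uint32_t sip_low  : 4;
};

static struct demo_key demo_pack(uint32_t sip)
{
	struct demo_key k;

	k.sip_high = sip >> 4;	/* upper 28 bits */
	k.sip_low = sip;	/* truncated to the low 4 bits */
	return k;
}

static uint32_t demo_unpack(struct demo_key k)
{
	return ((uint32_t)k.sip_high << 4) | k.sip_low;
}

int main(void)
{
	uint32_t sip = 0xc0a80101;	/* 192.168.1.1 */

	assert(demo_unpack(demo_pack(sip)) == sip);
	return 0;
}
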
sport; + } else { + kt_data->ipv6_info.template = NBL_EM0_PT_PHY_DPRBAC_IPV6; + kt_data->ipv6_info.sip1 = (param.data[1] >> 4); + kt_data->ipv6_info.sip2 = ((u64)param.data[1] << 60) + ((u64)param.data[2] << 28) + + (param.data[3] >> 4); + kt_data->ipv6_info.sip3 = ((u64)param.data[3] << 32) + param.data[4]; + kt_data->ipv6_info.dip1 = (param.data[5] >> 4); + kt_data->ipv6_info.dip2 = ((u64)param.data[5] << 60) + ((u64)param.data[6] << 28) + + (param.data[7] >> 4); + kt_data->ipv6_info.dip3 = ((u64)param.data[7] << 32) + param.data[8]; + kt_data->ipv6_info.sport = sport; + } return 0; } -static int nbl_flow_cfg_l3_down_key_value(union nbl_common_data_u *data, - struct nbl_flow_param param, u8 eth_mode) +static void nbl_flow_cfg_kt_action_ipsec_down(union nbl_common_data_u *data, + u32 action0, u32 action1) { - union nbl_l3_phy_down_multi_data_u *kt_data = (union nbl_l3_phy_down_multi_data_u *)data; - u8 sport; + union nbl_phy_dprbac_data_u *kt_data = (union nbl_phy_dprbac_data_u *)data; - kt_data->info.dst_mac = 0x3333; - kt_data->info.template = NBL_EM0_PT_PHY_DOWN_MULTICAST_L3; - kt_data->info.padding = 0; + kt_data->ipv4_info.act0 = action0; +} - sport = param.eth; - if (eth_mode == NBL_TWO_ETHERNET_PORT) - sport &= 0xFE; - kt_data->info.sport = sport; +static int nbl_flow_cfg_action_nd_upcall(struct nbl_flow_param param, + u32 *action0, u32 *action1) +{ + *action1 = 0; + /* For TC, jump to ACL, the upcall action will be overwritten; + * For PMD, upcall and jump to EPRO, skipping ACL + */ + if (param.for_pmd) + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_UPCALL, + SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_EPRO); + else + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_UPCALL, + SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_ACL_S0); return 0; } -static void nbl_flow_cfg_kt_action_up_tnl(union nbl_common_data_u *data, u32 action0, u32 action1) +static int nbl_flow_cfg_nd_upcall_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) { - union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; + union nbl_nd_upcall_data_u *kt_data = (union nbl_nd_upcall_data_u *)data; - kt_data->info.act0 = action0; + kt_data->info.template = NBL_EM0_PT_PMD_ND_UPCALL; + kt_data->info.ptype = param.priv_data; + + return 0; } -static void nbl_flow_cfg_kt_action_lldp_lacp_up(union nbl_common_data_u *data, - u32 action0, u32 action1) +static void nbl_flow_cfg_kt_action_nd_upcall(union nbl_common_data_u *data, + u32 action0, u32 action1) { - union nbl_l2_phy_lldp_lacp_data_u *kt_data = (union nbl_l2_phy_lldp_lacp_data_u *)data; + union nbl_nd_upcall_data_u *kt_data = (union nbl_nd_upcall_data_u *)data; kt_data->info.act0 = action0; + kt_data->info.act1 = action1; } -static void nbl_flow_cfg_kt_action_up(union nbl_common_data_u *data, u32 action0, u32 action1) +static int nbl_flow_cfg_action_multi_mcast(struct nbl_flow_param param, u32 *action0, u32 *action1) { - union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; + return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); +} - kt_data->info.act0 = action0; +static int nbl_flow_cfg_l2up_multi_mcast_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_up_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_up_multi_mcast_data_u *)data; + u8 sport; + + kt_data->info.template = NBL_EM0_PT_PHY_L2_UP_MULTI_MCAST; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; } -static 
void nbl_flow_cfg_kt_action_down(union nbl_common_data_u *data, u32 action0, u32 action1) +static void nbl_flow_cfg_kt_action_l2up_multi_mcast(union nbl_common_data_u *data, + u32 action0, u32 action1) { - union nbl_l2_phy_down_data_u *kt_data = (union nbl_l2_phy_down_data_u *)data; + union nbl_l2_phy_up_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_up_multi_mcast_data_u *)data; kt_data->info.act0 = action0; } -static void nbl_flow_cfg_kt_action_l2_up(union nbl_common_data_u *data, u32 action0, u32 action1) +static int nbl_flow_cfg_l3up_multi_mcast_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) { - union nbl_l2_phy_up_multi_data_u *kt_data = (union nbl_l2_phy_up_multi_data_u *)data; + union nbl_l2_phy_up_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_up_multi_mcast_data_u *)data; + u8 sport; - kt_data->info.act0 = action0; - kt_data->info.act1 = action1; + kt_data->info.template = NBL_EM0_PT_PHY_L3_UP_MULTI_MCAST; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; } -static void nbl_flow_cfg_kt_action_l2_down(union nbl_common_data_u *data, u32 action0, u32 action1) +static int nbl_flow_cfg_l2down_multi_mcast_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) { - union nbl_l2_phy_down_multi_data_u *kt_data = (union nbl_l2_phy_down_multi_data_u *)data; + union nbl_l2_phy_down_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_down_multi_mcast_data_u *)data; + u8 sport; - kt_data->info.act0 = action0; - kt_data->info.act1 = action1; + kt_data->info.template = NBL_EM0_PT_PHY_L2_DOWN_MULTI_MCAST; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; } -static void nbl_flow_cfg_kt_action_l3_up(union nbl_common_data_u *data, u32 action0, u32 action1) +static void nbl_flow_cfg_kt_action_l2down_multi_mcast(union nbl_common_data_u *data, + u32 action0, u32 action1) { - union nbl_l3_phy_up_multi_data_u *kt_data = (union nbl_l3_phy_up_multi_data_u *)data; + union nbl_l2_phy_down_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_down_multi_mcast_data_u *)data; kt_data->info.act0 = action0; - kt_data->info.act1 = action1; } -static void nbl_flow_cfg_kt_action_l3_down(union nbl_common_data_u *data, u32 action0, u32 action1) +static int nbl_flow_cfg_l3down_multi_mcast_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) { - union nbl_l3_phy_down_multi_data_u *kt_data = (union nbl_l3_phy_down_multi_data_u *)data; + union nbl_l2_phy_down_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_down_multi_mcast_data_u *)data; + u8 sport; - kt_data->info.act0 = action0; - kt_data->info.act1 = action1; + kt_data->info.template = NBL_EM0_PT_PHY_L3_DOWN_MULTI_MCAST; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; } #define NBL_FLOW_OPS_ARR_ENTRY(type, action_func, kt_func, kt_action_func) \ @@ -343,10 +495,6 @@ static const struct nbl_flow_rule_cfg_ops cfg_ops[] = { nbl_flow_cfg_action_up_tnl, nbl_flow_cfg_up_tnl_key_value, nbl_flow_cfg_kt_action_up_tnl), - NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_LLDP_LACP_UP, - nbl_flow_cfg_action_lldp_lacp_up, - nbl_flow_cfg_lldp_lacp_up_key_value, - nbl_flow_cfg_kt_action_lldp_lacp_up), NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_UP, nbl_flow_cfg_action_up, nbl_flow_cfg_up_key_value, @@ -355,55 +503,58 @@ static const struct nbl_flow_rule_cfg_ops cfg_ops[] = { nbl_flow_cfg_action_down, nbl_flow_cfg_down_key_value, nbl_flow_cfg_kt_action_down), - 
NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_UP, - nbl_flow_cfg_action_l2_up, - nbl_flow_cfg_l2_up_key_value, - nbl_flow_cfg_kt_action_l2_up), - NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_DOWN, - nbl_flow_cfg_action_l2_down, - nbl_flow_cfg_l2_down_key_value, - nbl_flow_cfg_kt_action_l2_down), - NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_UP, - nbl_flow_cfg_action_l3_up, - nbl_flow_cfg_l3_up_key_value, - nbl_flow_cfg_kt_action_l3_up), - NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_DOWN, - nbl_flow_cfg_action_l3_down, - nbl_flow_cfg_l3_down_key_value, - nbl_flow_cfg_kt_action_l3_down), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_LLDP_LACP_UP, + nbl_flow_cfg_action_lldp_lacp_up, + nbl_flow_cfg_lldp_lacp_up_key_value, + nbl_flow_cfg_kt_action_lldp_lacp_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_UP_MULTI_MCAST, + nbl_flow_cfg_action_multi_mcast, + nbl_flow_cfg_l2up_multi_mcast_key_value, + nbl_flow_cfg_kt_action_l2up_multi_mcast), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_UP_MULTI_MCAST, + nbl_flow_cfg_action_multi_mcast, + nbl_flow_cfg_l3up_multi_mcast_key_value, + nbl_flow_cfg_kt_action_l2up_multi_mcast), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_PMD_ND_UPCALL, + nbl_flow_cfg_action_nd_upcall, + nbl_flow_cfg_nd_upcall_key_value, + nbl_flow_cfg_kt_action_nd_upcall), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_TLS_UP, + nbl_flow_cfg_action_tls_up, + nbl_flow_cfg_tls_up_key_value, + nbl_flow_cfg_kt_action_tls_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_IPSEC_DOWN, + nbl_flow_cfg_action_ipsec_down, + nbl_flow_cfg_ipsec_down_key_value, + nbl_flow_cfg_kt_action_ipsec_down), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_DOWN_MULTI_MCAST, + nbl_flow_cfg_action_multi_mcast, + nbl_flow_cfg_l2down_multi_mcast_key_value, + nbl_flow_cfg_kt_action_l2down_multi_mcast), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_DOWN_MULTI_MCAST, + nbl_flow_cfg_action_multi_mcast, + nbl_flow_cfg_l3down_multi_mcast_key_value, + nbl_flow_cfg_kt_action_l2down_multi_mcast), }; -static unsigned long find_two_zero_bit(const unsigned long *addr, unsigned long size) -{ - unsigned long flow_id, next_id; - - flow_id = find_first_zero_bit(addr, size); - next_id = find_next_zero_bit(addr, size, flow_id + 1); - while ((flow_id + 1) != next_id || (flow_id % 2)) { - flow_id = next_id; - next_id = find_next_zero_bit(addr, size, flow_id + 1); - if (next_id == size) - return size; - } - - return flow_id; -} - static int nbl_flow_alloc_flow_id(struct nbl_flow_mgt *flow_mgt, struct nbl_flow_fem_entry *flow) { u32 flow_id; if (flow->flow_type == NBL_KT_HALF_MODE) { - flow_id = find_first_zero_bit(flow_mgt->flow_id, NBL_MACVLAN_TABLE_LEN); + flow_id = find_first_zero_bit(flow_mgt->flow_id_bitmap, NBL_MACVLAN_TABLE_LEN); if (flow_id == NBL_MACVLAN_TABLE_LEN) return -ENOSPC; - set_bit(flow_id, flow_mgt->flow_id); + set_bit(flow_id, flow_mgt->flow_id_bitmap); + flow_mgt->flow_id_cnt--; } else { - flow_id = find_two_zero_bit(flow_mgt->flow_id, NBL_MACVLAN_TABLE_LEN); + flow_id = nbl_common_find_available_idx(flow_mgt->flow_id_bitmap, + NBL_MACVLAN_TABLE_LEN, 2, 2); if (flow_id == NBL_MACVLAN_TABLE_LEN) return -ENOSPC; - set_bit(flow_id, flow_mgt->flow_id); - set_bit(flow_id + 1, flow_mgt->flow_id); + set_bit(flow_id, flow_mgt->flow_id_bitmap); + set_bit(flow_id + 1, flow_mgt->flow_id_bitmap); + flow_mgt->flow_id_cnt -= 2; } flow->flow_id = flow_id; @@ -416,12 +567,14 @@ static void nbl_flow_free_flow_id(struct nbl_flow_mgt *flow_mgt, struct nbl_flow return; if (flow->flow_type == NBL_KT_HALF_MODE) { - clear_bit(flow->flow_id, flow_mgt->flow_id); + clear_bit(flow->flow_id, flow_mgt->flow_id_bitmap); flow->flow_id = 0xFFFF; + 
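
`nbl_flow_alloc_flow_id()` above now consumes one bitmap slot for a half-mode key and two adjacent, 2-aligned slots for a full-mode key, tracking the remainder in `flow_id_cnt`. A userspace sketch of the aligned search, assuming `nbl_common_find_available_idx(bitmap, size, num, align)` means "find `num` consecutive clear bits starting at a multiple of `align`"; the helper itself is not defined in this patch:

#include <stdbool.h>
#include <stddef.h>

#define DEMO_BITS_PER_LONG (8 * sizeof(unsigned long))

static bool demo_test_bit(const unsigned long *bm, size_t i)
{
	return (bm[i / DEMO_BITS_PER_LONG] >> (i % DEMO_BITS_PER_LONG)) & 1;
}

/* Assumed semantics: return the first index, aligned to `align`, where `num`
 * consecutive bits are clear, or `size` when no such run exists.
 */
static size_t demo_find_available_idx(const unsigned long *bm, size_t size,
				      size_t num, size_t align)
{
	size_t i, j;

	for (i = 0; i + num <= size; i += align) {
		for (j = 0; j < num && !demo_test_bit(bm, i + j); j++)
			;
		if (j == num)
			return i;	/* a whole aligned run is free */
	}
	return size;	/* mirrors the "== NBL_MACVLAN_TABLE_LEN" failure check */
}

int main(void)
{
	unsigned long bm = 0x5;	/* bits 0 and 2 busy, so pair {4, 5} is first */

	return demo_find_available_idx(&bm, 16, 2, 2) == 4 ? 0 : 1;
}
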
flow_mgt->flow_id_cnt++; } else { - clear_bit(flow->flow_id, flow_mgt->flow_id); - clear_bit(flow->flow_id + 1, flow_mgt->flow_id); + clear_bit(flow->flow_id, flow_mgt->flow_id_bitmap); + clear_bit(flow->flow_id + 1, flow_mgt->flow_id_bitmap); flow->flow_id = 0xFFFF; + flow_mgt->flow_id_cnt += 2; } } @@ -447,8 +600,27 @@ static void nbl_flow_free_tcam_id(struct nbl_flow_mgt *flow_mgt, tcam_item->tcam_index = 0; } -void nbl_flow_set_mt_input(struct nbl_mt_input *mt_input, union nbl_common_data_u *kt_data, - u8 type, u16 flow_id) +static int nbl_flow_alloc_mcc_id(struct nbl_flow_mgt *flow_mgt) +{ + u32 mcc_id; + + mcc_id = find_first_zero_bit(flow_mgt->mcc_id_bitmap, NBL_FLOW_MCC_INDEX_SIZE); + if (mcc_id == NBL_FLOW_MCC_INDEX_SIZE) + return -ENOSPC; + + set_bit(mcc_id, flow_mgt->mcc_id_bitmap); + + return mcc_id + NBL_FLOW_MCC_INDEX_START; +} + +static void nbl_flow_free_mcc_id(struct nbl_flow_mgt *flow_mgt, u32 mcc_id) +{ + if (mcc_id >= NBL_FLOW_MCC_INDEX_START) + clear_bit(mcc_id - NBL_FLOW_MCC_INDEX_START, flow_mgt->mcc_id_bitmap); +} + +static void nbl_flow_set_mt_input(struct nbl_mt_input *mt_input, union nbl_common_data_u *kt_data, + u8 type, u16 flow_id) { int i; u16 key_len; @@ -459,7 +631,7 @@ void nbl_flow_set_mt_input(struct nbl_mt_input *mt_input, union nbl_common_data_ mt_input->tbl_id = flow_id + NBL_EM_PHY_KT_OFFSET; mt_input->depth = 0; - mt_input->power = 10; + mt_input->power = NBL_PP0_POWER; } static void nbl_flow_key_hash(struct nbl_flow_fem_entry *flow, struct nbl_mt_input *mt_input) @@ -487,9 +659,9 @@ static bool nbl_pp_ht0_ht1_search(struct nbl_flow_ht_mng *pp_ht0_mng, u16 ht0_ha for (i = 0; i < NBL_HASH_CFT_MAX; i++) if (node0->key[i].vid && node0->key[i].ht_other_index == ht1_hash) { is_find = true; - nbl_info(common, NBL_DEBUG_FLOW, - "Conflicted ht on vid %d and kt_index %u\n", - node0->key[i].vid, node0->key[i].kt_index); + nbl_debug(common, NBL_DEBUG_FLOW, + "Conflicted ht on vid %d and kt_index %u\n", + node0->key[i].vid, node0->key[i].kt_index); return is_find; } @@ -498,9 +670,9 @@ static bool nbl_pp_ht0_ht1_search(struct nbl_flow_ht_mng *pp_ht0_mng, u16 ht0_ha for (i = 0; i < NBL_HASH_CFT_MAX; i++) if (node1->key[i].vid && node1->key[i].ht_other_index == ht0_hash) { is_find = true; - nbl_info(common, NBL_DEBUG_FLOW, - "Conflicted ht on vid %d and kt_index %u\n", - node1->key[i].vid, node1->key[i].kt_index); + nbl_debug(common, NBL_DEBUG_FLOW, + "Conflicted ht on vid %d and kt_index %u\n", + node1->key[i].vid, node1->key[i].kt_index); return is_find; } @@ -553,8 +725,8 @@ static int nbl_flow_find_ht_avail_table(struct nbl_flow_ht_mng *pp_ht0_mng, } } -int nbl_flow_insert_pp_ht(struct nbl_flow_ht_mng *pp_ht_mng, - u16 hash, u16 hash_other, u32 key_index) +static int nbl_flow_insert_pp_ht(struct nbl_flow_ht_mng *pp_ht_mng, + u16 hash, u16 hash_other, u32 key_index) { struct nbl_flow_ht_tbl *node; int i; @@ -679,10 +851,6 @@ static int nbl_flow_del_2hw(struct nbl_resource_mgt *res_mgt, struct nbl_ht_item phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - memset(kt_item.kt_data.hash_key, 0, sizeof(kt_item.kt_data.hash_key)); - phy_ops->set_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), kt_item.kt_data.hash_key, - ht_item.key_index, key_type); - hash = ht_item.ht_table == NBL_HT0 ? 
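
The HT0/HT1 search above is a two-table scheme: each key is hashed twice, a resident entry remembers the bucket it occupies in the other table (`ht_other_index`), and a new key conflicts only when both of its hashes collide with an existing pair. A toy illustration with two stand-in hash functions; the driver derives both hashes from the key in `nbl_flow_key_hash()`, with parameters not visible in this hunk:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_BUCKETS 1024

struct demo_ht_entry {
	bool valid;
	uint16_t other_index;	/* bucket this key occupies in the other table */
};

static struct demo_ht_entry demo_ht0[DEMO_BUCKETS], demo_ht1[DEMO_BUCKETS];

/* Two cheap, independent hashes; stand-ins for the hardware hash functions. */
static uint16_t demo_hash0(uint64_t key) { return (uint16_t)((key * 0x9e3779b97f4a7c15ULL) >> 54); }
static uint16_t demo_hash1(uint64_t key) { return (uint16_t)((key * 0xc2b2ae3d27d4eb4fULL) >> 54); }

/* A key conflicts only if one of its buckets already holds the paired entry. */
static bool demo_ht_conflict(uint64_t key)
{
	uint16_t h0 = demo_hash0(key) % DEMO_BUCKETS;
	uint16_t h1 = demo_hash1(key) % DEMO_BUCKETS;

	if (demo_ht0[h0].valid && demo_ht0[h0].other_index == h1)
		return true;
	if (demo_ht1[h1].valid && demo_ht1[h1].other_index == h0)
		return true;
	return false;
}

int main(void)
{
	demo_ht0[demo_hash0(42) % DEMO_BUCKETS] =
		(struct demo_ht_entry){ true, demo_hash1(42) % DEMO_BUCKETS };
	return demo_ht_conflict(42) ? 0 : 1;
}
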
ht_item.ht0_hash : ht_item.ht1_hash; phy_ops->set_ht(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), hash, 0, ht_item.ht_table, ht_item.hash_bucket, 0, 0); @@ -727,16 +895,20 @@ static int nbl_flow_add_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_p struct nbl_mt_input mt_input; struct nbl_ht_item ht_item; struct nbl_kt_item kt_item; - struct nbl_tcam_item tcam_item; + struct nbl_tcam_item *tcam_item = NULL; struct nbl_flow_ht_mng *pp_ht_mng = NULL; u32 action0, action1; + u32 cost = 0; int ht_table; int ret = 0; memset(&mt_input, 0, sizeof(mt_input)); memset(&ht_item, 0, sizeof(ht_item)); memset(&kt_item, 0, sizeof(kt_item)); - memset(&tcam_item, 0, sizeof(tcam_item)); + + tcam_item = kzalloc(sizeof(*tcam_item), GFP_ATOMIC); + if (!tcam_item) + return -ENOMEM; flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); common = NBL_RES_MGT_TO_COMMON(res_mgt); @@ -746,17 +918,28 @@ static int nbl_flow_add_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_p flow->type = type; flow->flow_id = 0xFFFF; + if (type >= NBL_FLOW_ACCEL_BEGIN && type < NBL_FLOW_ACCEL_END) { + if (flow->flow_type == NBL_KT_FULL_MODE) + cost = 2; + else + cost = 1; + + if ((flow_mgt->accel_flow_count + cost) > NBL_MACVLAN_TABLE_LEN / 2) { + ret = -ENOSPC; + goto free_mem; + } + } ret = nbl_flow_alloc_flow_id(flow_mgt, flow); if (ret) - return ret; + goto free_mem; ret = cfg_ops[type].cfg_action(param, &action0, &action1); if (ret) - return ret; + goto free_mem; ret = cfg_ops[type].cfg_key(&kt_item.kt_data, param, NBL_COMMON_TO_ETH_MODE(common)); if (ret) - return ret; + goto free_mem; nbl_flow_set_mt_input(&mt_input, &kt_item.kt_data, param.type, flow->flow_id); nbl_flow_key_hash(flow, &mt_input); @@ -778,26 +961,31 @@ static int nbl_flow_add_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_p cfg_ops[type].cfg_kt_action(&kt_item.kt_data, action0, action1); ret = nbl_flow_send_2hw(res_mgt, ht_item, kt_item, param.type); } else { - ret = nbl_flow_alloc_tcam_id(flow_mgt, &tcam_item); + ret = nbl_flow_alloc_tcam_id(flow_mgt, tcam_item); if (ret) goto out; - nbl_flow_cfg_tcam(&tcam_item, &ht_item, &kt_item, action0, action1); - flow->tcam_index = tcam_item.tcam_index; + nbl_flow_cfg_tcam(tcam_item, &ht_item, &kt_item, action0, action1); + flow->tcam_index = tcam_item->tcam_index; - ret = nbl_flow_add_tcam(res_mgt, tcam_item); + ret = nbl_flow_add_tcam(res_mgt, *tcam_item); } out: if (ret) { if (flow->tcam_flag) - nbl_flow_free_tcam_id(flow_mgt, &tcam_item); + nbl_flow_free_tcam_id(flow_mgt, tcam_item); else nbl_flow_del_ht(&ht_item, flow, pp_ht_mng); nbl_flow_free_flow_id(flow_mgt, flow); + } else { + flow_mgt->accel_flow_count += cost; } +free_mem: + kfree(tcam_item); + return ret; } @@ -838,167 +1026,722 @@ static void nbl_flow_del_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_ } nbl_flow_free_flow_id(flow_mgt, flow); + if (flow->type >= NBL_FLOW_ACCEL_BEGIN && flow->type < NBL_FLOW_ACCEL_END) { + if (flow->flow_type == NBL_KT_FULL_MODE) + flow_mgt->accel_flow_count -= 2; + else + flow_mgt->accel_flow_count -= 1; + } } -static int nbl_flow_add_mcc_node(struct nbl_flow_multi_group *multi_group, - struct nbl_resource_mgt *res_mgt, int eth, u16 vsi_id, u16 mcc_id) +static struct nbl_flow_mcc_node *nbl_flow_alloc_mcc_node(struct nbl_flow_mgt *flow_mgt, + u8 type, u16 data, u16 head) { - struct nbl_flow_mcc_node *mcc_node = NULL; - struct nbl_phy_ops *phy_ops; - u16 prev_mcc_id, mcc_action; + struct nbl_flow_mcc_node *node; + int mcc_id; + u16 mcc_action; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return 
NULL;
+
+	mcc_id = nbl_flow_alloc_mcc_id(flow_mgt);
+	if (mcc_id < 0) {
+		kfree(node);
+		return NULL;
+	}
+
+	switch (type) {
+	case NBL_MCC_INDEX_BOND:
+	case NBL_MCC_INDEX_ETH:
+		mcc_action = nbl_flow_cfg_action_set_dport_mcc_eth((u8)data);
+		break;
+	case NBL_MCC_INDEX_VSI:
+		mcc_action = nbl_flow_cfg_action_set_dport_mcc_vsi(data);
+		break;
+	case NBL_MCC_INDEX_BMC:
+		mcc_action = nbl_flow_cfg_action_set_dport_mcc_bmc();
+		break;
+	default:
+		nbl_flow_free_mcc_id(flow_mgt, mcc_id);
+		kfree(node);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&node->node);
+	node->mcc_id = mcc_id;
+	node->mcc_head = head;
+	node->type = type;
+	node->data = data;
+	node->mcc_action = mcc_action;
+
+	return node;
+}
+
+static void nbl_flow_free_mcc_node(struct nbl_flow_mgt *flow_mgt, struct nbl_flow_mcc_node *node)
+{
+	nbl_flow_free_mcc_id(flow_mgt, node->mcc_id);
+	kfree(node);
+}
+
+/* A change of the first multicast node is not handled yet; that would
+ * require updating every macvlan mcc entry.
+ */
+static int nbl_flow_add_mcc_node(struct nbl_resource_mgt *res_mgt,
+				 struct nbl_flow_mcc_node *mcc_node,
+				 struct list_head *head,
+				 struct list_head *list,
+				 struct list_head *suffix)
+{
+	struct nbl_flow_mcc_node *mcc_head = NULL;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	u16 prev_mcc_id, next_mcc_id = NBL_MCC_ID_INVALID;
 	int ret = 0;
 
-	phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	/* mcc_head must be initialized before mcc_list */
+	if (mcc_node->mcc_head) {
+		list_add_tail(&mcc_node->node, head);
+		prev_mcc_id = NBL_MCC_ID_INVALID;
 
-	mcc_node = kzalloc(sizeof(*mcc_node), GFP_KERNEL);
-	if (!mcc_node)
-		return -ENOMEM;
+		WARN_ON(!nbl_list_empty(list));
+		ret = phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id,
+				       prev_mcc_id, NBL_MCC_ID_INVALID, mcc_node->mcc_action);
+		goto check_ret;
+	}
 
-	mcc_action = eth >= 0 ? nbl_flow_cfg_action_set_dport_mcc_eth((u8)eth)
-			      : nbl_flow_cfg_action_set_dport_mcc_vsi(vsi_id);
-	mcc_node->mcc_id = mcc_id;
-	list_add_tail(&mcc_node->node, &multi_group->mcc_list);
+	list_add_tail(&mcc_node->node, list);
 
-	if (nbl_list_is_first(&mcc_node->node, &multi_group->mcc_list))
+	if (nbl_list_is_first(&mcc_node->node, list))
 		prev_mcc_id = NBL_MCC_ID_INVALID;
 	else
 		prev_mcc_id = list_prev_entry(mcc_node, node)->mcc_id;
 
-	ret = phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_id, prev_mcc_id, mcc_action);
+	/* Not a head node, so the next mcc may point into the suffix list */
+	if (suffix && !nbl_list_empty(suffix))
+		next_mcc_id = list_first_entry(suffix, struct nbl_flow_mcc_node, node)->mcc_id;
+	else
+		next_mcc_id = NBL_MCC_ID_INVALID;
+
+	/* First node of mcc_list: link it after every head node */
+	if (prev_mcc_id == NBL_MCC_ID_INVALID && !nbl_list_empty(head)) {
+		list_for_each_entry(mcc_head, head, node) {
+			prev_mcc_id = mcc_head->mcc_id;
+			ret |= phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id,
+						prev_mcc_id, next_mcc_id,
+						mcc_node->mcc_action);
+		}
+		goto check_ret;
+	}
+
+	ret = phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+			       mcc_node->mcc_id, prev_mcc_id, next_mcc_id, mcc_node->mcc_action);
+check_ret:
 	if (ret) {
 		list_del(&mcc_node->node);
-		kfree(mcc_node);
-		return -EFAULT;
+		return -EINVAL;
 	}
 
 	return 0;
 }
 
-static void nbl_flow_del_mcc_node(struct nbl_flow_multi_group *multi_group,
-				  struct nbl_resource_mgt *res_mgt,
-				  struct nbl_flow_mcc_node *mcc_node)
+/* A change of the first multicast node is not handled yet; that would
+ * require updating every macvlan mcc entry.
+ */
+static void nbl_flow_del_mcc_node(struct nbl_resource_mgt *res_mgt,
+				  struct nbl_flow_mcc_node *mcc_node,
+				  struct list_head *head,
+				  struct list_head *list,
+				  struct list_head *suffix)
 {
-	struct nbl_phy_ops *phy_ops;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_flow_mcc_node *mcc_head = NULL;
 	u16 prev_mcc_id, next_mcc_id;
 
-	phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
-
-	if (list_entry_is_head(mcc_node, &multi_group->mcc_list, node))
+	if (nbl_list_entry_is_head(mcc_node, head, node) ||
+	    nbl_list_entry_is_head(mcc_node, list, node))
 		return;
 
-	if (nbl_list_is_first(&mcc_node->node, &multi_group->mcc_list))
+	if (mcc_node->mcc_head) {
+		WARN_ON(!nbl_list_empty(list));
+		prev_mcc_id = NBL_MCC_ID_INVALID;
+		next_mcc_id = NBL_MCC_ID_INVALID;
+		phy_ops->del_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id,
+				 prev_mcc_id, next_mcc_id);
+		goto free_node;
+	}
+
+	if (nbl_list_is_first(&mcc_node->node, list))
 		prev_mcc_id = NBL_MCC_ID_INVALID;
 	else
 		prev_mcc_id = list_prev_entry(mcc_node, node)->mcc_id;
 
-	if (nbl_list_is_last(&mcc_node->node, &multi_group->mcc_list))
+	if (nbl_list_is_last(&mcc_node->node, list))
 		next_mcc_id = NBL_MCC_ID_INVALID;
 	else
 		next_mcc_id = list_next_entry(mcc_node, node)->mcc_id;
 
+	/* Not a head node, so the next mcc may point into the suffix list */
+	if (next_mcc_id == NBL_MCC_ID_INVALID && suffix && !nbl_list_empty(suffix))
+		next_mcc_id = list_first_entry(suffix, struct nbl_flow_mcc_node, node)->mcc_id;
+
+	if (prev_mcc_id == NBL_MCC_ID_INVALID && !nbl_list_empty(head)) {
+		list_for_each_entry(mcc_head, head, node) {
+			prev_mcc_id = mcc_head->mcc_id;
+			phy_ops->del_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					 mcc_node->mcc_id, prev_mcc_id, next_mcc_id);
+		}
+		goto free_node;
+	}
+
 	phy_ops->del_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id,
 			 prev_mcc_id, next_mcc_id);
-
+free_node:
 	list_del(&mcc_node->node);
-	kfree(mcc_node);
 }
 
-static void nbl_flow_macvlan_node_del_action_func(void *priv, void *x_key, void *y_key,
-						  void *data)
+static struct nbl_flow_mcc_group *nbl_flow_alloc_mcc_group(struct nbl_resource_mgt *res_mgt,
+							   unsigned long *vsi_bitmap,
+							   u16 eth_id, bool multi, u16 vsi_num)
 {
-	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
-	struct nbl_flow_macvlan_node_data *rule_data = (struct nbl_flow_macvlan_node_data *)data;
-	int i;
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	struct nbl_flow_switch_res *res = &flow_mgt->switch_res[eth_id];
+	struct nbl_flow_mcc_group *group;
+	struct nbl_flow_mcc_node *mcc_node, *mcc_node_safe;
+	int ret;
+	int bit;
+
+	/* The structure of the mc macvlan list is:
+	 *
+	 *                 macvlan up
+	 *                     |
+	 *                     |
+	 *      BMC ->         |
+	 *              VSI 0 -> VSI 1 ->     -> allmulti list
+	 *      ETH ->         |
+	 *                     |
+	 *                     |
+	 *                 macvlan down
+	 *
+	 * Up-direction mc packets are therefore sent to the BMC and need not
+	 * be broadcast to the eth port, while down-direction mc packets are
+	 * sent to the eth port but not to the BMC.
+	 *
+	 * Each mac flow entry has independent bmc/eth mcc nodes.
+	 * All mac flow entries share the allmulti vsi nodes.
+	 */
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
+	if (!group)
+		return NULL;
+
+	group->vsi_base = eth_id * NBL_FLOW_LEONIS_VSI_NUM_PER_ETH;
+	group->multi = multi;
+	group->nbits = flow_mgt->vsi_max_per_switch;
+	group->ref_cnt = 1;
+	group->vsi_num = vsi_num;
+
+	INIT_LIST_HEAD(&group->group_node);
+	INIT_LIST_HEAD(&group->mcc_node);
+	INIT_LIST_HEAD(&group->mcc_head);
+
+	group->vsi_bitmap = kcalloc(BITS_TO_LONGS(flow_mgt->vsi_max_per_switch), sizeof(long),
+				    GFP_KERNEL);
+	if (!group->vsi_bitmap)
+		goto alloc_vsi_bitmap_failed;
+
+	bitmap_copy(group->vsi_bitmap, vsi_bitmap, flow_mgt->vsi_max_per_switch);
+	if (!multi)
+		goto add_mcc_node;
+
+	mcc_node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_ETH, eth_id, 1);
+	if (!mcc_node)
+		goto free_nodes;
 
-	for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++)
-		nbl_flow_del_flow(res_mgt, &rule_data->entry[i]);
-}
+	ret = nbl_flow_add_mcc_node(res_mgt, mcc_node, &group->mcc_head,
+				    &group->mcc_node, NULL);
+	if (ret) {
+		nbl_flow_free_mcc_node(flow_mgt, mcc_node);
+		goto free_nodes;
+	}
+
+	group->down_mcc_id = mcc_node->mcc_id;
+	mcc_node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_BMC, NBL_FLOW_MCC_BMC_DPORT, 1);
+	if (!mcc_node)
+		goto free_nodes;
+
+	ret = nbl_flow_add_mcc_node(res_mgt, mcc_node, &group->mcc_head,
+				    &group->mcc_node, NULL);
+	if (ret) {
+		nbl_flow_free_mcc_node(flow_mgt, mcc_node);
+		goto free_nodes;
+	}
+	group->up_mcc_id = mcc_node->mcc_id;
+
+add_mcc_node:
+	for_each_set_bit(bit, vsi_bitmap, flow_mgt->vsi_max_per_switch) {
+		mcc_node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_VSI,
+						   bit + group->vsi_base, 0);
+		if (!mcc_node)
+			goto free_nodes;
+
+		if (multi)
+			ret = nbl_flow_add_mcc_node(res_mgt, mcc_node, &group->mcc_head,
+						    &group->mcc_node, &res->allmulti_list);
+		else
+			ret = nbl_flow_add_mcc_node(res_mgt, mcc_node, &group->mcc_head,
+						    &group->mcc_node, NULL);
+
+		if (ret) {
+			nbl_flow_free_mcc_node(flow_mgt, mcc_node);
+			goto free_nodes;
+		}
+	}
+
+	if (nbl_list_empty(&group->mcc_head)) {
+		group->down_mcc_id = list_first_entry(&group->mcc_node,
+						      struct nbl_flow_mcc_node, node)->mcc_id;
+		group->up_mcc_id = list_first_entry(&group->mcc_node,
+						    struct nbl_flow_mcc_node, node)->mcc_id;
+	}
+	list_add_tail(&group->group_node, &res->mcc_group_head);
+
+	return group;
+
+free_nodes:
+	list_for_each_entry_safe(mcc_node, mcc_node_safe, &group->mcc_node, node) {
+		nbl_flow_del_mcc_node(res_mgt, mcc_node, &group->mcc_head,
+				      &group->mcc_node, NULL);
+		nbl_flow_free_mcc_node(flow_mgt, mcc_node);
+	}
+
+	
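
Following the diagram above, a group on eth 0 with members VSI 3 and VSI 5 is built head-first, so the hardware chain fans out from the per-entry ETH/BMC head nodes into the shared VSI body. A runnable toy model of the resulting call sequence, assuming `add_mcc(id, prev, next, action)` writes `id`'s table entry and links it after `prev`; the hardware-facing semantics are not spelled out in this hunk:

#include <stdint.h>
#include <stdio.h>

#define DEMO_INVALID 0xffff
#define DEMO_TBL_SZ  64

/* Toy model of the hardware MCC table: one action + next pointer per entry. */
static struct { uint16_t next; const char *action; } demo_mcc[DEMO_TBL_SZ];

/* Assumed add_mcc semantics: link `id` between `prev` and `next`. */
static void demo_add_mcc(uint16_t id, uint16_t prev, uint16_t next, const char *act)
{
	demo_mcc[id].action = act;
	demo_mcc[id].next = next;
	if (prev != DEMO_INVALID)
		demo_mcc[prev].next = id;
}

int main(void)
{
	enum { ETH = 1, BMC = 2, VSI3 = 3, VSI5 = 4 };

	/* Heads first, in the same order as nbl_flow_alloc_mcc_group(). */
	demo_add_mcc(ETH, DEMO_INVALID, DEMO_INVALID, "to-eth0");
	demo_add_mcc(BMC, DEMO_INVALID, DEMO_INVALID, "to-bmc");
	/* The first body node is linked after *every* head node. */
	demo_add_mcc(VSI3, ETH, DEMO_INVALID, "to-vsi3");
	demo_add_mcc(VSI3, BMC, DEMO_INVALID, "to-vsi3");
	/* Later body nodes chain after the previous body node. */
	demo_add_mcc(VSI5, VSI3, DEMO_INVALID, "to-vsi5");

	for (uint16_t id = ETH; id != DEMO_INVALID; id = demo_mcc[id].next)
		printf("%u(%s) -> ", id, demo_mcc[id].action);
	printf("end\n");	/* 1(to-eth0) -> 3(to-vsi3) -> 4(to-vsi5) -> end */
	return 0;
}

Building the heads first is what lets up and down traffic share one VSI body while entering the chain through different head nodes.
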
list_for_each_entry_safe(mcc_node, mcc_node_safe, &group->mcc_head, node) { + nbl_flow_del_mcc_node(res_mgt, mcc_node, &group->mcc_head, + &group->mcc_node, NULL); + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + } + kfree(group->vsi_bitmap); +alloc_vsi_bitmap_failed: + kfree(group); + + return NULL; +} + +static void nbl_flow_free_mcc_group(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_mcc_group *group) +{ + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_mcc_node *mcc_node, *mcc_node_safe; + + group->ref_cnt--; + if (group->ref_cnt) + return; + + list_del(&group->group_node); + list_for_each_entry_safe(mcc_node, mcc_node_safe, &group->mcc_node, node) { + nbl_flow_del_mcc_node(res_mgt, mcc_node, &group->mcc_head, + &group->mcc_node, NULL); + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + } + + list_for_each_entry_safe(mcc_node, mcc_node_safe, &group->mcc_head, node) { + nbl_flow_del_mcc_node(res_mgt, mcc_node, &group->mcc_head, + &group->mcc_node, NULL); + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + } + + kfree(group->vsi_bitmap); + kfree(group); +} + +static struct nbl_flow_mcc_group *nbl_find_same_mcc_group(struct nbl_flow_switch_res *res, + unsigned long *vsi_bitmap, + bool multi) +{ + struct nbl_flow_mcc_group *group = NULL; + + list_for_each_entry(group, &res->mcc_group_head, group_node) + if (group->multi == multi && + __bitmap_equal(group->vsi_bitmap, vsi_bitmap, group->nbits)) { + group->ref_cnt++; + return group; + } + + return NULL; +} + +static void nbl_flow_macvlan_node_del_action_func(void *priv, void *x_key, void *y_key, + void *data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_l2_data *rule_data = (struct nbl_flow_l2_data *)data; + int i; + + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; + nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); + } + + /* delete mcc */ + if (rule_data->mcast_flow) + nbl_flow_free_mcc_group(res_mgt, rule_data->mcc_group); +} + +static u32 nbl_flow_get_reserve_macvlan_cnt(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_flow_switch_res *res; + int i; + u32 reserve_cnt = 0; + + for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + res = &flow_mgt->switch_res[i]; + if (res->num_vfs) + reserve_cnt += (res->num_vfs - res->active_vfs) * 3; + } + + return reserve_cnt; +} + +static int nbl_flow_macvlan_node_vsi_match_func(void *condition, void *x_key, void *y_key, + void *data) +{ + u16 vsi = *(u16 *)condition; + struct nbl_flow_l2_data *rule_data = (struct nbl_flow_l2_data *)data; + + if (!rule_data->mcast_flow) + return rule_data->vsi == vsi ? 
0 : -1; + else + return !test_bit(vsi - rule_data->mcc_group->vsi_base, + rule_data->mcc_group->vsi_bitmap); +} + +static void nbl_flow_macvlan_node_found_vsi_action(void *priv, void *x_key, void *y_key, + void *data) +{ + bool *match = (bool *)(priv); + + *match = 1; +} static int nbl_flow_add_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) { + struct nbl_hash_xy_tbl_scan_key scan_key; struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_flow_mgt *flow_mgt; - struct nbl_common_info *common; - struct nbl_flow_macvlan_node_data *rule_data; - void *mac_hash_tbl; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_switch_res *res; + struct nbl_flow_l2_data *rule_data; + struct nbl_flow_mcc_group *mcc_group = NULL, *pend_group = NULL; + unsigned long *vsi_bitmap; struct nbl_flow_param param = {0}; int i; - int ret; + int ret = 0; + int pf_id, vf_id; + u32 reserve_cnt; u16 eth_id; - u16 node_num; - - flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); - common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 vsi_base; + u16 vsi_num = 0; + u16 func_id; + bool alloc_rule = 0; + bool need_mcast = 0; + bool vsi_match = 0; + + if (nbl_flow_is_mirror_outputport(res_mgt, vsi)) + return 0; eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); - mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id]; - node_num = nbl_common_get_hash_xy_node_num(mac_hash_tbl); - if (node_num >= flow_mgt->unicast_mac_threshold) - return -ENOSPC; + res = &flow_mgt->switch_res[eth_id]; - if (nbl_common_get_hash_xy_node(mac_hash_tbl, mac, &vlan)) - return -EEXIST; + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi); + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pf_id, &vf_id); + reserve_cnt = nbl_flow_get_reserve_macvlan_cnt(res_mgt); - rule_data = kzalloc(sizeof(*rule_data), GFP_KERNEL); - if (!rule_data) + if (flow_mgt->flow_id_cnt <= reserve_cnt && + (vf_id == U32_MAX || test_bit(vf_id, res->vf_bitmap))) + return -ENOSPC; + + vsi_bitmap = kcalloc(BITS_TO_LONGS(flow_mgt->vsi_max_per_switch), sizeof(long), GFP_KERNEL); + if (!vsi_bitmap) return -ENOMEM; + NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_SHOW, NBL_HASH_TBL_ALL_SCAN, + false, NULL, NULL, &vsi, + &nbl_flow_macvlan_node_vsi_match_func, &vsi_match, + &nbl_flow_macvlan_node_found_vsi_action); + param.mac = mac; param.vid = vlan; param.eth = eth_id; param.vsi = vsi; + param.mcc_id = NBL_MCC_ID_INVALID; + + vsi_base = eth_id * NBL_FLOW_LEONIS_VSI_NUM_PER_ETH; + rule_data = (struct nbl_flow_l2_data *)nbl_common_get_hash_xy_node(res->mac_hash_tbl, + mac, &vlan); + if (rule_data) { + if (rule_data->mcast_flow && + test_bit(vsi - rule_data->mcc_group->vsi_base, + rule_data->mcc_group->vsi_bitmap)) + goto success; + else if (!rule_data->mcast_flow && rule_data->vsi == vsi) + goto success; + + if (!rule_data->mcast_flow) { + vsi_num = 1; + set_bit(rule_data->vsi - vsi_base, vsi_bitmap); + } else { + vsi_num = rule_data->mcc_group->vsi_num; + bitmap_copy(vsi_bitmap, rule_data->mcc_group->vsi_bitmap, + flow_mgt->vsi_max_per_switch); + } + need_mcast = 1; - for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { - if (nbl_flow_add_flow(res_mgt, param, i, &rule_data->entry[i])) - break; + } else { + rule_data = kzalloc(sizeof(*rule_data), GFP_KERNEL); + if (!rule_data) { + ret = -ENOMEM; + goto alloc_rule_failed; + } + alloc_rule = 1; + rule_data->multi = is_multicast_ether_addr(mac); + rule_data->mcast_flow = 0; + } + + if (rule_data->multi) + need_mcast = 1; + + if (need_mcast) { + set_bit(vsi - vsi_base, vsi_bitmap); + vsi_num++; + mcc_group 
= nbl_find_same_mcc_group(res, vsi_bitmap, rule_data->multi); + if (!mcc_group) { + mcc_group = nbl_flow_alloc_mcc_group(res_mgt, vsi_bitmap, eth_id, + rule_data->multi, vsi_num); + if (!mcc_group) { + ret = -ENOMEM; + goto alloc_mcc_group_failed; + } + } + if (rule_data->mcast_flow) + pend_group = rule_data->mcc_group; + } else { + rule_data->vsi = vsi; } - if (i != NBL_FLOW_MACVLAN_MAX) { - while (--i + 1) + + if (!alloc_rule) { + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; + nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); - goto rule_err; + } + + if (pend_group) + nbl_flow_free_mcc_group(res_mgt, pend_group); } - rule_data->vsi = vsi; - ret = nbl_common_alloc_hash_xy_node(mac_hash_tbl, mac, &vlan, rule_data); - if (ret) - goto node_err; + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; + if (mcc_group) { + if (i <= NBL_FLOW_UP) + param.mcc_id = mcc_group->up_mcc_id; + else + param.mcc_id = mcc_group->down_mcc_id; + } + ret = nbl_flow_add_flow(res_mgt, param, i, &rule_data->entry[i]); + if (ret) + goto add_flow_failed; + } - kfree(rule_data); + if (mcc_group) { + rule_data->mcast_flow = 1; + rule_data->mcc_group = mcc_group; + } else { + rule_data->mcast_flow = 0; + rule_data->vsi = vsi; + } + + if (alloc_rule) { + ret = nbl_common_alloc_hash_xy_node(res->mac_hash_tbl, mac, &vlan, rule_data); + if (ret) + goto add_flow_failed; + } + + if (alloc_rule) + kfree(rule_data); +success: + kfree(vsi_bitmap); + + if (vf_id != U32_MAX && !test_bit(vf_id, res->vf_bitmap)) { + set_bit(vf_id, res->vf_bitmap); + res->active_vfs++; + } return 0; -node_err: - for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) +add_flow_failed: + while (--i + 1) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); -rule_err: - kfree(rule_data); - return -EFAULT; + } + if (!alloc_rule) + nbl_common_free_hash_xy_node(res->mac_hash_tbl, mac, &vlan); + if (mcc_group) + nbl_flow_free_mcc_group(res_mgt, mcc_group); +alloc_mcc_group_failed: + if (alloc_rule) + kfree(rule_data); +alloc_rule_failed: + kfree(vsi_bitmap); + + nbl_common_scan_hash_xy_node(res->mac_hash_tbl, &scan_key); + if (vf_id != U32_MAX && test_bit(vf_id, res->vf_bitmap) && !vsi_match) { + clear_bit(vf_id, res->vf_bitmap); + res->active_vfs--; + } + + return ret; } static void nbl_flow_del_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_flow_mgt *flow_mgt; - struct nbl_flow_macvlan_node_data *rule_data; - void *mac_hash_tbl; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_mcc_group *mcc_group = NULL, *pend_group = NULL; + unsigned long *vsi_bitmap; + struct nbl_flow_switch_res *res; + struct nbl_flow_l2_data *rule_data; + struct nbl_flow_param param = {0}; + struct nbl_hash_xy_tbl_scan_key scan_key; int i; + int ret; + int pf_id, vf_id; + u32 vsi_num; + u16 vsi_base = 0; u16 eth_id; + u16 func_id; + bool need_mcast = false; + bool add_flow = false; + bool vsi_match = 0; - flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); - mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id]; + res = &flow_mgt->switch_res[eth_id]; - rule_data = nbl_common_get_hash_xy_node(mac_hash_tbl, mac, &vlan); + rule_data = nbl_common_get_hash_xy_node(res->mac_hash_tbl, mac, &vlan); if (!rule_data) return; + if (!rule_data->mcast_flow && rule_data->vsi != vsi) + 
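
Before allocating a fresh group, the add path above asks `nbl_find_same_mcc_group()` whether an identical destination set already exists, bumping its reference count instead of burning more MCC entries. A minimal sketch of that dedup-by-bitmap-equality pattern, with an assumed fixed bitmap size (`vsi_max_per_switch` is runtime-sized in the driver):

#include <stdbool.h>
#include <string.h>

#define DEMO_VSI_WORDS 4	/* assumed bitmap size per switch */

struct demo_group {
	struct demo_group *next;
	unsigned long vsi_bitmap[DEMO_VSI_WORDS];
	bool multi;
	unsigned int ref_cnt;
};

/* Reuse an identical destination set instead of allocating a new group. */
static struct demo_group *demo_find_same_group(struct demo_group *head,
					       const unsigned long *bitmap,
					       bool multi)
{
	struct demo_group *g;

	for (g = head; g; g = g->next)
		if (g->multi == multi &&
		    !memcmp(g->vsi_bitmap, bitmap, sizeof(g->vsi_bitmap))) {
			g->ref_cnt++;
			return g;
		}
	return NULL;
}

int main(void)
{
	struct demo_group g = { .multi = true, .ref_cnt = 1 };
	unsigned long want[DEMO_VSI_WORDS] = { 0x28 };	/* VSIs 3 and 5 */

	memcpy(g.vsi_bitmap, want, sizeof(want));
	return demo_find_same_group(&g, want, true) == &g ? 0 : 1;
}
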
return; + else if (rule_data->mcast_flow && + !test_bit(vsi - rule_data->mcc_group->vsi_base, rule_data->mcc_group->vsi_bitmap)) + return; - if (rule_data->vsi != vsi) + vsi_bitmap = kcalloc(BITS_TO_LONGS(flow_mgt->vsi_max_per_switch), sizeof(long), GFP_KERNEL); + if (!vsi_bitmap) return; - for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi); + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pf_id, &vf_id); + NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_SHOW, NBL_HASH_TBL_ALL_SCAN, + false, NULL, NULL, &vsi, + &nbl_flow_macvlan_node_vsi_match_func, &vsi_match, + &nbl_flow_macvlan_node_found_vsi_action); + + if (rule_data->mcast_flow) { + bitmap_copy(vsi_bitmap, rule_data->mcc_group->vsi_bitmap, + flow_mgt->vsi_max_per_switch); + vsi_num = rule_data->mcc_group->vsi_num; + clear_bit(vsi - rule_data->mcc_group->vsi_base, vsi_bitmap); + vsi_num--; + vsi_base = (u16)rule_data->mcc_group->vsi_base; + + if (rule_data->mcc_group->vsi_num > 1) + add_flow = true; + + if ((rule_data->multi && rule_data->mcc_group->vsi_num > 1) || + (!rule_data->multi && rule_data->mcc_group->vsi_num > 2)) + need_mcast = 1; + pend_group = rule_data->mcc_group; + } + + if (need_mcast) { + mcc_group = nbl_find_same_mcc_group(res, vsi_bitmap, rule_data->multi); + if (!mcc_group) { + mcc_group = nbl_flow_alloc_mcc_group(res_mgt, vsi_bitmap, eth_id, + rule_data->multi, vsi_num); + if (!mcc_group) + goto alloc_mcc_group_failed; + } + } + + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; + + nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); + } + + if (pend_group) + nbl_flow_free_mcc_group(res_mgt, pend_group); + + if (add_flow) { + param.mac = mac; + param.vid = vlan; + param.eth = eth_id; + param.mcc_id = NBL_MCC_ID_INVALID; + param.vsi = (u16)find_first_bit(vsi_bitmap, + flow_mgt->vsi_max_per_switch) + vsi_base; + + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; + if (mcc_group) { + if (i <= NBL_FLOW_UP) + param.mcc_id = mcc_group->up_mcc_id; + else + param.mcc_id = mcc_group->down_mcc_id; + } + ret = nbl_flow_add_flow(res_mgt, param, i, &rule_data->entry[i]); + if (ret) + goto add_flow_failed; + } + + if (mcc_group) { + rule_data->mcast_flow = 1; + rule_data->mcc_group = mcc_group; + } else { + rule_data->mcast_flow = 0; + rule_data->vsi = param.vsi; + } + } + + if (!add_flow) + nbl_common_free_hash_xy_node(res->mac_hash_tbl, mac, &vlan); + +alloc_mcc_group_failed: + kfree(vsi_bitmap); + + nbl_common_scan_hash_xy_node(res->mac_hash_tbl, &scan_key); + if (vf_id != U32_MAX && test_bit(vf_id, res->vf_bitmap) && !vsi_match) { + clear_bit(vf_id, res->vf_bitmap); + res->active_vfs--; + } + + return; + +add_flow_failed: + while (--i + 1) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); + } + if (mcc_group) + nbl_flow_free_mcc_group(res_mgt, pend_group); + nbl_common_free_hash_xy_node(res->mac_hash_tbl, mac, &vlan); + kfree(vsi_bitmap); + nbl_common_scan_hash_xy_node(res->mac_hash_tbl, &scan_key); + if (vf_id != U32_MAX && test_bit(vf_id, res->vf_bitmap) && !vsi_match) { + clear_bit(vf_id, res->vf_bitmap); + res->active_vfs--; + } - nbl_common_free_hash_xy_node(mac_hash_tbl, mac, &vlan); } static int nbl_flow_add_lag(void *priv, u16 vsi) @@ -1045,7 +1788,7 @@ static void nbl_flow_del_lag(void *priv, u16 vsi) if (rule->vsi == vsi) break; - if (list_entry_is_head(rule, &flow_mgt->lacp_list, node)) + if 
(nbl_list_entry_is_head(rule, &flow_mgt->lacp_list, node)) return; nbl_flow_del_flow(res_mgt, &rule->entry); @@ -1098,7 +1841,7 @@ static void nbl_flow_del_lldp(void *priv, u16 vsi) if (rule->vsi == vsi) break; - if (list_entry_is_head(rule, &flow_mgt->lldp_list, node)) + if (nbl_list_entry_is_head(rule, &flow_mgt->lldp_list, node)) return; nbl_flow_del_flow(res_mgt, &rule->entry); @@ -1107,100 +1850,197 @@ static void nbl_flow_del_lldp(void *priv, u16 vsi) kfree(rule); } -static int nbl_flow_add_multi_rule(void *priv, u16 vsi) +static int nbl_flow_change_mcc_group_chain(struct nbl_resource_mgt *res_mgt, u8 eth, + u16 current_mcc_id) +{ + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_switch_res *switch_res = &flow_mgt->switch_res[eth]; + struct nbl_flow_mcc_group *group; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 node_mcc; + + list_for_each_entry(group, &switch_res->mcc_group_head, group_node) + if (group->multi && !nbl_list_empty(&group->mcc_node)) { + node_mcc = list_last_entry(&group->mcc_node, + struct nbl_flow_mcc_node, node)->mcc_id; + phy_ops->update_mcc_next_node(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + node_mcc, current_mcc_id); + + } + + switch_res->allmulti_first_mcc = current_mcc_id; + return 0; +} + +static int nbl_flow_add_multi_mcast(void *priv, u16 vsi) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); - struct nbl_flow_multi_group *multi_group; - struct nbl_flow_mcc_index_key index_key = {0}; - u16 mcc_id; + struct nbl_flow_switch_res *switch_res; + struct nbl_flow_mcc_node *node; + int ret; + u16 current_mcc_id; u8 eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); - NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_VSI, vsi); - mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, sizeof(index_key)); + switch_res = &flow_mgt->switch_res[eth]; + list_for_each_entry(node, &switch_res->allmulti_list, node) + if (node->data == vsi && node->type == NBL_MCC_INDEX_VSI) + return 0; + + node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_VSI, vsi, 0); + if (!node) + return -ENOSPC; + + switch_res = &flow_mgt->switch_res[eth]; + ret = nbl_flow_add_mcc_node(res_mgt, node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); + if (ret) { + nbl_flow_free_mcc_node(flow_mgt, node); + return ret; + } - multi_group = &flow_mgt->multi_flow[eth]; + if (nbl_list_empty(&switch_res->allmulti_list)) + current_mcc_id = NBL_MCC_ID_INVALID; + else + current_mcc_id = list_first_entry(&switch_res->allmulti_list, + struct nbl_flow_mcc_node, node)->mcc_id; - return nbl_flow_add_mcc_node(multi_group, res_mgt, -1, vsi, mcc_id); + if (current_mcc_id != switch_res->allmulti_first_mcc) + nbl_flow_change_mcc_group_chain(res_mgt, eth, current_mcc_id); + + return 0; } -static void nbl_flow_del_multi_rule(void *priv, u16 vsi) +static void nbl_flow_del_multi_mcast(void *priv, u16 vsi) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); - struct nbl_flow_multi_group *multi_group; + struct nbl_flow_switch_res *switch_res; struct nbl_flow_mcc_node *mcc_node; - struct nbl_flow_mcc_index_key index_key = {0}; + u16 current_mcc_id; u8 eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); - u16 mcc_id; - NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_VSI, vsi); - mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, sizeof(index_key)); - 
nbl_common_free_index(flow_mgt->mcc_tbl_priv, &index_key, sizeof(index_key)); + switch_res = &flow_mgt->switch_res[eth]; + list_for_each_entry(mcc_node, &switch_res->allmulti_list, node) + if (mcc_node->data == vsi && mcc_node->type == NBL_MCC_INDEX_VSI) { + nbl_flow_del_mcc_node(res_mgt, mcc_node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + break; + } - multi_group = &flow_mgt->multi_flow[eth]; + if (nbl_list_empty(&switch_res->allmulti_list)) + current_mcc_id = NBL_MCC_ID_INVALID; + else + current_mcc_id = list_first_entry(&switch_res->allmulti_list, + struct nbl_flow_mcc_node, node)->mcc_id; - list_for_each_entry(mcc_node, &multi_group->mcc_list, node) - if (mcc_node->mcc_id == mcc_id) { - nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node); - return; - } + if (current_mcc_id != switch_res->allmulti_first_mcc) + nbl_flow_change_mcc_group_chain(res_mgt, eth, current_mcc_id); } static int nbl_flow_add_multi_group(struct nbl_resource_mgt *res_mgt, u8 eth) { struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); - struct nbl_flow_multi_group *multi_group; - struct nbl_flow_mcc_index_key index_key = {0}; - struct nbl_flow_param param = {0}; + struct nbl_flow_switch_res *switch_res = &flow_mgt->switch_res[eth]; + struct nbl_flow_param param_up = {0}; + struct nbl_flow_mcc_node *up_node; + struct nbl_flow_param param_down = {0}; + struct nbl_flow_mcc_node *down_node; int i, ret; - NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_ETH, eth); - param.mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, sizeof(index_key)); - param.eth = eth; + down_node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_ETH, eth, 1); + if (!down_node) + return -ENOSPC; + + ret = nbl_flow_add_mcc_node(res_mgt, down_node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); + if (ret) + goto add_eth_mcc_node_failed; - multi_group = &flow_mgt->multi_flow[eth]; - for (i = NBL_FLOW_MACVLAN_MAX; i < NBL_FLOW_TYPE_MAX; i++) { - ret = nbl_flow_add_flow(res_mgt, param, i, - &multi_group->entry[i - NBL_FLOW_MACVLAN_MAX]); + param_down.mcc_id = down_node->mcc_id; + param_down.eth = eth; + for (i = 0; i < NBL_FLOW_DOWN_MULTI_MCAST_END - NBL_FLOW_L2_DOWN_MULTI_MCAST; i++) { + ret = nbl_flow_add_flow(res_mgt, param_down, i + NBL_FLOW_L2_DOWN_MULTI_MCAST, + &switch_res->allmulti_down[i]); if (ret) - goto add_macvlan_fail; + goto add_down_flow_failed; } - ret = nbl_flow_add_mcc_node(multi_group, res_mgt, eth, -1, param.mcc_id); + up_node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_BMC, NBL_FLOW_MCC_BMC_DPORT, 1); + if (!up_node) { + ret = -ENOSPC; + goto alloc_bmc_node_failed; + } + + ret = nbl_flow_add_mcc_node(res_mgt, up_node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); if (ret) - goto add_mcc_fail; + goto add_bmc_mcc_node_failed; + + param_up.mcc_id = up_node->mcc_id; + param_up.eth = eth; + for (i = 0; i < NBL_FLOW_UP_MULTI_MCAST_END - NBL_FLOW_L2_UP_MULTI_MCAST; i++) { + ret = nbl_flow_add_flow(res_mgt, param_up, i + NBL_FLOW_L2_UP_MULTI_MCAST, + &switch_res->allmulti_up[i]); + if (ret) + goto add_up_flow_failed; + } - multi_group->ether_id = eth; - multi_group->mcc_id = param.mcc_id; + switch_res->ether_id = eth; + switch_res->allmulti_first_mcc = NBL_MCC_ID_INVALID; + switch_res->vld = 1; return 0; -add_mcc_fail: -add_macvlan_fail: - while (--i >= NBL_FLOW_MACVLAN_MAX) - nbl_flow_del_flow(res_mgt, &multi_group->entry[i - NBL_FLOW_MACVLAN_MAX]); +add_up_flow_failed: + while (--i >= 0) + 
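
When the head of the shared allmulti list changes, `nbl_flow_change_mcc_group_chain()` walks every multicast group and re-aims the hardware `next` pointer of the group's last body node at the new first allmulti node. A toy model of that relink, assuming `update_mcc_next_node(node, next)` simply rewrites one entry's next pointer:

#include <stdint.h>
#include <stdio.h>

#define DEMO_INVALID 0xffff

static uint16_t demo_next[64];	/* toy MCC table: next pointer per entry */

/* Assumed update_mcc_next_node semantics: rewrite one entry's next pointer. */
static void demo_update_next(uint16_t node, uint16_t next)
{
	demo_next[node] = next;
}

/* Re-aim every multicast group's tail at the new head of the allmulti list,
 * mirroring nbl_flow_change_mcc_group_chain().
 */
static void demo_change_chain(const uint16_t *group_tails, int n,
			      uint16_t first_allmulti)
{
	for (int i = 0; i < n; i++)
		demo_update_next(group_tails[i], first_allmulti);
}

int main(void)
{
	uint16_t tails[] = { 7, 12 };

	demo_change_chain(tails, 2, 30);	/* allmulti list now starts at 30 */
	demo_change_chain(tails, 2, DEMO_INVALID);	/* list emptied: detach */
	printf("tail 7 -> %u\n", demo_next[7]);
	return 0;
}
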
nbl_flow_del_flow(res_mgt, &switch_res->allmulti_up[i]); + nbl_flow_del_mcc_node(res_mgt, up_node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); +add_bmc_mcc_node_failed: + nbl_flow_free_mcc_node(flow_mgt, up_node); +alloc_bmc_node_failed: +add_down_flow_failed: + while (--i >= 0) + nbl_flow_del_flow(res_mgt, &switch_res->allmulti_down[i]); + nbl_flow_del_mcc_node(res_mgt, down_node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); +add_eth_mcc_node_failed: + nbl_flow_free_mcc_node(flow_mgt, down_node); return ret; } static void nbl_flow_del_multi_group(struct nbl_resource_mgt *res_mgt, u8 eth) { struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); - struct nbl_flow_multi_group *multi_group = &flow_mgt->multi_flow[eth]; + struct nbl_flow_switch_res *switch_res = &flow_mgt->switch_res[eth]; struct nbl_flow_mcc_node *mcc_node, *mcc_node_safe; - int i; - if (!multi_group->mcc_id) + if (!switch_res->vld) return; - for (i = NBL_FLOW_MACVLAN_MAX; i < NBL_FLOW_TYPE_MAX; i++) - nbl_flow_del_flow(res_mgt, &multi_group->entry[i - NBL_FLOW_MACVLAN_MAX]); + nbl_flow_del_flow(res_mgt, &switch_res->allmulti_up[0]); + nbl_flow_del_flow(res_mgt, &switch_res->allmulti_up[1]); + nbl_flow_del_flow(res_mgt, &switch_res->allmulti_down[0]); + nbl_flow_del_flow(res_mgt, &switch_res->allmulti_down[1]); - list_for_each_entry_safe(mcc_node, mcc_node_safe, &multi_group->mcc_list, node) - nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node); + list_for_each_entry_safe(mcc_node, mcc_node_safe, &switch_res->allmulti_list, node) { + nbl_flow_del_mcc_node(res_mgt, mcc_node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + } + + list_for_each_entry_safe(mcc_node, mcc_node_safe, &switch_res->allmulti_head, node) { + nbl_flow_del_mcc_node(res_mgt, mcc_node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + } - memset(multi_group, 0, sizeof(*multi_group)); - INIT_LIST_HEAD(&multi_group->mcc_list); + INIT_LIST_HEAD(&switch_res->allmulti_list); + INIT_LIST_HEAD(&switch_res->allmulti_head); + switch_res->vld = 0; + switch_res->allmulti_first_mcc = NBL_MCC_ID_INVALID; } static void nbl_flow_remove_multi_group(void *priv) @@ -1232,13 +2072,56 @@ static int nbl_flow_setup_multi_group(void *priv) return ret; } -static int nbl_flow_macvlan_node_vsi_match_func(void *condition, void *x_key, void *y_key, - void *data) +static int nbl_res_flow_cfg_duppkt_mcc(void *priv, struct nbl_lag_member_list_param *param) { - u16 vsi = *(u16 *)condition; - struct nbl_flow_macvlan_node_data *rule_data = (struct nbl_flow_macvlan_node_data *)data; + return 0; +} + +static void nbl_flow_clear_accel_flow(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_dipsec_rule *dipsec_rule, *dipsec_rule_safe; + struct nbl_flow_ul4s_rule *ul4s_rule, *ul4s_rule_safe; + + list_for_each_entry_safe(dipsec_rule, dipsec_rule_safe, &flow_mgt->dprbac_head, node) + if (dipsec_rule->vsi == vsi_id) { + nbl_flow_del_flow(res_mgt, &dipsec_rule->dipsec_entry); + list_del(&dipsec_rule->node); + kfree(dipsec_rule); + } - return rule_data->vsi == vsi ? 
0 : -1; + list_for_each_entry_safe(ul4s_rule, ul4s_rule_safe, &flow_mgt->ul4s_head, node) + if (ul4s_rule->vsi == vsi_id) { + nbl_flow_del_flow(res_mgt, &ul4s_rule->ul4s_entry); + list_del(&ul4s_rule->node); + kfree(ul4s_rule); + } +} + +static u16 nbl_vsi_mtu_index(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 index; + + index = phy_ops->get_mtu_index(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id); + return index - 1; +} + +static void nbl_clear_mtu_entry(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 mtu_index; + + mtu_index = nbl_vsi_mtu_index(res_mgt, vsi_id); + if (mtu_index < NBL_MAX_MTU) { + res_mgt->resource_info->mtu_list[mtu_index].ref_count--; + phy_ops->set_vsi_mtu(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, 0); + if (res_mgt->resource_info->mtu_list[mtu_index].ref_count == 0) { + phy_ops->set_mtu(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mtu_index + 1, 0); + res_mgt->resource_info->mtu_list[mtu_index].mtu_value = 0; + } + } } static void nbl_flow_clear_flow(void *priv, u16 vsi_id) @@ -1250,25 +2133,29 @@ static void nbl_flow_clear_flow(void *priv, u16 vsi_id) u8 eth_id; eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi_id); - mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id]; + mac_hash_tbl = flow_mgt->switch_res[eth_id].mac_hash_tbl; + nbl_clear_mtu_entry(res_mgt, vsi_id); NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_DELETE, NBL_HASH_TBL_ALL_SCAN, false, NULL, NULL, &vsi_id, &nbl_flow_macvlan_node_vsi_match_func, res_mgt, &nbl_flow_macvlan_node_del_action_func); nbl_common_scan_hash_xy_node(mac_hash_tbl, &scan_key); - - nbl_flow_del_multi_rule(res_mgt, vsi_id); + nbl_flow_del_multi_mcast(res_mgt, vsi_id); } char templete_name[NBL_FLOW_TYPE_MAX][16] = { "up_tnl", "up", "down", - "l2_mc_up", - "l2_mc_down", - "l3_mc_up", - "l3_mc_down" + "lldp/lacp", + "pmd_nd_upcall", + "l2_mul_up", + "l3_mul_up", + "l2_mul_down", + "l3_mul_down", + "tls_up", + "ipsec_down", }; static void nbl_flow_id_dump(struct seq_file *m, struct nbl_flow_fem_entry *entry, char *title) @@ -1278,19 +2165,49 @@ static void nbl_flow_id_dump(struct seq_file *m, struct nbl_flow_fem_entry *entr entry->hash_table, entry->hash_bucket); } +static void nbl_flow_mcc_node_dump(struct seq_file *m, struct nbl_flow_mcc_node *node) +{ + seq_printf(m, " head: %u, type: %u, id: %u, data: %u; ", node->mcc_head, + node->type, node->mcc_id, node->data); +} + +static void nbl_flow_mcc_group_dump(struct seq_file *m, struct nbl_flow_mcc_group *group) +{ + struct nbl_flow_mcc_node *mcc_node; + + seq_printf(m, "vsi_base: %u, nbits: %u, vsi_number: %u, ref_cnt %u, multi %u, up_mcc_id %u, down_mcc_id %u\n", + group->vsi_base, group->nbits, group->vsi_num, group->ref_cnt, group->multi, + group->up_mcc_id, group->down_mcc_id); + seq_puts(m, "mcc head list\n"); + list_for_each_entry(mcc_node, &group->mcc_head, node) + nbl_flow_mcc_node_dump(m, mcc_node); + seq_puts(m, "\nmcc body list\n"); + list_for_each_entry(mcc_node, &group->mcc_node, node) + nbl_flow_mcc_node_dump(m, mcc_node); + seq_puts(m, "\n"); +} + static void nbl_flow_macvlan_node_show_action_func(void *priv, void *x_key, void *y_key, void *data) { struct seq_file *m = (struct seq_file *)priv; u8 *mac = (u8 *)x_key; u16 vlan = *(u16 *)y_key; - struct nbl_flow_macvlan_node_data *rule_data = (struct nbl_flow_macvlan_node_data *)data; + struct nbl_flow_l2_data *rule_data = (struct nbl_flow_l2_data *)data; int i; - seq_printf(m, "\nvsi %d, vlan 
%d MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", - rule_data->vsi, vlan, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); - for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) + seq_printf(m, "\nvlan %d MAC address %02X:%02X:%02X:%02X:%02X:%02X, multi %u, mcast %u\n", + vlan, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], rule_data->multi, + rule_data->mcast_flow); + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; nbl_flow_id_dump(m, &rule_data->entry[i], templete_name[i]); + } + if (!rule_data->mcast_flow) + seq_printf(m, "rule action to vsi %u\n", rule_data->vsi); + else + nbl_flow_mcc_group_dump(m, rule_data->mcc_group); } static void nbl_flow_dump_flow(void *priv, struct seq_file *m) @@ -1298,28 +2215,45 @@ static void nbl_flow_dump_flow(void *priv, struct seq_file *m) struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); - struct nbl_flow_multi_group *multi_group; + struct nbl_flow_switch_res *switch_res; + struct nbl_flow_mcc_node *mcc_node; struct nbl_flow_lldp_rule *lldp_rule; struct nbl_flow_lacp_rule *lacp_rule; + struct nbl_flow_fem_entry *entry; struct nbl_hash_xy_tbl_scan_key scan_key; int i, j; - for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { - multi_group = &flow_mgt->multi_flow[i]; - seq_printf(m, "\nether_id %d, mcc_id %d, status %u\n" + !i, - multi_group->ether_id, multi_group->mcc_id, multi_group->network_status); - for (j = NBL_FLOW_MACVLAN_MAX; j < NBL_FLOW_TYPE_MAX; j++) - nbl_flow_id_dump(m, &multi_group->entry[j - NBL_FLOW_MACVLAN_MAX], - templete_name[j]); - } - NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_SHOW, NBL_HASH_TBL_ALL_SCAN, false, NULL, NULL, NULL, NULL, m, &nbl_flow_macvlan_node_show_action_func); - for (i = 0; i < NBL_MAX_ETHERNET; i++) - nbl_common_scan_hash_xy_node(flow_mgt->mac_hash_tbl[i], &scan_key); - seq_puts(m, "\n"); + seq_printf(m, "\n flow_mgt flow_id_cnt %u, pp_tcam_count %u, accel_flow_count %u, vsi_max_per_switch %u.\n", + flow_mgt->flow_id_cnt, flow_mgt->pp_tcam_count, + flow_mgt->accel_flow_count, flow_mgt->vsi_max_per_switch); + for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + switch_res = &flow_mgt->switch_res[i]; + seq_printf(m, "\nether_id %d, status %u\n", + switch_res->ether_id, switch_res->network_status); + entry = &switch_res->allmulti_up[0]; + for (j = NBL_FLOW_L2_UP_MULTI_MCAST; j < NBL_FLOW_UP_MULTI_MCAST_END; j++) + nbl_flow_id_dump(m, &entry[j - NBL_FLOW_L2_UP_MULTI_MCAST], + templete_name[j]); + entry = &switch_res->allmulti_down[0]; + for (j = NBL_FLOW_L2_DOWN_MULTI_MCAST; j < NBL_FLOW_DOWN_MULTI_MCAST_END; j++) + nbl_flow_id_dump(m, &entry[j - NBL_FLOW_L2_DOWN_MULTI_MCAST], + templete_name[j]); + seq_printf(m, "\nether_id %d, mcc head list\n", switch_res->ether_id); + list_for_each_entry(mcc_node, &switch_res->allmulti_head, node) + nbl_flow_mcc_node_dump(m, mcc_node); + seq_printf(m, "\n\nether_id %d, mcc body list\n", switch_res->ether_id); + list_for_each_entry(mcc_node, &switch_res->allmulti_list, node) + nbl_flow_mcc_node_dump(m, mcc_node); + seq_printf(m, "\nnumber vf %u, active vf %u, vf bitmap: %*pb\n", + switch_res->num_vfs, switch_res->active_vfs, + switch_res->num_vfs, switch_res->vf_bitmap); + nbl_common_scan_hash_xy_node(switch_res->mac_hash_tbl, &scan_key); + seq_puts(m, "\n"); + } list_for_each_entry(lldp_rule, &flow_mgt->lldp_list, node) seq_printf(m, "LLDP rule: vsi 
%d\n", lldp_rule->vsi); @@ -1329,6 +2263,328 @@ static void nbl_flow_dump_flow(void *priv, struct seq_file *m) seq_printf(m, "LACP rule: vsi %d\n", lacp_rule->vsi); } +static int nbl_flow_add_ktls_rx_flow(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_common_info *common; + struct nbl_flow_ul4s_rule *rule; + struct nbl_flow_param param = {0}; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + + list_for_each_entry(rule, &flow_mgt->ul4s_head, node) + if (rule->index == index) + return -EEXIST; + + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); + if (!rule) + return -ENOMEM; + + param.index = index; + param.data = data; + param.vsi = vsi; + param.eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + if (data[0] == AF_INET6) + param.type = NBL_KT_FULL_MODE; + if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_TLS_UP, &rule->ul4s_entry)) { + kfree(rule); + return -EFAULT; + } + + rule->index = index; + rule->vsi = vsi; + list_add_tail(&rule->node, &flow_mgt->ul4s_head); + return 0; +} + +static void nbl_flow_del_ktls_rx_flow(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_flow_ul4s_rule *rule; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + + list_for_each_entry(rule, &flow_mgt->ul4s_head, node) + if (rule->index == index) + break; + + if (nbl_list_entry_is_head(rule, &flow_mgt->ul4s_head, node)) + return; + + nbl_flow_del_flow(res_mgt, &rule->ul4s_entry); + list_del(&rule->node); + kfree(rule); +} + +static int nbl_flow_add_ipsec_tx_flow(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_common_info *common; + struct nbl_flow_dipsec_rule *rule; + struct nbl_flow_param param = {0}; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + + list_for_each_entry(rule, &flow_mgt->dprbac_head, node) + if (rule->index == index) + return -EEXIST; + + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); + if (!rule) + return -ENOMEM; + + param.index = index; + param.data = data; + param.vsi = vsi; + param.eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + if (data[0] == AF_INET6) + param.type = NBL_KT_FULL_MODE; + if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_IPSEC_DOWN, &rule->dipsec_entry)) { + kfree(rule); + return -EFAULT; + } + + rule->index = index; + rule->vsi = vsi; + list_add_tail(&rule->node, &flow_mgt->dprbac_head); + return 0; +} + +static void nbl_flow_del_ipsec_tx_flow(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_flow_dipsec_rule *rule; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + + list_for_each_entry(rule, &flow_mgt->dprbac_head, node) + if (rule->index == index) + break; + + if (nbl_list_entry_is_head(rule, &flow_mgt->dprbac_head, node)) + return; + + nbl_flow_del_flow(res_mgt, &rule->dipsec_entry); + list_del(&rule->node); + kfree(rule); +} + +static void nbl_res_flr_clear_accel_flow(void *priv, u16 vf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = vf_id + NBL_MAX_PF; + u16 vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); + + if (nbl_res_vf_is_active(priv, func_id)) + nbl_flow_clear_accel_flow(priv, vsi_id); +} + +static void 
nbl_res_flr_clear_flow(void *priv, u16 vf_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	u16 func_id = vf_id + NBL_MAX_PF;
+	u16 vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE);
+
+	if (nbl_res_vf_is_active(priv, func_id))
+		nbl_flow_clear_flow(priv, vsi_id);
+}
+
+static void nbl_res_flow_del_nd_upcall_flow(void *priv)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_resource_info *info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	struct nbl_flow_nd_upcall_rule *rule = NULL;
+	int i;
+
+	/* guard against underflow: a delete with no references is a no-op */
+	if (!info->nd_upcall_refnt)
+		return;
+
+	info->nd_upcall_refnt--;
+	if (info->nd_upcall_refnt > 0) {
+		nbl_info(common, NBL_DEBUG_FLOW, "nd upcall flow reference count %d",
+			 info->nd_upcall_refnt);
+		return;
+	}
+
+	rule = list_entry(flow_mgt->nd_upcall_list.next, struct nbl_flow_nd_upcall_rule, node);
+	if (nbl_list_entry_is_head(rule, &flow_mgt->nd_upcall_list, node))
+		return;
+
+	for (i = 0; i < NBL_FLOW_PMD_ND_UPCALL_FLOW_NUM; i++)
+		nbl_flow_del_flow(res_mgt, &rule->entry[i]);
+
+	list_del(&rule->node);
+	kfree(rule);
+	nbl_info(common, NBL_DEBUG_FLOW, "deleting all flows for nd upcall");
+}
+
+static int nbl_res_flow_add_nd_upcall_flow(void *priv, u16 vsi, bool for_pmd)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_resource_info *info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_flow_nd_upcall_rule *rule;
+	struct nbl_flow_param param = {0};
+
+	/* TC case: use refcount to track adding flow */
+	if (info->nd_upcall_refnt && !for_pmd) {
+		info->nd_upcall_refnt++;
+		nbl_info(common, NBL_DEBUG_FLOW, "tc: nd upcall flow reference count %d",
+			 info->nd_upcall_refnt);
+		return 0;
+	}
+
+	/* PMD case: if nd flows exist, simply delete them and add flow again */
+	if (info->nd_upcall_refnt && for_pmd) {
+		nbl_info(common, NBL_DEBUG_FLOW, "pmd active: nd upcall flow will be reset");
+		nbl_res_flow_del_nd_upcall_flow(priv);
+	}
+
+	nbl_info(common, NBL_DEBUG_FLOW, "adding all flows for nd upcall");
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+	if (!rule)
+		return -ENOMEM;
+
+	param.vsi = vsi;
+	param.for_pmd = for_pmd;
+	param.ether_type = ETH_P_IPV6;
+	param.priv_data = NBL_DUPPKT_PTYPE_NA;
+	if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_PMD_ND_UPCALL,
+			      &rule->entry[NBL_FLOW_PMD_ND_UPCALL_NA])) {
+		nbl_err(common, NBL_DEBUG_FLOW, "Failed to add icmpv6 na flow for vsi %d", vsi);
+		kfree(rule);
+		return -EFAULT;
+	}
+
+	param.priv_data = NBL_DUPPKT_PTYPE_NS;
+	if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_PMD_ND_UPCALL,
+			      &rule->entry[NBL_FLOW_PMD_ND_UPCALL_NS])) {
+		nbl_flow_del_flow(res_mgt, &rule->entry[NBL_FLOW_PMD_ND_UPCALL_NA]);
+		nbl_err(common, NBL_DEBUG_FLOW, "Failed to add icmpv6 ns flow for vsi %d", vsi);
+		kfree(rule);
+		return -EFAULT;
+	}
+
+	info->nd_upcall_refnt++;
+	list_add_tail(&rule->node, &flow_mgt->nd_upcall_list);
+	return 0;
+}
+
+static int nbl_res_flow_check_flow_table_spec(void *priv, u16 vlan_cnt,
+					      u16 unicast_cnt, u16 multicast_cnt)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	u32 reserve_cnt = nbl_flow_get_reserve_macvlan_cnt(res_mgt);
+	u32 need = vlan_cnt * (3 * unicast_cnt + 2 * multicast_cnt);
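+
+	/* each vlan takes 3 entries per unicast mac (up-tnl, up, down) and
+	 * 2 per multicast mac (up, down); e.g. 2 vlans with 1 unicast and
+	 * 1 multicast mac need 2 * (3 + 2) = 10 entries
+	 */
+	if (reserve_cnt + need > flow_mgt->flow_id_cnt)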
+		return -ENOSPC;
+
+	return 0;
+}
+
+static int nbl_res_set_mtu(void *priv, u16 vsi_id, u16 mtu)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_mtu_entry *mtu_list = &res_mgt->resource_info->mtu_list[0];
+	int i, found_idx = -1, first_zero_idx = -1;
+	u16 real_mtu = mtu + ETH_HLEN + 2 * VLAN_HLEN;
+
+	nbl_clear_mtu_entry(res_mgt, vsi_id);
+	if (mtu == 0)
+		return 0;
+
+	for (i = 0; i < NBL_MAX_MTU; i++) {
+		if (mtu_list[i].mtu_value == real_mtu) {
+			found_idx = i;
+			break;
+		}
+
+		if (!mtu_list[i].mtu_value)
+			first_zero_idx = i;
+	}
+
+	/* table full and no matching entry: keep the vsi on the default mtu */
+	if (first_zero_idx == -1 && found_idx == -1)
+		return 0;
+
+	if (found_idx != -1) {
+		mtu_list[found_idx].ref_count++;
+		phy_ops->set_vsi_mtu(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, found_idx + 1);
+		return 0;
+	}
+
+	if (first_zero_idx != -1) {
+		mtu_list[first_zero_idx].ref_count++;
+		mtu_list[first_zero_idx].mtu_value = real_mtu;
+		phy_ops->set_vsi_mtu(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, first_zero_idx + 1);
+		phy_ops->set_mtu(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), first_zero_idx + 1, real_mtu);
+	}
+
+	return 0;
+}
+
+static int nbl_flow_handle_mirror_outputport_event(u16 type, void *event_data, void *callback_data)
+{
+	int i;
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)callback_data;
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_event_mirror_outputport_data *mirror_outputport =
+		(struct nbl_event_mirror_outputport_data *)event_data;
+
+	if (mirror_outputport->opcode) {
+		for (i = 0; i < NBL_MIRROR_OUTPUTPORT_MAX_FUNC; i++) {
+			if (flow_mgt->mirror_outputport_func[i] == mirror_outputport->func_id)
+				return 0;
+		}
+		for (i = 0; i < NBL_MIRROR_OUTPUTPORT_MAX_FUNC; i++) {
+			if (flow_mgt->mirror_outputport_func[i] == U16_MAX) {
+				flow_mgt->mirror_outputport_func[i] = mirror_outputport->func_id;
+				break;
+			}
+		}
+
+		/* i reaches the bound only when no free slot was found */
+		if (i >= NBL_MIRROR_OUTPUTPORT_MAX_FUNC)
+			nbl_err(common, NBL_DEBUG_FLOW, "Macvlan blacklist exceeds max func:%d",
+				mirror_outputport->func_id);
+	} else {
+		for (i = 0; i < NBL_MIRROR_OUTPUTPORT_MAX_FUNC; i++) {
+			if (flow_mgt->mirror_outputport_func[i] == mirror_outputport->func_id) {
+				flow_mgt->mirror_outputport_func[i] = U16_MAX;
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void nbl_flow_cfg_mirror_outputport_event(void *priv, bool enable)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_event_callback event_callback = {0};
+
+	event_callback.callback_data = res_mgt;
+	event_callback.callback = nbl_flow_handle_mirror_outputport_event;
+	if (enable)
+		nbl_event_register(NBL_EVENT_MIRROR_OUTPUTPORT, &event_callback,
+				   NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+	else
+		nbl_event_unregister(NBL_EVENT_MIRROR_OUTPUTPORT, &event_callback,
+				     NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+}
+
 /* NBL_FLOW_SET_OPS(ops_name, func)
  *
  * Use X Macros to reduce setup and remove codes.
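The comment above refers to the X-macro idiom: the op list is written once
inside a do/while block, and each expansion site temporarily defines
NBL_FLOW_SET_OPS to stamp out different code from the same list. A minimal
standalone sketch follows; the names are illustrative, not the driver's
actual ops or expansion sites.

struct demo_ops {
	int (*add_flow)(void);
	int (*del_flow)(void);
};

static int demo_add(void) { return 0; }
static int demo_del(void) { return 0; }

/* the op list, written exactly once */
#define DEMO_OPS_TBL				\
do {						\
	DEMO_SET_OPS(add_flow, demo_add);	\
	DEMO_SET_OPS(del_flow, demo_del);	\
} while (0)

/* setup site: point every slot at its implementation */
static void demo_setup(struct demo_ops *ops)
{
#define DEMO_SET_OPS(name, func) (ops->name = func)
	DEMO_OPS_TBL;
#undef DEMO_SET_OPS
}

/* remove site: clear the same slots without repeating the list */
static void demo_remove(struct demo_ops *ops)
{
#define DEMO_SET_OPS(name, func) (ops->name = NULL)
	DEMO_OPS_TBL;
#undef DEMO_SET_OPS
}

Adding an op to the list updates every expansion site at once, which keeps
the setup and remove paths from drifting apart.
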
@@ -1341,12 +2597,25 @@ do { \ NBL_FLOW_SET_OPS(del_lag_flow, nbl_flow_del_lag); \ NBL_FLOW_SET_OPS(add_lldp_flow, nbl_flow_add_lldp); \ NBL_FLOW_SET_OPS(del_lldp_flow, nbl_flow_del_lldp); \ - NBL_FLOW_SET_OPS(add_multi_rule, nbl_flow_add_multi_rule); \ - NBL_FLOW_SET_OPS(del_multi_rule, nbl_flow_del_multi_rule); \ + NBL_FLOW_SET_OPS(add_multi_mcast, nbl_flow_add_multi_mcast); \ + NBL_FLOW_SET_OPS(del_multi_mcast, nbl_flow_del_multi_mcast); \ NBL_FLOW_SET_OPS(setup_multi_group, nbl_flow_setup_multi_group); \ NBL_FLOW_SET_OPS(remove_multi_group, nbl_flow_remove_multi_group); \ + NBL_FLOW_SET_OPS(clear_accel_flow, nbl_flow_clear_accel_flow); \ NBL_FLOW_SET_OPS(clear_flow, nbl_flow_clear_flow); \ NBL_FLOW_SET_OPS(dump_flow, nbl_flow_dump_flow); \ + NBL_FLOW_SET_OPS(add_ktls_rx_flow, nbl_flow_add_ktls_rx_flow); \ + NBL_FLOW_SET_OPS(del_ktls_rx_flow, nbl_flow_del_ktls_rx_flow); \ + NBL_FLOW_SET_OPS(add_ipsec_tx_flow, nbl_flow_add_ipsec_tx_flow); \ + NBL_FLOW_SET_OPS(del_ipsec_tx_flow, nbl_flow_del_ipsec_tx_flow); \ + NBL_FLOW_SET_OPS(flr_clear_accel_flow, nbl_res_flr_clear_accel_flow); \ + NBL_FLOW_SET_OPS(flr_clear_flows, nbl_res_flr_clear_flow); \ + NBL_FLOW_SET_OPS(cfg_duppkt_mcc, nbl_res_flow_cfg_duppkt_mcc); \ + NBL_FLOW_SET_OPS(add_nd_upcall_flow, nbl_res_flow_add_nd_upcall_flow); \ + NBL_FLOW_SET_OPS(del_nd_upcall_flow, nbl_res_flow_del_nd_upcall_flow); \ + NBL_FLOW_SET_OPS(set_mtu, nbl_res_set_mtu); \ + NBL_FLOW_SET_OPS(cfg_mirror_outputport_event, nbl_flow_cfg_mirror_outputport_event); \ + NBL_FLOW_SET_OPS(check_flow_table_spec, nbl_res_flow_check_flow_table_spec); \ } while (0) static void nbl_flow_remove_mgt(struct device *dev, struct nbl_resource_mgt *res_mgt) @@ -1355,59 +2624,91 @@ static void nbl_flow_remove_mgt(struct device *dev, struct nbl_resource_mgt *res int i; struct nbl_hash_xy_tbl_del_key del_key; - nbl_common_remove_index_table(flow_mgt->mcc_tbl_priv); - NBL_HASH_XY_TBL_DEL_KEY_INIT(&del_key, res_mgt, &nbl_flow_macvlan_node_del_action_func); - for (i = 0; i < NBL_MAX_ETHERNET; i++) - nbl_common_remove_hash_xy_table(flow_mgt->mac_hash_tbl[i], &del_key); + for (i = 0; i < NBL_MAX_ETHERNET; i++) { + nbl_common_remove_hash_xy_table(flow_mgt->switch_res[i].mac_hash_tbl, &del_key); + if (flow_mgt->switch_res[i].vf_bitmap) + devm_kfree(dev, flow_mgt->switch_res[i].vf_bitmap); + } + if (flow_mgt->flow_id_bitmap) + devm_kfree(dev, flow_mgt->flow_id_bitmap); + if (flow_mgt->mcc_id_bitmap) + devm_kfree(dev, flow_mgt->mcc_id_bitmap); + flow_mgt->flow_id_cnt = 0; devm_kfree(dev, flow_mgt); NBL_RES_MGT_TO_FLOW_MGT(res_mgt) = NULL; } static int nbl_flow_setup_mgt(struct device *dev, struct nbl_resource_mgt *res_mgt) { - struct nbl_index_tbl_key mcc_tbl_key; struct nbl_hash_xy_tbl_key macvlan_tbl_key; struct nbl_flow_mgt *flow_mgt; struct nbl_eth_info *eth_info; int i; + int vf_num = -1; + u16 pf_id; flow_mgt = devm_kzalloc(dev, sizeof(struct nbl_flow_mgt), GFP_KERNEL); if (!flow_mgt) return -ENOMEM; NBL_RES_MGT_TO_FLOW_MGT(res_mgt) = flow_mgt; + eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + flow_mgt->flow_id_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(NBL_MACVLAN_TABLE_LEN), + sizeof(long), GFP_KERNEL); + if (!flow_mgt->flow_id_bitmap) + goto setup_mgt_failed; + flow_mgt->flow_id_cnt = NBL_MACVLAN_TABLE_LEN; - NBL_INDEX_TBL_KEY_INIT(&mcc_tbl_key, dev, NBL_FLOW_MCC_INDEX_START, - NBL_FLOW_MCC_INDEX_SIZE, sizeof(struct nbl_flow_mcc_index_key)); - flow_mgt->mcc_tbl_priv = nbl_common_init_index_table(&mcc_tbl_key); - if (!flow_mgt->mcc_tbl_priv) - goto alloc_mcc_tbl_failed; + 
flow_mgt->mcc_id_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(NBL_FLOW_MCC_INDEX_SIZE), + sizeof(long), GFP_KERNEL); + if (!flow_mgt->mcc_id_bitmap) + goto setup_mgt_failed; NBL_HASH_XY_TBL_KEY_INIT(&macvlan_tbl_key, dev, ETH_ALEN, sizeof(u16), - sizeof(struct nbl_flow_macvlan_node_data), + sizeof(struct nbl_flow_l2_data), NBL_MACVLAN_TBL_BUCKET_SIZE, NBL_MACVLAN_X_AXIS_BUCKET_SIZE, NBL_MACVLAN_Y_AXIS_BUCKET_SIZE, false); for (i = 0; i < NBL_MAX_ETHERNET; i++) { - (flow_mgt)->mac_hash_tbl[i] = nbl_common_init_hash_xy_table(&macvlan_tbl_key); - if (!flow_mgt->mac_hash_tbl[i]) - goto alloc_machash_fail; + INIT_LIST_HEAD(&flow_mgt->switch_res[i].allmulti_head); + INIT_LIST_HEAD(&flow_mgt->switch_res[i].allmulti_list); + INIT_LIST_HEAD(&flow_mgt->switch_res[i].mcc_group_head); + + flow_mgt->switch_res[i].mac_hash_tbl = + nbl_common_init_hash_xy_table(&macvlan_tbl_key); + if (!flow_mgt->switch_res[i].mac_hash_tbl) + goto setup_mgt_failed; + pf_id = find_first_bit((unsigned long *)ð_info->pf_bitmap[i], 8); + if (pf_id != 8) + vf_num = nbl_res_get_pf_vf_num(res_mgt, pf_id); + + if (vf_num != -1) { + flow_mgt->switch_res[i].num_vfs = vf_num; + flow_mgt->switch_res[i].vf_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(vf_num), + sizeof(long), GFP_KERNEL); + if (!flow_mgt->switch_res[i].vf_bitmap) + goto setup_mgt_failed; + } else { + flow_mgt->switch_res[i].num_vfs = 0; + flow_mgt->switch_res[i].vf_bitmap = NULL; + } + flow_mgt->switch_res[i].active_vfs = 0; } - for (i = 0; i < NBL_MAX_ETHERNET; i++) - INIT_LIST_HEAD(&flow_mgt->multi_flow[i].mcc_list); - + memset(flow_mgt->mirror_outputport_func, 0xff, sizeof(flow_mgt->mirror_outputport_func)); INIT_LIST_HEAD(&flow_mgt->lldp_list); INIT_LIST_HEAD(&flow_mgt->lacp_list); + INIT_LIST_HEAD(&flow_mgt->ul4s_head); + INIT_LIST_HEAD(&flow_mgt->dprbac_head); + INIT_LIST_HEAD(&flow_mgt->nd_upcall_list); - eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); - flow_mgt->unicast_mac_threshold = NBL_TOTAL_MACVLAN_NUM / eth_info->eth_num; + flow_mgt->vsi_max_per_switch = NBL_VSI_MAX_ID / eth_info->eth_num; return 0; -alloc_machash_fail: -alloc_mcc_tbl_failed: +setup_mgt_failed: nbl_flow_remove_mgt(dev, res_mgt); return -1; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h index 68c8ce7a0fef16173e4e4e470bef6b98e64edd91..c9f86483ebb57787fc6b9dde238485e78b4d1ded 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h @@ -10,62 +10,50 @@ #include "nbl_hw.h" #include "nbl_resource.h" -#define NBL_EM_PHY_KT_OFFSET (0x1F000) +#define NBL_EM_PHY_KT_OFFSET (0x1E000) -#define NBL_TOTAL_MACVLAN_NUM 2048 +#define NBL_TOTAL_MACVLAN_NUM 4096 #define NBL_MAX_ACTION_NUM 16 -#define NBL_SPORT_ETH_OFFSET 8 -#define NBL_MCC_NUM_PER_SWITCH 256 - -#define NBL_FLOW_MCC_INDEX_SIZE 1024 -#define NBL_FLOW_MCC_INDEX_START (7 * 1024) +#define NBL_FLOW_MCC_PXE_SIZE 8 +#define NBL_FLOW_MCC_INDEX_SIZE (4096 - NBL_FLOW_MCC_PXE_SIZE) +#define NBL_FLOW_MCC_INDEX_START (4 * 1024) +#define NBL_FLOW_MCC_BMC_DPORT 0x30D #define NBL_MACVLAN_TBL_BUCKET_SIZE 64 #define NBL_MACVLAN_X_AXIS_BUCKET_SIZE 64 #define NBL_MACVLAN_Y_AXIS_BUCKET_SIZE 16 +#define NBL_PP0_POWER 11 + enum nbl_flow_mcc_index_type { NBL_MCC_INDEX_ETH, NBL_MCC_INDEX_VSI, NBL_MCC_INDEX_BOND, + NBL_MCC_INDEX_BMC, }; -struct nbl_flow_mcc_index_key { - enum nbl_flow_mcc_index_type type; - union { - u8 eth_id; - u16 
vsi_id; - u32 data; - }; -}; - -#define NBL_FLOW_MCC_INDEX_KEY_INIT(key, key_type_arg, value_arg) \ -do { \ - typeof(key) __key = key; \ - typeof(key_type_arg) __type = key_type_arg; \ - typeof(value_arg) __value = value_arg; \ - __key->type = __type; \ - if (__type == NBL_MCC_INDEX_ETH) \ - __key->eth_id = __value; \ - else if (__type == NBL_MCC_INDEX_VSI || __type == NBL_MCC_INDEX_BOND) \ - __key->vsi_id = __value; \ -} while (0) - #pragma pack(1) #define NBL_DUPPKT_PTYPE_NA 135 #define NBL_DUPPKT_PTYPE_NS 136 -struct nbl_flow_macvlan_node_data { +struct nbl_flow_l2_data { struct nbl_flow_fem_entry entry[NBL_FLOW_MACVLAN_MAX]; - u16 vsi; + union { + struct nbl_flow_mcc_group *mcc_group; + u16 vsi; + }; + bool multi; + bool mcast_flow; + }; union nbl_l2_phy_up_data_u { struct nbl_l2_phy_up_data { u32 act0:22; - u64 rsv1:62; + u32 act1:22; + u64 rsv1:40; u32 padding:4; u32 sport:4; u32 svlan_id:16; @@ -95,95 +83,135 @@ union nbl_l2_phy_lldp_lacp_data_u { u8 hash_key[sizeof(struct nbl_l2_phy_lldp_lacp_data)]; }; -union nbl_l2_phy_down_data_u { - struct nbl_l2_phy_down_data { +union nbl_l2_phy_up_multi_mcast_data_u { + struct nbl_l2_phy_up_multi_mcast_data { u32 act0:22; - u32 rsv2:10; - u64 rsv1:52; - u32 padding:6; - u32 sport:2; - u32 svlan_id:16; - u64 dst_mac:48; + u32 rsv1:2; + u8 padding[16]; + u32 sport:4; u32 template:4; u32 rsv[5]; } __packed info; -#define NBL_L2_PHY_DOWN_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_down_data) \ +#define NBL_L2_PHY_UP_MULTI_MCAST_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_up_multi_mcast_data) \ / sizeof(u32)) - u32 data[NBL_L2_PHY_DOWN_DATA_TAB_WIDTH]; - u8 hash_key[sizeof(struct nbl_l2_phy_down_data)]; + u32 data[NBL_L2_PHY_UP_MULTI_MCAST_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_up_multi_mcast_data)]; }; -union nbl_l2_phy_up_multi_data_u { - struct nbl_l2_phy_up_multi_data { +union nbl_l2_phy_down_multi_mcast_data_u { + struct nbl_l2_phy_down_multi_mcast_data { u32 act0:22; - u32 act1:22; - u32 rsv2:20; - u64 rsv1:36; - u32 padding:4; - u32 sport:4; - u64 dst_mac:48; + u32 rsv1:2; + u8 rsv2[16]; + u32 padding:2; + u32 sport:2; u32 template:4; u32 rsv[5]; } __packed info; -#define NBL_L2_PHY_UP_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_up_multi_data) \ - / sizeof(u32)) - u32 data[NBL_L2_PHY_UP_MULTI_DATA_TAB_WIDTH]; - u8 hash_key[sizeof(struct nbl_l2_phy_up_multi_data)]; +#define NBL_L2_PHY_DOWN_MULTI_MCAST_DATA_TAB_WIDTH \ + (sizeof(struct nbl_l2_phy_down_multi_mcast_data) / sizeof(u32)) + u32 data[NBL_L2_PHY_DOWN_MULTI_MCAST_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_down_multi_mcast_data)]; }; -union nbl_l2_phy_down_multi_data_u { - struct nbl_l2_phy_down_multi_data { +union nbl_l2_phy_down_data_u { + struct nbl_l2_phy_down_data { u32 act0:22; u32 act1:22; - u32 rsv2:20; - u64 rsv1:36; + u64 rsv2:40; u32 padding:6; u32 sport:2; + u32 svlan_id:16; u64 dst_mac:48; u32 template:4; u32 rsv[5]; } __packed info; -#define NBL_L2_PHY_DOWN_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_down_multi_data) \ +#define NBL_L2_PHY_DOWN_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_down_data) \ / sizeof(u32)) - u32 data[NBL_L2_PHY_DOWN_MULTI_DATA_TAB_WIDTH]; - u8 hash_key[sizeof(struct nbl_l2_phy_down_multi_data)]; + u32 data[NBL_L2_PHY_DOWN_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_down_data)]; }; -union nbl_l3_phy_up_multi_data_u { - struct nbl_l3_phy_up_multi_data { +union nbl_phy_ul4s_data_u { + struct nbl_phy_ul4s_ipv4 { u32 act0:22; - u32 act1:22; - u32 rsv2:20; - u64 rsv1:60; - u32 padding:12; + u32 rsv2:10; + u32 
rsv1:24; u32 sport:4; - u64 dst_mac:16; + u32 l4_dport:16; + u32 l4_sport:16; + u32 dip_low:4; + u32 dip_high:28; + u32 sip_low:4; + u32 sip_high:28; u32 template:4; u32 rsv[5]; - } __packed info; -#define NBL_L3_PHY_UP_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l3_phy_up_multi_data) \ - / sizeof(u32)) - u32 data[NBL_L3_PHY_UP_MULTI_DATA_TAB_WIDTH]; - u8 hash_key[sizeof(struct nbl_l3_phy_up_multi_data)]; + } __packed ipv4_info; + struct nbl_phy_ul4s_ipv6 { + u64 act0:22; + u64 rsv3:42; + u64 rsv2; + u64 rsv1:8; + u64 sport:4; + u64 dport:16; + u64 l4_dport:16; + u64 l4_sport:16; + u64 sip3:4; + u64 sip2; + u64 sip1:60; + u64 template:4; + } __packed ipv6_info; + u32 data[NBL_KT_BYTE_LEN / 4]; + u8 hash_key[NBL_KT_BYTE_LEN]; }; -union nbl_l3_phy_down_multi_data_u { - struct nbl_l3_phy_down_multi_data { +union nbl_phy_dprbac_data_u { + struct nbl_phy_dprbac_ipv4 { u32 act0:22; - u32 act1:22; - u32 rsv3:20; - u64 rsv2; - u64 rsv1:4; - u32 padding:6; + u32 rsv2:10; + u64 rsv1:56; + u32 padding:2; + u32 sport:2; + u32 dip_low:4; + u32 dip_high:28; + u32 sip_low:4; + u32 sip_high:28; + u32 template:4; + u32 rsv[5]; + } __packed ipv4_info; + struct nbl_phy_dprbac_ipv6 { + u32 act0:22; + u32 rsv2:10; + u64 rsv1:24; + u32 padding:2; u32 sport:2; - u64 dst_mac:16; + u64 dip3:36; + u64 dip2; + u64 dip1:28; + u64 sip3:36; + u64 sip2; + u64 sip1:28; + u32 template:4; + } __packed ipv6_info; + u32 data[NBL_KT_BYTE_LEN / 4]; + u8 hash_key[NBL_KT_BYTE_LEN]; +}; + +union nbl_nd_upcall_data_u { + struct nbl_nd_upcall_data { + u32 act0:22; + u32 act1:22; + u32 rsv2:4; + u8 padding[10]; + u32 rsv1:12; + u32 ptype:16; u32 template:4; u32 rsv[5]; } __packed info; -#define NBL_L3_PHY_DOWN_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l3_phy_down_multi_data) \ +#define NBL_PMD_ND_UPCALL_DATA_TAB_WIDTH (sizeof(struct nbl_nd_upcall_data) \ / sizeof(u32)) - u32 data[NBL_L3_PHY_DOWN_MULTI_DATA_TAB_WIDTH]; - u8 hash_key[sizeof(struct nbl_l3_phy_down_multi_data)]; + u32 data[NBL_PMD_ND_UPCALL_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_nd_upcall_data)]; }; union nbl_common_data_u { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c index d850704357d54d3761cafa0a724909cce20f0d07..d85c6ddf86c3787689208fd61c53f5e80b16be86 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c @@ -6,6 +6,43 @@ #include "nbl_phy_leonis.h" #include "nbl_hw/nbl_p4_actions.h" +#include "nbl_hw/nbl_hw_leonis/base/nbl_datapath.h" +#include "nbl_hw/nbl_hw_leonis/base/nbl_ppe.h" +#include "nbl_hw/nbl_hw_leonis/base/nbl_intf.h" +#include "nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h" +#include "nbl_phy_leonis_regs.h" + +static int dvn_descreq_num_cfg = DEFAULT_DVN_DESCREQ_NUMCFG; /* default 8 and 8 */ +module_param(dvn_descreq_num_cfg, int, 0); +MODULE_PARM_DESC(dvn_descreq_num_cfg, "bit[31:16]:split ring,support 8/16," + " bit[15:0]:packed ring, support 8/12/16/20/24/28/32"); + +static u32 nbl_phy_dump_registers[] = { + NBL_UVN_DIF_DELAY_REQ, + NBL_UVN_DIF_DELAY_TIME, + NBL_UVN_DIF_DELAY_MAX, + NBL_UVN_DESC_PRE_DESC_REQ_NULL, + NBL_UVN_DESC_PRE_DESC_REQ_LACK, + NBL_UVN_DESC_RD_DROP_DESC_LACK, + NBL_DVN_DESCRD_L2_UNAVAIL_CNT, + NBL_DVN_DESCRD_L2_NOAVAIL_CNT, + NBL_USTORE_BUF_TOTAL_DROP_PKT, + NBL_USTORE_BUF_TOTAL_TRUN_PKT +}; + +static u32 nbl_phy_get_quirks(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = priv; + u32 
quirks; + + nbl_hw_read_mbx_regs(phy_mgt, NBL_LEONIS_QUIRKS_OFFSET, + (u8 *)&quirks, sizeof(u32)); + + if (quirks == NBL_LEONIS_ILLEGAL_REG_VALUE) + return 0; + + return quirks; +} static int nbl_send_kt_data(struct nbl_phy_mgt *phy_mgt, union nbl_fem_kt_acc_ctrl_u *kt_ctrl, u8 *data, struct nbl_common_info *common) @@ -162,114 +199,6 @@ static void nbl_phy_fem_clear_tcam_ad(struct nbl_phy_mgt *phy_mgt) } } -static int nbl_phy_fem_em0_pt_phy_l2_init(struct nbl_phy_mgt *phy_mgt, int pt_idx) -{ - union nbl_fem_profile_tbl_u em0_pt_tbl = {.info = {0}}; - - em0_pt_tbl.info.pt_vld = 1; - em0_pt_tbl.info.pt_hash_sel0 = 0; - em0_pt_tbl.info.pt_hash_sel1 = 3; - - switch (pt_idx) { - case NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_12; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_UP_UNICAST_L2: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_12; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_DOWN_UNICAST_L2: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_4; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_UP_MULTICAST_L2: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_68; - em0_pt_tbl.info.pt_act_num = 2; - break; - case NBL_EM0_PT_PHY_DOWN_MULTICAST_L2: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_60; - em0_pt_tbl.info.pt_act_num = 2; - break; - case NBL_EM0_PT_PHY_UP_MULTICAST_L3: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_36; - em0_pt_tbl.info.pt_act_num = 2; - break; - case NBL_EM0_PT_PHY_DOWN_MULTICAST_L3: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_28; - em0_pt_tbl.info.pt_act_num = 2; - break; - case NBL_EM0_PT_PHY_DPRBAC_IPV4: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_DPRBAC_IPV6: - em0_pt_tbl.info.pt_key_size = 1; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_64 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_128; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_UL4S_IPV4: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_32; - em0_pt_tbl.info.pt_mask_bmap2 = 
NBL_EM_PT_MASK2_SEC_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_UL4S_IPV6: - em0_pt_tbl.info.pt_key_size = 1; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_112; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - default: - return -EOPNOTSUPP; - } - - nbl_hw_write_regs(phy_mgt, NBL_FEM0_PROFILE_TABLE(pt_idx), em0_pt_tbl.data, - NBL_FEM_PROFILE_TBL_WIDTH); - return 0; -} - -static __maybe_unused int nbl_phy_fem_em0_pt_init(struct nbl_phy_mgt *phy_mgt) -{ - int i, ret = 0; - - for (i = NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2; i <= NBL_EM0_PT_PHY_UL4S_IPV6; i++) { - ret = nbl_phy_fem_em0_pt_phy_l2_init(phy_mgt, i); - if (ret) - return ret; - } - - return 0; -} - static int nbl_phy_set_ht(void *priv, u16 hash, u16 hash_other, u8 ht_table, u8 bucket, u32 key_index, u8 valid) { @@ -455,14 +384,20 @@ static void nbl_phy_del_tcam(void *priv, u32 index, u8 key_type, u8 pp_type) ad_table.hash_key, NBL_FLOW_AD_TOTAL_LEN); } -static int nbl_phy_add_mcc(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 action) +static int nbl_phy_add_mcc(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mcc_id, u16 action) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; struct nbl_mcc_tbl node = {0}; node.vld = 1; - node.next_pntr = 0; - node.tail = 1; + if (next_mcc_id == NBL_MCC_ID_INVALID) { + node.next_pntr = 0; + node.tail = 1; + } else { + node.next_pntr = next_mcc_id; + node.tail = 0; + } + node.stateid_filter = 1; node.flowid_filter = 1; node.dport_act = action; @@ -504,6 +439,59 @@ static void nbl_phy_del_mcc(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mc nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), (u8 *)&node, sizeof(node)); } +static void nbl_phy_update_mcc_next_node(void *priv, u16 mcc_id, u16 next_mcc_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_mcc_tbl node = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), + (u8 *)&node, sizeof(node)); + if (next_mcc_id != NBL_MCC_ID_INVALID) { + node.next_pntr = next_mcc_id; + node.tail = 0; + } else { + node.next_pntr = 0; + node.tail = 1; + } + + nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), + (u8 *)&node, sizeof(node)); +} + +static int nbl_phy_add_tnl_encap(void *priv, const u8 encap_buf[], u16 encap_idx, + union nbl_flow_encap_offset_tbl_u encap_idx_info) +{ + u8 id; + u8 temp = 0; + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u8 send_buf[NBL_FLOW_ACTION_ENCAP_TOTAL_LEN] = { 0 }; + + memcpy(send_buf, encap_buf, NBL_FLOW_ACTION_ENCAP_MAX_LEN); + + for (id = 0; id < NBL_FLOW_ACTION_ENCAP_HALF_LEN; id++) { + temp = send_buf[id]; + send_buf[id] = send_buf[NBL_FLOW_ACTION_ENCAP_MAX_LEN - 1 - id]; + send_buf[NBL_FLOW_ACTION_ENCAP_MAX_LEN - 1 - id] = temp; + } + + memcpy(&send_buf[NBL_FLOW_ACTION_ENCAP_MAX_LEN], + encap_idx_info.data, NBL_FLOW_ACTION_ENCAP_OFFSET_LEN); + + nbl_hw_write_regs(phy_mgt, NBL_DPED_TAB_TNL_REG(encap_idx), + (u8 *)send_buf, NBL_FLOW_ACTION_ENCAP_TOTAL_LEN); + + return 0; +} + +static void nbl_phy_del_tnl_encap(void *priv, u16 encap_idx) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u8 send_buf[NBL_FLOW_ACTION_ENCAP_TOTAL_LEN] = { 0 }; + + nbl_hw_write_regs(phy_mgt, NBL_DPED_TAB_TNL_REG(encap_idx), + (u8 *)send_buf, NBL_FLOW_ACTION_ENCAP_TOTAL_LEN); +} + static int nbl_phy_init_fem(void *priv) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; @@ -528,12 +516,12 @@ 
static int nbl_phy_init_fem(void *priv) static void nbl_configure_dped_checksum(struct nbl_phy_mgt *phy_mgt) { - struct dped_l4_ck_cmd_40 l4_ck_cmd_40; + union dped_l4_ck_cmd_40_u l4_ck_cmd_40; /* DPED dped_l4_ck_cmd_40 for sctp */ nbl_hw_read_regs(phy_mgt, NBL_DPED_L4_CK_CMD_40_ADDR, (u8 *)&l4_ck_cmd_40, sizeof(l4_ck_cmd_40)); - l4_ck_cmd_40.en = 1; + l4_ck_cmd_40.info.en = 1; nbl_hw_write_regs(phy_mgt, NBL_DPED_L4_CK_CMD_40_ADDR, (u8 *)&l4_ck_cmd_40, sizeof(l4_ck_cmd_40)); } @@ -611,6 +599,8 @@ static void nbl_shaping_eth_init(struct nbl_phy_mgt *phy_mgt, u8 eth_id, u8 spee static int nbl_shaping_init(struct nbl_phy_mgt *phy_mgt, u8 speed) { struct dsch_psha_en psha_en = {0}; + struct nbl_shaping_net net_shaping = {0}; + int i; for (i = 0; i < NBL_MAX_ETHERNET; i++) @@ -619,6 +609,9 @@ static int nbl_shaping_init(struct nbl_phy_mgt *phy_mgt, u8 speed) psha_en.en = 0xF; nbl_hw_write_regs(phy_mgt, NBL_DSCH_PSHA_EN_ADDR, (u8 *)&psha_en, sizeof(psha_en)); + for (i = 0; i < NBL_MAX_FUNC; i++) + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET_REG(i), + (u8 *)&net_shaping, sizeof(net_shaping)); return 0; } @@ -661,6 +654,11 @@ static int nbl_ustore_init(struct nbl_phy_mgt *phy_mgt, u8 eth_num) nbl_hw_write_regs(phy_mgt, NBL_USTORE_PORT_DROP_TH_REG_ARR(i), (u8 *)&drop_th, sizeof(drop_th)); + for (i = 0; i < NBL_MAX_ETHERNET; i++) { + nbl_hw_rd32(phy_mgt, NBL_USTORE_BUF_PORT_DROP_PKT(i)); + nbl_hw_rd32(phy_mgt, NBL_USTORE_BUF_PORT_TRUN_PKT(i)); + } + return 0; } @@ -704,8 +702,20 @@ static int nbl_dstore_init(struct nbl_phy_mgt *phy_mgt, u8 speed) return 0; } -static void nbl_dvn_descreq_num_cfg(struct nbl_phy_mgt *phy_mgt, u32 descreq_num) +static int nbl_ul4s_init(struct nbl_phy_mgt *phy_mgt) +{ + struct ul4s_sch_pad sch_pad; + + nbl_hw_read_regs(phy_mgt, NBL_UL4S_SCH_PAD_ADDR, (u8 *)&sch_pad, sizeof(sch_pad)); + sch_pad.en = 1; + nbl_hw_write_regs(phy_mgt, NBL_UL4S_SCH_PAD_ADDR, (u8 *)&sch_pad, sizeof(sch_pad)); + + return 0; +} + +static void nbl_dvn_descreq_num_cfg(void *priv, u32 descreq_num) { + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; struct nbl_dvn_descreq_num_cfg descreq_num_cfg = { 0 }; u32 packet_ring_prefect_num = descreq_num & 0xffff; u32 split_ring_prefect_num = (descreq_num >> 16) & 0xffff; @@ -722,6 +732,42 @@ static void nbl_dvn_descreq_num_cfg(struct nbl_phy_mgt *phy_mgt, u32 descreq_num (u8 *)&descreq_num_cfg, sizeof(descreq_num_cfg)); } +static u32 nbl_dvn_descreq_num_get(void *priv) +{ + u16 split_req; + u16 packed_req; + struct nbl_dvn_descreq_num_cfg descreq_num_cfg = { 0 }; + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_hw_read_regs(phy_mgt, NBL_DVN_DESCREQ_NUM_CFG, + (u8 *)&descreq_num_cfg, sizeof(descreq_num_cfg)); + + split_req = (descreq_num_cfg.avring_cfg_num + 1) * 8; + packed_req = descreq_num_cfg.packed_l1_num * 4 + 8; + + return (split_req << 16) + packed_req; +} + +static void nbl_phy_cfg_dvn_bp_mask(struct dvn_back_pressure_mask *mask, u8 eth_id, bool enable) +{ + switch (eth_id) { + case 0: + mask->dstore_port0_flag = enable; + break; + case 1: + mask->dstore_port1_flag = enable; + break; + case 2: + mask->dstore_port2_flag = enable; + break; + case 3: + mask->dstore_port3_flag = enable; + break; + default: + return; + } +} + static int nbl_dvn_init(struct nbl_phy_mgt *phy_mgt, u8 speed) { struct nbl_dvn_desc_wr_merge_timeout timeout = {0}; @@ -740,19 +786,29 @@ static int nbl_dvn_init(struct nbl_phy_mgt *phy_mgt, u8 speed) if (speed == NBL_FW_PORT_SPEED_100G) nbl_dvn_descreq_num_cfg(phy_mgt, DEFAULT_DVN_100G_DESCREQ_NUMCFG); 
else
-		nbl_dvn_descreq_num_cfg(phy_mgt, DEFAULT_DVN_DESCREQ_NUMCFG);
+		nbl_dvn_descreq_num_cfg(phy_mgt, dvn_descreq_num_cfg);
 
 	return 0;
 }
 
 static int nbl_uvn_init(struct nbl_phy_mgt *phy_mgt)
 {
 	struct uvn_queue_err_mask mask = {0};
 	struct uvn_dif_req_ro_flag flag = {0};
+	struct uvn_desc_prefetch_init prefetch_init = {0};
 	u32 timeout = 119760; /* 200us 200000/1.67 */
+	u32 quirks;
+	struct uvn_desc_wr_timeout desc_wr_timeout = {0};
+	u16 wr_timeout = 0x12c;
 
 	nbl_hw_wr32(phy_mgt, NBL_UVN_DESC_RD_WAIT, timeout);
 
+	desc_wr_timeout.num = wr_timeout;
+	nbl_hw_write_regs(phy_mgt, NBL_UVN_DESC_WR_TIMEOUT,
+			  (u8 *)&desc_wr_timeout, sizeof(desc_wr_timeout));
+
 	flag.avail_rd = 1;
 	flag.desc_rd = 1;
 	flag.pkt_wr = 1;
@@ -763,6 +819,55 @@ static int nbl_uvn_init(struct nbl_phy_mgt *phy_mgt)
 	mask.dif_err = 1;
 	nbl_hw_write_regs(phy_mgt, NBL_UVN_QUEUE_ERR_MASK, (u8 *)&mask, sizeof(mask));
 
+	prefetch_init.num = NBL_UVN_DESC_PREFETCH_NUM;
+	prefetch_init.sel = 0;
+
+	quirks = nbl_phy_get_quirks(phy_mgt);
+
+	/* sel stays 0 only when the hw sets the prefetch-align quirk and
+	 * performance_mode does not override it
+	 */
+	if (performance_mode & BIT(NBL_QUIRKS_UVN_PREFETCH_ALIGN) ||
+	    !(quirks & BIT(NBL_QUIRKS_UVN_PREFETCH_ALIGN)))
+		prefetch_init.sel = 1;
+
+	nbl_hw_write_regs(phy_mgt, NBL_UVN_DESC_PREFETCH_INIT,
+			  (u8 *)&prefetch_init, sizeof(prefetch_init));
+
+	return 0;
+}
+
+static int nbl_uqm_init(struct nbl_phy_mgt *phy_mgt)
+{
+	struct nbl_uqm_que_type que_type = {0};
+	u32 cnt = 0;
+	int i;
+
+	nbl_hw_write_regs(phy_mgt, NBL_UQM_FWD_DROP_CNT, (u8 *)&cnt, sizeof(cnt));
+
+	nbl_hw_write_regs(phy_mgt, NBL_UQM_DROP_PKT_CNT, (u8 *)&cnt, sizeof(cnt));
+	nbl_hw_write_regs(phy_mgt, NBL_UQM_DROP_PKT_SLICE_CNT, (u8 *)&cnt, sizeof(cnt));
+	nbl_hw_write_regs(phy_mgt, NBL_UQM_DROP_PKT_LEN_ADD_CNT, (u8 *)&cnt, sizeof(cnt));
+	nbl_hw_write_regs(phy_mgt, NBL_UQM_DROP_HEAD_PNTR_ADD_CNT, (u8 *)&cnt, sizeof(cnt));
+	nbl_hw_write_regs(phy_mgt, NBL_UQM_DROP_WEIGHT_ADD_CNT, (u8 *)&cnt, sizeof(cnt));
+
+	for (i = 0; i < NBL_UQM_PORT_DROP_DEPTH; i++) {
+		nbl_hw_write_regs(phy_mgt, NBL_UQM_PORT_DROP_PKT_CNT + (sizeof(cnt) * i),
+				  (u8 *)&cnt, sizeof(cnt));
+		nbl_hw_write_regs(phy_mgt, NBL_UQM_PORT_DROP_PKT_SLICE_CNT + (sizeof(cnt) * i),
+				  (u8 *)&cnt, sizeof(cnt));
+		nbl_hw_write_regs(phy_mgt, NBL_UQM_PORT_DROP_PKT_LEN_ADD_CNT + (sizeof(cnt) * i),
+				  (u8 *)&cnt, sizeof(cnt));
+		nbl_hw_write_regs(phy_mgt, NBL_UQM_PORT_DROP_HEAD_PNTR_ADD_CNT + (sizeof(cnt) * i),
+				  (u8 *)&cnt, sizeof(cnt));
+		nbl_hw_write_regs(phy_mgt, NBL_UQM_PORT_DROP_WEIGHT_ADD_CNT + (sizeof(cnt) * i),
+				  (u8 *)&cnt, sizeof(cnt));
+	}
+
+	for (i = 0; i < NBL_UQM_DPORT_DROP_DEPTH; i++)
+		nbl_hw_write_regs(phy_mgt, NBL_UQM_DPORT_DROP_CNT + (sizeof(cnt) * i),
+				  (u8 *)&cnt, sizeof(cnt));
+
+	que_type.bp_drop = 0;
+	nbl_hw_write_regs(phy_mgt, NBL_UQM_QUE_TYPE, (u8 *)&que_type, sizeof(que_type));
+
+	return 0;
+}
+
@@ -774,12 +879,30 @@ static int nbl_dp_init(struct nbl_phy_mgt *phy_mgt, u8 speed, u8 eth_num)
 	nbl_dsch_qid_max_init(phy_mgt);
 	nbl_ustore_init(phy_mgt, eth_num);
 	nbl_dstore_init(phy_mgt, speed);
+	nbl_ul4s_init(phy_mgt);
 	nbl_dvn_init(phy_mgt, speed);
 	nbl_uvn_init(phy_mgt);
+	nbl_uqm_init(phy_mgt);
 
 	return 0;
 }
 
+static void nbl_epro_mirror_act_pri_init(struct nbl_phy_mgt *phy_mgt,
+					 struct nbl_epro_mirror_act_pri *cfg)
+{
+	struct nbl_epro_mirror_act_pri epro_mirror_act_pri_def = {
+		.car_idx_pri = EPRO_MIRROR_ACT_CARIDX_PRI,
+		.dqueue_pri = EPRO_MIRROR_ACT_DQUEUE_PRI,
+		.dport_pri = EPRO_MIRROR_ACT_DPORT_PRI,
+		.rsv = 0
+	};
+
+	if (cfg)
+		epro_mirror_act_pri_def = *cfg;
+
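+	/* program either the caller-supplied priorities or the defaults */
+	nbl_hw_write_regs(phy_mgt,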
NBL_EPRO_MIRROR_ACT_PRI_REG, (u8 *)&epro_mirror_act_pri_def, 1); +} + static struct nbl_epro_action_filter_tbl epro_action_filter_tbl_def[NBL_FWD_TYPE_MAX] = { [NBL_FWD_TYPE_NORMAL] = { BIT(NBL_MD_ACTION_MCIDX) | BIT(NBL_MD_ACTION_TABLE_INDEX) | @@ -796,6 +919,7 @@ static struct nbl_epro_action_filter_tbl epro_action_filter_tbl_def[NBL_FWD_TYPE BIT(NBL_MD_ACTION_VNI1) | BIT(NBL_MD_ACTION_PRBAC_IDX) | BIT(NBL_MD_ACTION_L4S_IDX) | BIT(NBL_MD_ACTION_DP_HASH0) | BIT(NBL_MD_ACTION_DP_HASH1) | BIT(NBL_MD_ACTION_MDF_PRI) | + BIT(NBL_MD_ACTION_FLOW_CARIDX) | ((u64)0xffffffff << 32)}, [NBL_FWD_TYPE_OTHER_MIRROR] = { BIT(NBL_MD_ACTION_FLOWID0) | BIT(NBL_MD_ACTION_FLOWID1) | @@ -825,6 +949,8 @@ static int nbl_epro_init(struct nbl_phy_mgt *phy_mgt) { u32 fwd_type = 0; + nbl_epro_mirror_act_pri_init(phy_mgt, NULL); + for (fwd_type = 0; fwd_type < NBL_FWD_TYPE_MAX; fwd_type++) nbl_epro_action_filter_cfg(phy_mgt, fwd_type, &epro_action_filter_tbl_def[fwd_type]); @@ -894,17 +1020,31 @@ static int nbl_intf_init(struct nbl_phy_mgt *phy_mgt) return 0; } +static void nbl_rdma_init(struct nbl_phy_mgt *phy_mgt) +{ + u32 data; + + data = nbl_hw_rd32(phy_mgt, NBL_TOP_CTRL_LB_CLK); + data |= NBL_TOP_CTRL_RDMA_LB_CLK; + nbl_hw_wr32(phy_mgt, NBL_TOP_CTRL_LB_CLK, data); + + data = nbl_hw_rd32(phy_mgt, NBL_TOP_CTRL_LB_RST); + data &= ~NBL_TOP_CTRL_RDMA_LB_RST; + nbl_hw_wr32(phy_mgt, NBL_TOP_CTRL_LB_RST, data); +} static int nbl_phy_init_chip_module(void *priv, u8 eth_speed, u8 eth_num) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_PHY, "phy_chip_init"); + nbl_rdma_init(phy_mgt); nbl_dp_init(phy_mgt, eth_speed, eth_num); nbl_ppe_init(phy_mgt); nbl_intf_init(phy_mgt); - phy_mgt->version = nbl_hw_rd32(phy_mgt, 0x1300904); + nbl_write_all_regs(phy_mgt); + phy_mgt->version = nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); return 0; } @@ -956,6 +1096,9 @@ static int nbl_phy_set_qid_map_table(void *priv, void *data, int qid_map_select) u64 reg; int i, j; + if (phy_mgt->hw_status) + return 0; + for (i = 0; i < param->len; i++) { j = 0; @@ -1057,12 +1200,12 @@ static int nbl_phy_set_vnet_queue_info(void *priv, struct nbl_vnet_queue_info_pa host_vnet_qinfo.valid = param->valid; host_vnet_qinfo.msix_idx = param->msix_idx; host_vnet_qinfo.msix_idx_valid = param->msix_idx_valid; -#ifndef NBL_DISABLE_RO + if (phy_mgt_leonis->ro_enable) { host_vnet_qinfo.ido_en = 1; host_vnet_qinfo.rlo_en = 1; } -#endif + nbl_hw_write_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(queue_id), (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); @@ -1375,7 +1518,8 @@ static void nbl_phy_deactive_shaping(void *priv, u16 func_id) (u8 *)&sha2net, sizeof(sha2net)); } -static int nbl_phy_set_shaping(void *priv, u16 func_id, u64 total_tx_rate, u8 vld, bool active) +static int nbl_phy_set_shaping(void *priv, u16 func_id, u64 total_tx_rate, u64 burst, + u8 vld, bool active) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; struct nbl_shaping_net shaping_net = {0}; @@ -1400,7 +1544,11 @@ static int nbl_phy_set_shaping(void *priv, u16 func_id, u64 total_tx_rate, u8 vl shaping_net.cir = total_tx_rate; /* pir equal cir */ shaping_net.pir = shaping_net.cir; - shaping_net.depth = max(shaping_net.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH); + if (burst) + shaping_net.depth = burst; + else + shaping_net.depth = max(shaping_net.cir * 2, + NBL_LR_LEONIS_NET_BUCKET_DEPTH); shaping_net.cbs = shaping_net.depth; shaping_net.pbs = shaping_net.depth; } @@ -1421,6 +1569,131 @@ static int nbl_phy_set_shaping(void 
*priv, u16 func_id, u64 total_tx_rate, u8 vl
 	return 0;
 }
 
+static void nbl_phy_set_offload_shaping(struct nbl_phy_mgt *phy_mgt,
+					struct nbl_chan_regs_info *reg_info, u32 *value)
+{
+	struct nbl_shaping_net *shaping_net;
+	struct dsch_vn_sha2net_map_tbl *sha2net;
+	struct dsch_vn_net2sha_map_tbl *net2sha;
+	struct dsch_vn_n2g_cfg_tbl dsch_info = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_N2G_CFG_TABLE_REG_ARR(reg_info->depth),
+			 (u8 *)&dsch_info, sizeof(dsch_info));
+
+	switch (reg_info->tbl_name) {
+	case NBL_FLOW_SHAPING_NET_REG:
+		shaping_net = (struct nbl_shaping_net *)value;
+		shaping_net->valid &= dsch_info.vld;
+		nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET(reg_info->depth),
+				  (u8 *)shaping_net, sizeof(*shaping_net));
+		break;
+	case NBL_FLOW_DSCH_VN_NET2SHA_MAP_TBL_REG:
+		sha2net = (struct dsch_vn_sha2net_map_tbl *)value;
+		sha2net->vld &= dsch_info.vld;
+		nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(reg_info->depth),
+				  (u8 *)sha2net, sizeof(*sha2net));
+		break;
+	case NBL_FLOW_DSCH_VN_SHA2NET_MAP_TBL_REG:
+		net2sha = (struct dsch_vn_net2sha_map_tbl *)value;
+		net2sha->vld &= dsch_info.vld;
+		nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(reg_info->depth),
+				  (u8 *)net2sha, sizeof(*net2sha));
+		break;
+	}
+}
+
+static int nbl_phy_set_ucar(void *priv, u16 vsi_id, u64 total_rx_rate, u64 burst,
+			    u8 vld)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt);
+	union ucar_flow_u ucar_flow = {.info = {0}};
+	union epro_vpt_u epro_vpt = {.info = {0}};
+	int car_id = 0;
+	int index = 0;
+
+	nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id),
+			 (u8 *)&epro_vpt, sizeof(epro_vpt));
+	if (vld) {
+		if (epro_vpt.info.car_en) {
+			car_id = epro_vpt.info.car_id;
+		} else {
+			epro_vpt.info.car_en = 1;
+			for (; index < 1024; index++) {
+				nbl_hw_read_regs(phy_mgt, NBL_UCAR_FLOW_REG(index),
+						 (u8 *)&ucar_flow, sizeof(ucar_flow));
+				if (ucar_flow.info.valid == 0) {
+					car_id = index;
+					break;
+				}
+			}
+			/* check the loop index, not car_id, for a full table */
+			if (index == 1024) {
+				nbl_err(common, NBL_DEBUG_PHY, "Car ID exceeds the valid range!");
+				return -ENOMEM;
+			}
+			epro_vpt.info.car_id = car_id;
+			nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id),
+					  (u8 *)&epro_vpt, sizeof(epro_vpt));
+		}
+	} else {
+		epro_vpt.info.car_en = 0;
+		car_id = epro_vpt.info.car_id;
+		epro_vpt.info.car_id = 0;
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id),
+				  (u8 *)&epro_vpt, sizeof(epro_vpt));
+	}
+
+	if (vld) {
+		ucar_flow.info.valid = 1;
+		ucar_flow.info.cir = total_rx_rate;
+		ucar_flow.info.pir = total_rx_rate;
+		if (burst)
+			ucar_flow.info.depth = burst;
+		else
+			ucar_flow.info.depth = NBL_UCAR_MAX_BUCKET_DEPTH;
+		ucar_flow.info.cbs = ucar_flow.info.depth;
+		ucar_flow.info.pbs = ucar_flow.info.depth;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_UCAR_FLOW_REG(car_id),
+			  (u8 *)&ucar_flow, sizeof(ucar_flow));
+
+	return 0;
+}
+
+static void nbl_phy_set_shaping_dport_vld(void *priv, u8 eth_id, bool vld)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_shaping_net shaping_net = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_SHAPING_DPORT_REG(eth_id),
+			 (u8 *)&shaping_net, sizeof(shaping_net));
+
+	shaping_net.valid = vld;
+
+	nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DPORT_REG(eth_id),
+			  (u8 *)&shaping_net, sizeof(shaping_net));
+}
+
+static void nbl_phy_set_dport_fc_th_vld(void *priv, u8 eth_id, bool vld)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct dstore_d_dport_fc_th fc_th = 
{0}; + + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(eth_id), + (u8 *)&fc_th, sizeof(fc_th)); + + if (vld) + fc_th.fc_en = 1; + else + fc_th.fc_en = 0; + + nbl_hw_write_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(eth_id), + (u8 *)&fc_th, sizeof(fc_th)); +} + static int nbl_phy_cfg_dsch_net_to_group(void *priv, u16 func_id, u16 group_id, u16 vld) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; @@ -1433,7 +1706,8 @@ static int nbl_phy_cfg_dsch_net_to_group(void *priv, u16 func_id, u16 group_id, return 0; } -static int nbl_phy_cfg_epro_rss_ret(void *priv, u32 index, u8 size_type, u32 q_num, u16 *queue_list) +static int nbl_phy_cfg_epro_rss_ret(void *priv, u32 index, u8 size_type, u32 q_num, + u16 *queue_list, const u32 *indir) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); @@ -1441,7 +1715,7 @@ static int nbl_phy_cfg_epro_rss_ret(void *priv, u32 index, u8 size_type, u32 q_n u32 table_id, table_end, group_count, odd_num, queue_id = 0; group_count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << size_type; - if (group_count > 256) { + if (group_count > NBL_EPRO_RSS_ENTRY_MAX_COUNT) { nbl_err(common, NBL_DEBUG_QUEUE, "Rss group entry size type %u exceed the max value %u", size_type, NBL_EPRO_RSS_ENTRY_SIZE_256); @@ -1467,34 +1741,63 @@ static int nbl_phy_cfg_epro_rss_ret(void *priv, u32 index, u8 size_type, u32 q_n nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), (u8 *)&rss_ret, sizeof(rss_ret)); - if (odd_num) { - rss_ret.vld1 = 1; - rss_ret.dqueue1 = queue_list[queue_id++]; - nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), - (u8 *)&rss_ret, sizeof(rss_ret)); - table_id++; - } + if (indir) { + if (odd_num) { + rss_ret.vld1 = 1; + rss_ret.dqueue1 = indir[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + table_id++; + } + + for (; table_id < table_end; table_id++) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = indir[queue_id++]; + rss_ret.vld1 = 1; + rss_ret.dqueue1 = indir[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + + if (odd_num) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = indir[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } + } else { + if (odd_num) { + rss_ret.vld1 = 1; + rss_ret.dqueue1 = queue_list[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + table_id++; + } - queue_id = queue_id % q_num; - for (; table_id < table_end; table_id++) { - rss_ret.vld0 = 1; - rss_ret.dqueue0 = queue_list[queue_id++]; - queue_id = queue_id % q_num; - rss_ret.vld1 = 1; - rss_ret.dqueue1 = queue_list[queue_id++]; queue_id = queue_id % q_num; - nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), - (u8 *)&rss_ret, sizeof(rss_ret)); - } + for (; table_id < table_end; table_id++) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = queue_list[queue_id++]; + queue_id = queue_id % q_num; + rss_ret.vld1 = 1; + rss_ret.dqueue1 = queue_list[queue_id++]; + queue_id = queue_id % q_num; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } - nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), - (u8 *)&rss_ret, sizeof(rss_ret)); + nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 
*)&rss_ret, sizeof(rss_ret)); - if (odd_num) { - rss_ret.vld0 = 1; - rss_ret.dqueue0 = queue_list[queue_id++]; - nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), - (u8 *)&rss_ret, sizeof(rss_ret)); + if (odd_num) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = queue_list[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } } return 0; @@ -1556,19 +1859,41 @@ static void nbl_phy_read_rss_indir(void *priv, u16 vsi_id, u32 *rss_indir, } } -static void nbl_phy_get_rss_alg_sel(void *priv, u8 eth_id, u8 *alg_sel) +static void nbl_phy_get_rss_alg_sel(void *priv, u16 vsi_id, u8 *alg_sel) { - struct nbl_epro_ept_tbl ept_tbl = {0}; + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_vpt_tbl epro_vpt_tbl = {0}; - nbl_hw_read_regs(priv, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl, - sizeof(struct nbl_epro_ept_tbl)); + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); - if (ept_tbl.lag_alg_sel == NBL_EPRO_RSS_ALG_TOEPLITZ_HASH) + if (epro_vpt_tbl.rss_alg_sel == NBL_EPRO_RSS_ALG_TOEPLITZ_HASH) *alg_sel = ETH_RSS_HASH_TOP; - else if (ept_tbl.lag_alg_sel == NBL_EPRO_RSS_ALG_CRC32) + else if (epro_vpt_tbl.rss_alg_sel == NBL_EPRO_RSS_ALG_CRC32) *alg_sel = ETH_RSS_HASH_CRC32; } +static int nbl_phy_set_rss_alg_sel(void *priv, u16 vsi_id, u8 alg_sel) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_vpt_tbl epro_vpt_tbl = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + + if (alg_sel == ETH_RSS_HASH_TOP) + epro_vpt_tbl.rss_alg_sel = NBL_EPRO_RSS_ALG_TOEPLITZ_HASH; + else if (alg_sel == ETH_RSS_HASH_CRC32) + epro_vpt_tbl.rss_alg_sel = NBL_EPRO_RSS_ALG_CRC32; + else + return -EOPNOTSUPP; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), + (u8 *)&epro_vpt_tbl, + sizeof(struct nbl_epro_vpt_tbl)); + return 0; +} + static int nbl_phy_init_epro_vpt_tbl(void *priv, u16 vsi_id) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; @@ -1610,13 +1935,25 @@ static int nbl_phy_set_epro_rss_pt(void *priv, u16 vsi_id, u16 rss_ret_base, u16 struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; struct nbl_epro_rss_pt_tbl epro_rss_pt_tbl = {0}; struct nbl_epro_vpt_tbl epro_vpt_tbl; + u16 entry_size; + + if (rss_entry_size > NBL_EPRO_RSS_ENTRY_MAX_SIZE) + entry_size = NBL_EPRO_RSS_ENTRY_MAX_SIZE; + else + entry_size = rss_entry_size; epro_rss_pt_tbl.vld = 1; - epro_rss_pt_tbl.entry_size = rss_entry_size; + epro_rss_pt_tbl.entry_size = entry_size; epro_rss_pt_tbl.offset0_vld = 1; epro_rss_pt_tbl.offset0 = rss_ret_base; - epro_rss_pt_tbl.offset1_vld = 0; - epro_rss_pt_tbl.offset1 = 0; + if (rss_entry_size > NBL_EPRO_RSS_ENTRY_MAX_SIZE) { + epro_rss_pt_tbl.offset1_vld = 1; + epro_rss_pt_tbl.offset1 = + rss_ret_base + (NBL_EPRO_RSS_ENTRY_SIZE_UNIT << entry_size); + } else { + epro_rss_pt_tbl.offset1_vld = 0; + epro_rss_pt_tbl.offset1 = 0; + } nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_PT_TABLE(vsi_id), (u8 *)&epro_rss_pt_tbl, sizeof(epro_rss_pt_tbl)); @@ -1668,13 +2005,13 @@ static int nbl_phy_disable_uvn(void *priv, u16 queue_id) return 0; } -static bool nbl_phy_is_txq_drain_out(struct nbl_phy_mgt *phy_mgt, u16 queue_id) +static bool nbl_phy_is_txq_drain_out(struct nbl_phy_mgt *phy_mgt, u16 queue_id, + struct dsch_vn_tc_q_list_tbl *tc_q_list) { - struct dsch_vn_tc_q_list_tbl tc_q_list = {0}; nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_TC_Q_LIST_TABLE_REG_ARR(queue_id), - (u8 *)&tc_q_list, 
sizeof(tc_q_list)); - if (!tc_q_list.regi && !tc_q_list.fly && !tc_q_list.vld) + (u8 *)tc_q_list, sizeof(*tc_q_list)); + if (!tc_q_list->regi && !tc_q_list->fly) return true; return false; @@ -1696,17 +2033,25 @@ static int nbl_phy_lso_dsch_drain(void *priv, u16 queue_id) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct dsch_vn_tc_q_list_tbl tc_q_list = {0}; + struct dsch_vn_q2tc_cfg_tbl info; int i = 0; + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + info.vld = 0; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); do { - if (nbl_phy_is_txq_drain_out(phy_mgt, queue_id)) + if (nbl_phy_is_txq_drain_out(phy_mgt, queue_id, &tc_q_list)) break; usleep_range(10, 20); } while (++i < NBL_DRAIN_WAIT_TIMES); if (i >= NBL_DRAIN_WAIT_TIMES) { - nbl_err(common, NBL_DEBUG_QUEUE, "nbl queue %u lso dsch drain\n", queue_id); + nbl_err(common, NBL_DEBUG_QUEUE, "nbl queue %u lso dsch drain, regi %u, fly %u, vld %u\n", + queue_id, tc_q_list.regi, tc_q_list.fly, tc_q_list.vld); return -1; } @@ -1836,6 +2181,74 @@ static void nbl_phy_setup_queue_switch(void *priv, u16 eth_id) (u8 *)&info, sizeof(info)); } +static int nbl_phy_cfg_phy_flow(void *priv, u16 vsi_id, u16 count, u8 eth_id, bool status) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_upsport_tbl upsport = {0}; + struct nbl_ipro_dn_src_port_tbl dpsport = {0}; + int i = 0; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), (u8 *)&upsport, sizeof(upsport)); + + upsport.phy_flow = !status; + upsport.set_dport_en = !status; + if (!status) { + upsport.entry_vld = 1; + upsport.mirror_en = 0; + upsport.car_en = 0; + } + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), + (u8 *)&upsport, sizeof(upsport)); + + for (i = vsi_id; i < vsi_id + count; i++) { + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(i), + (u8 *)&dpsport, sizeof(dpsport)); + + dpsport.phy_flow = !status; + dpsport.set_dport_en = !status; + if (!status) { + dpsport.entry_vld = 1; + dpsport.mirror_en = 0; + dpsport.dqueue_en = 0; + } + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(i), + (u8 *)&dpsport, sizeof(dpsport)); + } + + return 0; +} + +static int nbl_phy_cfg_eth_port_priority_replace(void *priv, u8 eth_id, bool status) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_port_pri_mdf_en_cfg pri_mdf_en_cfg = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_PORT_PRI_MDF_EN, (u8 *)(&pri_mdf_en_cfg), + sizeof(pri_mdf_en_cfg)); + switch (eth_id) { + case 0: + pri_mdf_en_cfg.eth0 = status; + break; + case 1: + pri_mdf_en_cfg.eth1 = status; + break; + case 2: + pri_mdf_en_cfg.eth2 = status; + break; + case 3: + pri_mdf_en_cfg.eth3 = status; + break; + default: + break; + } + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_PORT_PRI_MDF_EN, (u8 *)(&pri_mdf_en_cfg), + sizeof(pri_mdf_en_cfg)); + return 0; +} + static void nbl_phy_init_pfc(void *priv, u8 ether_ports) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; @@ -1898,10 +2311,10 @@ static void nbl_phy_init_pfc(void *priv, u8 ether_ports) /* downstream: enable modify packet pri */ /* epro port_pri_mdf_en */ - pri_mdf_en_cfg.eth0 = 1; - pri_mdf_en_cfg.eth1 = 1; - pri_mdf_en_cfg.eth2 = 1; - pri_mdf_en_cfg.eth3 = 1; + pri_mdf_en_cfg.eth0 = 0; + pri_mdf_en_cfg.eth1 = 0; + pri_mdf_en_cfg.eth2 = 0; + pri_mdf_en_cfg.eth3 = 0; nbl_hw_write_regs(phy_mgt, 
+static int nbl_phy_cfg_phy_flow(void *priv, u16 vsi_id, u16 count, u8 eth_id, bool status)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_ipro_upsport_tbl upsport = {0};
+	struct nbl_ipro_dn_src_port_tbl dpsport = {0};
+	int i = 0;
+
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), (u8 *)&upsport, sizeof(upsport));
+
+	upsport.phy_flow = !status;
+	upsport.set_dport_en = !status;
+	if (!status) {
+		upsport.entry_vld = 1;
+		upsport.mirror_en = 0;
+		upsport.car_en = 0;
+	}
+
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id),
+			  (u8 *)&upsport, sizeof(upsport));
+
+	for (i = vsi_id; i < vsi_id + count; i++) {
+		nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(i),
+				 (u8 *)&dpsport, sizeof(dpsport));
+
+		dpsport.phy_flow = !status;
+		dpsport.set_dport_en = !status;
+		if (!status) {
+			dpsport.entry_vld = 1;
+			dpsport.mirror_en = 0;
+			dpsport.dqueue_en = 0;
+		}
+
+		nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(i),
+				  (u8 *)&dpsport, sizeof(dpsport));
+	}
+
+	return 0;
+}
+
+static int nbl_phy_cfg_eth_port_priority_replace(void *priv, u8 eth_id, bool status)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_epro_port_pri_mdf_en_cfg pri_mdf_en_cfg = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_EPRO_PORT_PRI_MDF_EN, (u8 *)(&pri_mdf_en_cfg),
+			 sizeof(pri_mdf_en_cfg));
+	switch (eth_id) {
+	case 0:
+		pri_mdf_en_cfg.eth0 = status;
+		break;
+	case 1:
+		pri_mdf_en_cfg.eth1 = status;
+		break;
+	case 2:
+		pri_mdf_en_cfg.eth2 = status;
+		break;
+	case 3:
+		pri_mdf_en_cfg.eth3 = status;
+		break;
+	default:
+		break;
+	}
+
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_PORT_PRI_MDF_EN, (u8 *)(&pri_mdf_en_cfg),
+			  sizeof(pri_mdf_en_cfg));
+	return 0;
+}
+
 static void nbl_phy_init_pfc(void *priv, u8 ether_ports)
 {
 	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
@@ -1898,10 +2311,10 @@ static void nbl_phy_init_pfc(void *priv, u8 ether_ports)
 
 	/* downstream: enable modify packet pri */
 	/* epro port_pri_mdf_en */
-	pri_mdf_en_cfg.eth0 = 1;
-	pri_mdf_en_cfg.eth1 = 1;
-	pri_mdf_en_cfg.eth2 = 1;
-	pri_mdf_en_cfg.eth3 = 1;
+	pri_mdf_en_cfg.eth0 = 0;
+	pri_mdf_en_cfg.eth1 = 0;
+	pri_mdf_en_cfg.eth2 = 0;
+	pri_mdf_en_cfg.eth3 = 0;
 	nbl_hw_write_regs(phy_mgt, NBL_EPRO_PORT_PRI_MDF_EN, (u8 *)(&pri_mdf_en_cfg),
 			  sizeof(pri_mdf_en_cfg));
 
@@ -1958,24 +2371,272 @@ static void nbl_phy_init_pfc(void *priv, u8 ether_ports)
 	}
 }
 
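+/* Runtime PFC configuration for one port: pfc[] carries one enable flag per
+ * priority. RX back-pressure is programmed in DQM, TX pause thresholds in
+ * USTORE, and the sch_cos -> pkt_cos/dscp map in EPRO.
+ */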
-static void nbl_phy_enable_mailbox_irq(void *priv, u16 func_id, bool enable_msix,
-				       u16 global_vector_id)
+static void nbl_phy_configure_pfc(void *priv, u8 eth_id, u8 *pfc)
 {
-	struct nbl_mailbox_qinfo_map_table mb_qinfo_map = { 0 };
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_dqm_rxmac_tx_port_bp_en_cfg dqm_port_bp_en = {0};
+	struct nbl_dqm_rxmac_tx_cos_bp_en_cfg dqm_cos_bp_en = {0};
+	struct nbl_ustore_port_fc_th ustore_port_fc_th = {0};
+	struct nbl_ustore_cos_fc_th ustore_cos_fc_th = {0};
+	struct nbl_epro_cos_map cos_map = {0};
+	u32 enable = 0;
+	u32 cos_en = 0;
+	int i;
 
-	nbl_hw_read_regs(priv, NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id),
-			 (u8 *)&mb_qinfo_map, sizeof(mb_qinfo_map));
+	for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) {
+		if (pfc[i])
+			enable = 1;
+		cos_en |= pfc[i] << i;
+	}
 
-	if (enable_msix) {
-		mb_qinfo_map.msix_idx = global_vector_id;
-		mb_qinfo_map.msix_idx_vaild = 1;
-	} else {
-		mb_qinfo_map.msix_idx = 0;
-		mb_qinfo_map.msix_idx_vaild = 0;
+	/* set rx */
+	nbl_hw_read_regs(phy_mgt, NBL_DQM_RXMAC_TX_PORT_BP_EN,
+			 (u8 *)(&dqm_port_bp_en), sizeof(dqm_port_bp_en));
+	nbl_hw_read_regs(phy_mgt, NBL_DQM_RXMAC_TX_COS_BP_EN,
+			 (u8 *)(&dqm_cos_bp_en), sizeof(dqm_cos_bp_en));
+
+	switch (eth_id) {
+	case 0:
+		dqm_port_bp_en.eth0 = !enable;
+		dqm_cos_bp_en.eth0 = cos_en;
+		break;
+	case 1:
+		dqm_port_bp_en.eth1 = !enable;
+		dqm_cos_bp_en.eth1 = cos_en;
+		break;
+	case 2:
+		dqm_port_bp_en.eth2 = !enable;
+		dqm_cos_bp_en.eth2 = cos_en;
+		break;
+	case 3:
+		dqm_port_bp_en.eth3 = !enable;
+		dqm_cos_bp_en.eth3 = cos_en;
+		break;
+	default:
+		return;
 	}
 
-	nbl_hw_write_regs(priv, NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id),
-			  (u8 *)&mb_qinfo_map, sizeof(mb_qinfo_map));
+	nbl_hw_write_regs(phy_mgt, NBL_DQM_RXMAC_TX_PORT_BP_EN,
+			  (u8 *)(&dqm_port_bp_en), sizeof(dqm_port_bp_en));
+	nbl_hw_write_regs(phy_mgt, NBL_DQM_RXMAC_TX_COS_BP_EN,
+			  (u8 *)(&dqm_cos_bp_en), sizeof(dqm_cos_bp_en));
+
+	/* set tx */
+	nbl_hw_read_regs(phy_mgt, NBL_USTORE_PORT_FC_TH_REG_ARR(eth_id),
+			 (u8 *)(&ustore_port_fc_th), sizeof(ustore_port_fc_th));
+	ustore_port_fc_th.fc_en = !enable;
+	nbl_hw_write_regs(phy_mgt, NBL_USTORE_PORT_FC_TH_REG_ARR(eth_id),
+			  (u8 *)(&ustore_port_fc_th), sizeof(ustore_port_fc_th));
+
+	for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) {
+		nbl_hw_read_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(eth_id * 8 + i),
+				 (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th));
+		ustore_cos_fc_th.fc_en = pfc[i];
+		nbl_hw_write_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(eth_id * 8 + i),
+				  (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th));
+
+		/* downstream: sch_cos->pkt_cos or sch_cos->dscp */
+		/* epro sch_cos_map */
+		cos_map.pkt_cos = i;
+		cos_map.dscp = i << 3;
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_SCH_COS_MAP_TABLE(eth_id, i),
+				  (u8 *)(&cos_map), sizeof(cos_map));
+	}
+}
+
+static void nbl_phy_configure_trust(void *priv, u8 eth_id, u8 trust, u8 *dscp2prio_map)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_upa_pri_sel_conf sel_conf = {0};
+	struct nbl_upa_pri_conf conf_table = {0};
+	struct nbl_epro_ept_tbl ept_tbl = {0};
+	int i;
+
+	if (trust) { /* dscp */
+		nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl,
+				 sizeof(struct nbl_epro_ept_tbl));
+		ept_tbl.pfc_mode = 1;
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl,
+				  sizeof(struct nbl_epro_ept_tbl));
+
+		for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) {
+			conf_table.pri0 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES];
+			conf_table.pri1 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 1];
+			conf_table.pri2 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 2];
+			conf_table.pri3 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 3];
+			conf_table.pri4 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 4];
+			conf_table.pri5 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 5];
+			conf_table.pri6 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 6];
+			conf_table.pri7 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 7];
+
+			nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_CONF_TABLE(eth_id * 8 + i),
+					  (u8 *)(&conf_table), sizeof(conf_table));
+		}
+
+		sel_conf.pri_sel = (1 << 3);
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_SEL_CONF_TABLE(eth_id),
+				  (u8 *)(&sel_conf), sizeof(sel_conf));
+	} else {
+		/* upstream: when pfc_mode is 802.1p, vlan pri -> sch_cos map table */
+		/* upa pri_conf_table */
+		nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl,
+				 sizeof(struct nbl_epro_ept_tbl));
+		ept_tbl.pfc_mode = 0;
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl,
+				  sizeof(struct nbl_epro_ept_tbl));
+		conf_table.pri0 = 0;
+		conf_table.pri1 = 1;
+		conf_table.pri2 = 2;
+		conf_table.pri3 = 3;
+		conf_table.pri4 = 4;
+		conf_table.pri5 = 5;
+		conf_table.pri6 = 6;
+		conf_table.pri7 = 7;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_CONF_TABLE(eth_id * 8),
+				  (u8 *)(&conf_table), sizeof(conf_table));
+
+		/* upstream: set default pfc_mode is 802.1p, use outer vlan */
+		/* upa pri_sel_conf */
+		sel_conf.pri_sel = (1 << 4 | 1 << 3);
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_SEL_CONF_TABLE(eth_id),
+				  (u8 *)(&sel_conf), sizeof(sel_conf));
+	}
+}
+
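+/* Split the port shaper between RDMA and ethernet (DVN): rdma_bw is the RDMA
+ * share in percent of the port CIR; both halves keep the full port rate as
+ * their PIR so either side can borrow idle bandwidth.
+ */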
+static void nbl_phy_configure_rdma_bw(void *priv, u8 eth_id, int rdma_bw)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_shaping_dport dport = {0};
+	struct nbl_shaping_dvn_dport dvn_dport = {0};
+	struct nbl_shaping_rdma_dport rdma_dport = {0};
+	u32 rate, rdma_rate, dvn_rate;
+
+	nbl_hw_read_regs(phy_mgt, NBL_SHAPING_DPORT_REG(eth_id), (u8 *)&dport, sizeof(dport));
+
+	rate = dport.cir;
+	rdma_rate = rate * rdma_bw / 100;
+	dvn_rate = rate - rdma_rate;
+
+	nbl_hw_read_regs(phy_mgt, NBL_SHAPING_DVN_DPORT_REG(eth_id),
+			 (u8 *)&dvn_dport, sizeof(dvn_dport));
+	dvn_dport.cir = dvn_rate;
+	dvn_dport.pir = rate;
+	dvn_dport.depth = dport.depth;
+	dvn_dport.cbs = dvn_dport.depth;
+	dvn_dport.pbs = dvn_dport.depth;
+	dvn_dport.valid = 1;
+
+	nbl_hw_read_regs(phy_mgt, NBL_SHAPING_RDMA_DPORT_REG(eth_id),
+			 (u8 *)&rdma_dport, sizeof(rdma_dport));
+	rdma_dport.cir = rdma_rate;
+	rdma_dport.pir = rate;
+	rdma_dport.depth = dport.depth;
+	rdma_dport.cbs = rdma_dport.depth;
+	rdma_dport.pbs = rdma_dport.depth;
+	rdma_dport.valid = 1;
+
+	nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DVN_DPORT_REG(eth_id),
+			  (u8 *)&dvn_dport, sizeof(dvn_dport));
+	nbl_hw_write_regs(phy_mgt, NBL_SHAPING_RDMA_DPORT_REG(eth_id),
+			  (u8 *)&rdma_dport, sizeof(rdma_dport));
+}
+
+static void nbl_phy_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map)
+{
+	nbl_phy_configure_pfc(priv, eth_id, pfc);
+	nbl_phy_configure_trust(priv, eth_id, trust, dscp2prio_map);
+}
+
+static int nbl_phy_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_ustore_cos_fc_th ustore_cos_fc_th = {0};
+
+	if (xoff > NBL_MAX_USTORE_COS_FC_TH || xon > NBL_MAX_USTORE_COS_FC_TH ||
+	    xoff <= 0 || xon <= 0)
+		return -EINVAL;
+
+	nbl_hw_read_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(eth_id * 8 + prio),
+			 (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th));
+	ustore_cos_fc_th.xoff_th = xoff;
+	ustore_cos_fc_th.xon_th = xon;
+	nbl_hw_write_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(eth_id * 8 + prio),
+			  (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th));
+
+	return 0;
+}
+
+static void nbl_phy_get_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_ustore_cos_fc_th ustore_cos_fc_th = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(eth_id * 8 + prio),
+			 (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th));
+	*xoff = ustore_cos_fc_th.xoff_th;
+	*xon = ustore_cos_fc_th.xon_th;
+}
+
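+/* Per-function TX rate limiting: resolve (or install) the function's DSCH
+ * shaper mapping, then program the matching NBL_SHAPING_NET entry with
+ * cir == pir == rate.
+ */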
+static void nbl_phy_set_rate_limit(void *priv, u16 func_id, enum nbl_traffic_type type, u32 rate)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_shaping_net net_shaping = {0};
+	struct dsch_rdma_net2sha_map_tbl rdma_net2sha_map = {0};
+	struct dsch_rdma_sha2net_map_tbl rdma_sha2net_map = {0};
+	struct dsch_vn_sha2net_map_tbl sha2net = {0};
+	struct dsch_vn_net2sha_map_tbl net2sha = {0};
+	u64 addr;
+
+	if (type == NBL_TRAFFIC_RDMA_TYPE) {
+		nbl_hw_read_regs(phy_mgt, NBL_DSCH_RDMA_NET2SHA_MAP_TBL_REG(func_id),
+				 (u8 *)&rdma_net2sha_map, sizeof(rdma_net2sha_map));
+		rdma_sha2net_map.rdma_vf_id = func_id; /* only pf */
+		rdma_sha2net_map.vld = 1;
+		nbl_hw_write_regs(phy_mgt, NBL_DSCH_RDMA_SHA2NET_MAP_TBL_REG(func_id),
+				  (u8 *)&rdma_sha2net_map, sizeof(rdma_sha2net_map));
+		if (rdma_net2sha_map.vld)
+			addr = NBL_SHAPING_NET_REG(rdma_net2sha_map.net_shaping_id);
+		else
+			addr = NBL_SHAPING_NET_REG(func_id + NBL_NET_SHAPING_RDMA_BASE_ID);
+	} else {
+		sha2net.vld = 1;
+		nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(func_id),
+				  (u8 *)&sha2net, sizeof(sha2net));
+
+		net2sha.vld = 1;
+		nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(func_id),
+				  (u8 *)&net2sha, sizeof(net2sha));
+		addr = NBL_SHAPING_NET_REG(func_id);
+	}
+
+	net_shaping.cir = rate;
+	net_shaping.pir = rate;
+	net_shaping.depth = max(net_shaping.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH);
+	net_shaping.cbs = net_shaping.depth;
+	net_shaping.pbs = net_shaping.depth;
+	net_shaping.valid = 1;
+
+	nbl_hw_write_regs(phy_mgt, addr, (u8 *)&net_shaping, sizeof(net_shaping));
+}
+
+static void nbl_phy_enable_mailbox_irq(void *priv, u16 func_id, bool enable_msix,
+				       u16 global_vector_id)
+{
+	struct nbl_mailbox_qinfo_map_table mb_qinfo_map = { 0 };
+
+	nbl_hw_read_regs(priv, NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id),
+			 (u8 *)&mb_qinfo_map, sizeof(mb_qinfo_map));
+
+	if (enable_msix) {
+		mb_qinfo_map.msix_idx = global_vector_id;
+		mb_qinfo_map.msix_idx_valid = 1;
+	} else {
+		mb_qinfo_map.msix_idx = 0;
+		mb_qinfo_map.msix_idx_valid = 0;
+	}
+
+	nbl_hw_write_regs(priv, NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id),
+			  (u8 *)&mb_qinfo_map, sizeof(mb_qinfo_map));
 }
 
 static void nbl_abnormal_intr_init(struct nbl_phy_mgt *phy_mgt)
@@ -2020,12 +2681,19 @@ static void nbl_phy_enable_abnormal_irq(void *priv, bool enable_msix,
 	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
 	struct nbl_abnormal_msix_vector abnormal_msix_vetcor = { 0 };
 	u32 abnormal_timeout = 0x927C0; /* 600000, 1ms */
+	u32 quirks;
 
 	if (enable_msix) {
 		abnormal_msix_vetcor.idx = global_vector_id;
 		abnormal_msix_vetcor.vld = 1;
 	}
 
+	quirks = nbl_phy_get_quirks(phy_mgt);
+
+	if (performance_mode & BIT(NBL_QUIRKS_NO_TOE) ||
+	    !(quirks & BIT(NBL_QUIRKS_NO_TOE)))
+		abnormal_timeout = 0x3938700; /* 60000000, 100ms */
+
 	nbl_hw_write_regs(phy_mgt, NBL_PADPT_ABNORMAL_TIMEOUT, (u8 *)&abnormal_timeout,
 			  sizeof(abnormal_timeout));
 
@@ -2102,15 +2770,13 @@ static void nbl_phy_configure_msix_info(void *priv, u16 func_id, bool valid, u16
 
 static void nbl_phy_update_mailbox_queue_tail_ptr(void *priv, u16 tail_ptr, u8 txrx)
 {
-	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
-
 	/* local_qid 0 and 1 denote rx and tx queue respectively */
 	u32 local_qid = txrx;
 	u32 value = ((u32)tail_ptr << 16) | local_qid;
 
 	/* wmb for doorbell */
 	wmb();
-	writel(value, phy_mgt->mailbox_bar_hw_addr + NBL_MAILBOX_NOTIFY_ADDR);
+	nbl_mbx_wr32(priv, NBL_MAILBOX_NOTIFY_ADDR, value);
 }
 
 static void nbl_phy_config_mailbox_rxq(void *priv, dma_addr_t dma_addr, int size_bwid)
@@ -2195,7 +2861,7 @@ static u32 nbl_phy_get_host_pf_mask(void *priv)
 	return data;
 }
 
-static u32 nbl_phy_get_host_pf_fid(void *priv, u8 func_id)
+static u32 nbl_phy_get_host_pf_fid(void *priv, u16 func_id)
 {
 	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
 	u32 data;
@@ -2204,6 +2870,57 @@ static u32 nbl_phy_get_host_pf_fid(void *priv, u16 func_id)
 	return data;
 }
 
+static u32 nbl_phy_get_real_bus(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u32 data;
+
+	data = nbl_hw_rd32(phy_mgt, NBL_PCIE_HOST_TL_CFG_BUSDEV);
+	return data >> 5;
+}
+
+static u64 nbl_phy_get_pf_bar_addr(void *priv, u16 func_id)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u64 addr;
+	u32 val;
+	u32 selector;
+
+	selector = NBL_LB_PF_CONFIGSPACE_SELECT_OFFSET +
+		   func_id * NBL_LB_PF_CONFIGSPACE_SELECT_STRIDE;
+	nbl_hw_wr32(phy_mgt, NBL_LB_PCIEX16_TOP_AHB, selector);
+
+	val = nbl_hw_rd32(phy_mgt, NBL_LB_PF_CONFIGSPACE_BASE_ADDR + PCI_BASE_ADDRESS_0);
+	addr = (u64)(val & PCI_BASE_ADDRESS_MEM_MASK);
+
+	val = nbl_hw_rd32(phy_mgt, NBL_LB_PF_CONFIGSPACE_BASE_ADDR + PCI_BASE_ADDRESS_0 + 4);
+	addr |= ((u64)val << 32);
+
+	return addr;
+}
+
+static u64 nbl_phy_get_vf_bar_addr(void *priv, u16 func_id)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u64 addr;
+	u32 val;
+	u32 selector;
+
+	selector = NBL_LB_PF_CONFIGSPACE_SELECT_OFFSET +
+		   func_id * NBL_LB_PF_CONFIGSPACE_SELECT_STRIDE;
+	nbl_hw_wr32(phy_mgt, NBL_LB_PCIEX16_TOP_AHB, selector);
+
+	val = nbl_hw_rd32(phy_mgt, NBL_LB_PF_CONFIGSPACE_BASE_ADDR +
+			  NBL_SRIOV_CAPS_OFFSET + PCI_SRIOV_BAR);
+	addr = (u64)(val & PCI_BASE_ADDRESS_MEM_MASK);
+
+	val = nbl_hw_rd32(phy_mgt, NBL_LB_PF_CONFIGSPACE_BASE_ADDR +
+			  NBL_SRIOV_CAPS_OFFSET + PCI_SRIOV_BAR + 4);
+	addr |= ((u64)val << 32);
+
+	return addr;
+}
+
 static void nbl_phy_cfg_mailbox_qinfo(void *priv, u16 func_id, u16 bus, u16 devid, u16 function)
 {
 	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
@@ -2213,7 +2930,7 @@ static void nbl_phy_cfg_mailbox_qinfo(void *priv, u16 func_id, u16 bus, u16 devi
 	mb_qinfo_map.function = function;
 	mb_qinfo_map.devid = devid;
 	mb_qinfo_map.bus = bus;
-	mb_qinfo_map.msix_idx_vaild = 0;
+	mb_qinfo_map.msix_idx_valid = 0;
 	nbl_hw_write_regs(phy_mgt, NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id),
 			  (u8 *)&mb_qinfo_map, sizeof(mb_qinfo_map));
 }
@@ -2296,6 +3013,20 @@ static int nbl_phy_set_spoof_check_addr(void *priv, u16 vsi_id, u8 *mac)
 	return 0;
 }
 
+static int nbl_phy_set_vsi_mtu(void *priv, u16 vsi_id, u16 mtu_sel)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_ipro_dn_src_port_tbl dpsport = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id),
+			 (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl));
+	dpsport.mtu_sel = mtu_sel;
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id),
+			  (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl));
+
+	return 0;
+}
+
 static int nbl_phy_set_spoof_check_enable(void *priv, u16 vsi_id, u8 enable)
 {
 	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
@@ -2382,16 +3113,16 @@ static void nbl_phy_enable_adminq_irq(void *priv, bool enable_msix, u16 global_v
 	struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt);
 	struct nbl_adminq_qinfo_map_table adminq_qinfo_map = { 0 };
 
-	adminq_qinfo_map.bus = common->bus;
+	adminq_qinfo_map.bus = common->hw_bus;
 	adminq_qinfo_map.devid = common->devid;
 	adminq_qinfo_map.function = NBL_COMMON_TO_PCI_FUNC_ID(common);
 
 	if (enable_msix) {
 		adminq_qinfo_map.msix_idx = global_vector_id;
-		adminq_qinfo_map.msix_idx_vaild = 1;
+		adminq_qinfo_map.msix_idx_valid = 1;
 	} else {
 		adminq_qinfo_map.msix_idx = 0;
-		adminq_qinfo_map.msix_idx_vaild = 0;
+		adminq_qinfo_map.msix_idx_valid = 0;
 	}
 
 	nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_MSIX_MAP_TABLE_ADDR,
@@ -2400,15 +3131,13 @@ static void nbl_phy_enable_adminq_irq(void *priv, bool enable_msix, u16 global_v
 
 static void nbl_phy_update_adminq_queue_tail_ptr(void *priv, u16 tail_ptr, u8 txrx)
 {
-	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
-
 	/* local_qid 0 and 1 denote rx and tx queue respectively */
 	u32 local_qid = txrx;
 	u32 value = ((u32)tail_ptr << 16) | local_qid;
 
 	/* wmb for doorbell */
 	wmb();
-	writel(value, phy_mgt->mailbox_bar_hw_addr + NBL_ADMINQ_NOTIFY_ADDR);
+	nbl_mbx_wr32(priv, NBL_ADMINQ_NOTIFY_ADDR, value);
 }
 
 static u16 nbl_phy_get_adminq_rx_tail_ptr(void *priv)
@@ -2447,236 +3176,3979 @@ static u8 __iomem *nbl_phy_get_hw_addr(void *priv, size_t *size)
 	return phy_mgt->hw_addr;
 }
 
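+/* kTLS key material is stored byte-reversed in the DL4S/UL4S salt + key
+ * tables, hence the reversed copy loops below.
+ */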
NBL_PAIR_ID_GET_TX(queue_id) : - NBL_PAIR_ID_GET_RX(queue_id); + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_dprbac_sad_crypto_info crypto_info; - if (type >= NBL_ABNORMAL_EVENT_MAX) - return -EINVAL; + memset(&crypto_info, 0, sizeof(crypto_info)); - nbl_hw_read_regs(phy_mgt, NBL_IPRO_QUEUE_TBL(queue_id), - (u8 *)&ipro_queue_tbl, sizeof(ipro_queue_tbl)); + memcpy(crypto_info.key, key, sizeof(crypto_info.key)); + crypto_info.salt = salt; + crypto_info.crypto_type = crypto_type; + crypto_info.tunnel_mode = tunnel_mode; + crypto_info.icv_len = icv_len; + nbl_hw_write_regs(phy_mgt, NBL_DPRBAC_SAD_CRYPTO_INFO(index), + (u8 *)&crypto_info, sizeof(crypto_info)); +} - detail->abnormal = true; - detail->qid = queue_id; - detail->vsi_id = ipro_queue_tbl.vsi_id; +static void nbl_phy_cfg_dipsec_sad_encap(void *priv, u32 index, u8 nat_flag, + u16 dport, u32 spi, u32 *ip_data) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_dprbac_sad_encap_info encap_info; - nbl_hw_read_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(qinfo_id), - (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); - host_vnet_qinfo.valid = 1; - nbl_hw_write_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(qinfo_id), - (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); + memset(&encap_info, 0, sizeof(encap_info)); - return 0; + encap_info.nat_flag = nat_flag; + encap_info.dport = dport; + encap_info.spi = spi; + memcpy(encap_info.dip_addr, ip_data, 16); + memcpy(encap_info.sip_addr, ip_data + 4, 16); + nbl_hw_write_regs(phy_mgt, NBL_DPRBAC_SAD_ENCAP_INFO(index), + (u8 *)&encap_info, sizeof(encap_info)); } -static int nbl_phy_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnomal_info) +static u32 nbl_phy_read_dipsec_status(void *priv) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; - struct device *dev = NBL_PHY_MGT_TO_DEV(phy_mgt); - struct dvn_desc_dif_err_info desc_dif_err_info = {0}; - struct dvn_pkt_dif_err_info pkt_dif_err_info = {0}; - struct dvn_err_queue_id_get err_queue_id_get = {0}; - struct uvn_queue_err_info queue_err_info = {0}; - struct nbl_abnormal_details *detail; - u32 int_status = 0, rdma_other_abn = 0, tlp_out_drop_cnt = 0; - u32 desc_dif_err_cnt = 0, pkt_dif_err_cnt = 0; - u32 queue_err_cnt; - int ret = 0; - nbl_hw_read_regs(phy_mgt, NBL_DVN_INT_STATUS, (u8 *)&int_status, sizeof(u32)); - if (int_status) { - if (int_status & BIT(NBL_DVN_INT_DESC_DIF_ERR)) { - nbl_hw_read_regs(phy_mgt, NBL_DVN_DESC_DIF_ERR_CNT, - (u8 *)&desc_dif_err_cnt, sizeof(u32)); - nbl_hw_read_regs(phy_mgt, NBL_DVN_DESC_DIF_ERR_INFO, - (u8 *)&desc_dif_err_info, - sizeof(struct dvn_desc_dif_err_info)); - dev_info(dev, "dvn int_status:0x%x, desc_dif_mf_cnt:%d, queue_id:%d\n", - int_status, desc_dif_err_cnt, desc_dif_err_info.queue_id); - detail = &abnomal_info->details[NBL_ABNORMAL_EVENT_DVN]; - nbl_phy_process_abnormal_queue(phy_mgt, desc_dif_err_info.queue_id, - NBL_ABNORMAL_EVENT_DVN, detail); + return nbl_hw_rd32(phy_mgt, NBL_DPRBAC_INT_STATUS); +} - ret |= BIT(NBL_ABNORMAL_EVENT_DVN); - } +static u32 nbl_phy_reset_dipsec_status(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 dipsec_status; - if (int_status & BIT(NBL_DVN_INT_PKT_DIF_ERR)) { - nbl_hw_read_regs(phy_mgt, NBL_DVN_PKT_DIF_ERR_CNT, - (u8 *)&pkt_dif_err_cnt, sizeof(u32)); - nbl_hw_read_regs(phy_mgt, NBL_DVN_PKT_DIF_ERR_INFO, - (u8 *)&pkt_dif_err_info, - sizeof(struct dvn_pkt_dif_err_info)); - dev_info(dev, "dvn int_status:0x%x, pkt_dif_mf_cnt:%d, queue_id:%d\n", - 
int_status, pkt_dif_err_cnt, pkt_dif_err_info.queue_id); - } + dipsec_status = nbl_hw_rd32(phy_mgt, NBL_DPRBAC_INT_STATUS); + nbl_hw_wr32(phy_mgt, NBL_DPRBAC_INT_STATUS, dipsec_status); - /* clear dvn abnormal irq */ - nbl_hw_write_regs(phy_mgt, NBL_DVN_INT_STATUS, - (u8 *)&int_status, sizeof(int_status)); + return dipsec_status; +} - /* enable new queue error irq */ - err_queue_id_get.desc_flag = 1; - err_queue_id_get.pkt_flag = 1; - nbl_hw_write_regs(phy_mgt, NBL_DVN_ERR_QUEUE_ID_GET, - (u8 *)&err_queue_id_get, sizeof(err_queue_id_get)); +static u32 nbl_phy_read_dipsec_lft_info(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + return nbl_hw_rd32(phy_mgt, NBL_DPRBAC_LIFETIME_INFO); +} + +static void nbl_phy_cfg_dipsec_lft_info(void *priv, u32 index, u32 lifetime_diff, + u32 flag_wen, u32 msb_wen) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_ipsec_lifetime_diff modify_liftime; + + memset(&modify_liftime, 0, sizeof(modify_liftime)); + + modify_liftime.sad_index = index; + if (flag_wen) { + modify_liftime.lifetime_diff = lifetime_diff; + nbl_hw_wr32(phy_mgt, NBL_DPRBAC_LIFETIME_DIFF, modify_liftime.data[1]); + modify_liftime.flag_wen = 1; + modify_liftime.flag_value = 1; + } + + if (msb_wen) { + modify_liftime.msb_wen = 1; + modify_liftime.msb_value = 1; + } + nbl_hw_wr32(phy_mgt, NBL_DPRBAC_SAD_LIFEDIFF, modify_liftime.data[0]); +} + +static void nbl_phy_init_dprbac(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_dprbac_enable dprbac_enable = {.data = 0}; + union nbl_dprbac_dbg_cnt_en dbg_cnt_en = {.data = 0}; + + dprbac_enable.prbac = 1; + dprbac_enable.mf_fwd = 1; + nbl_hw_wr32(phy_mgt, NBL_DPRBAC_ENABLE, dprbac_enable.data); + + dbg_cnt_en.total = 1; + dbg_cnt_en.in_right_bypass = 1; + dbg_cnt_en.in_drop_bypass = 1; + dbg_cnt_en.in_drop_prbac = 1; + dbg_cnt_en.out_drop_prbac = 1; + dbg_cnt_en.out_right_prbac = 1; + nbl_hw_wr32(phy_mgt, NBL_DPRBAC_DBG_CNT_EN, dbg_cnt_en.data); +} + +static void nbl_phy_cfg_uipsec_nat(void *priv, u8 nat_flag, u16 dport) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_uprbac_nat uprbac_nat = {.data = 0}; + + uprbac_nat.enable = nat_flag; + uprbac_nat.dport = dport; + nbl_hw_wr32(phy_mgt, NBL_UPRBAC_NAT, uprbac_nat.data); +} + +static void nbl_phy_cfg_uipsec_sad_esn(void *priv, u32 index, u32 sn, + u32 esn, u8 overlap, u8 enable) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_sad_bottom ipsec_esn = {0}; + + ipsec_esn.sn = sn; + ipsec_esn.esn = esn; + ipsec_esn.overlap = overlap; + ipsec_esn.enable = enable; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_SAD_BOTTOM(index), + (u8 *)&ipsec_esn, sizeof(ipsec_esn)); +} + +static void nbl_phy_cfg_uipsec_sad_lifetime(void *priv, u32 index, u32 lft_cnt, + u32 lft_diff, u8 limit_enable, u8 limit_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_sad_lifetime lifetime = {0}; + + lifetime.cnt = lft_cnt; + lifetime.diff = lft_diff; + lifetime.enable = limit_enable; + lifetime.unit = limit_type; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_SAD_LIFETIME(index), + (u8 *)&lifetime, sizeof(lifetime)); +} + +static void nbl_phy_cfg_uipsec_sad_crypto(void *priv, u32 index, u32 *key, u32 salt, + u32 crypto_type, u8 tunnel_mode, u8 icv_len) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_sad_crypto_info crypto_info; + + memset(&crypto_info, 0, sizeof(crypto_info)); + + memcpy(crypto_info.key, key, 
sizeof(crypto_info.key)); + crypto_info.salt = salt; + crypto_info.crypto_type = crypto_type; + crypto_info.tunnel_mode = tunnel_mode; + crypto_info.icv_len = icv_len; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_SAD_CRYPTO_INFO(index), + (u8 *)&crypto_info, sizeof(crypto_info)); +} + +static void nbl_phy_cfg_uipsec_sad_window(void *priv, u32 index, u8 window_en, u8 option) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_sad_slide_window slide_window; + + memset(&slide_window, 0, sizeof(slide_window)); + slide_window.enable = window_en; + slide_window.option = option; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_SAD_SLIDE_WINDOW(index), + (u8 *)&slide_window, sizeof(slide_window)); +} + +static void nbl_phy_cfg_uipsec_em_tcam(void *priv, u16 tcam_index, u32 *data) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_em_tcam em_tcam = {0}; + + em_tcam.key_dat0 = data[0]; + em_tcam.key_dat1 = data[1]; + em_tcam.key_dat2 = data[2] >> 16; + em_tcam.key_vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_EM_TCAM(2 * tcam_index + 1), + (u8 *)&em_tcam, sizeof(em_tcam)); + + em_tcam.key_dat0 = (data[2] << 16) + (data[3] >> 16); + em_tcam.key_dat1 = (data[3] << 16) + (data[4] >> 16); + em_tcam.key_dat2 = data[4]; + em_tcam.key_vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_EM_TCAM(2 * tcam_index), + (u8 *)&em_tcam, sizeof(em_tcam)); +} + +static void nbl_phy_cfg_uipsec_em_ad(void *priv, u16 tcam_index, u32 index) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_uprbac_em_ad em_ad = {0}; + + em_ad.sad_index = index; + nbl_hw_wr32(phy_mgt, NBL_UPRBAC_EM_AD(2 * tcam_index), em_ad.data); +} + +static void nbl_phy_clear_uipsec_tcam_ad(void *priv, u16 tcam_index) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_em_tcam em_tcam = {0}; + union nbl_uprbac_em_ad em_ad = {0}; + + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_EM_TCAM(2 * tcam_index + 1), + (u8 *)&em_tcam, sizeof(em_tcam)); + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_EM_TCAM(2 * tcam_index), + (u8 *)&em_tcam, sizeof(em_tcam)); + nbl_hw_wr32(phy_mgt, NBL_UPRBAC_EM_AD(2 * tcam_index), em_ad.data); +} + +static void nbl_phy_cfg_uipsec_em_ht(void *priv, u32 index, u16 ht_table, u16 ht_index, + u16 ht_other_index, u16 ht_bucket) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_uprbac_ht uprbac_ht; + + memset(&uprbac_ht, 0, sizeof(uprbac_ht)); + + nbl_hw_read_regs(phy_mgt, NBL_UPRBAC_HT(ht_table, ht_index), uprbac_ht.data, 16); + if (ht_bucket == 0) { + uprbac_ht.vld0 = 1; + uprbac_ht.ht_other_index0 = ht_other_index; + uprbac_ht.kt_index0 = index; + } + if (ht_bucket == 1) { + uprbac_ht.vld1 = 1; + uprbac_ht.ht_other_index1 = ht_other_index; + uprbac_ht.kt_index1 = index; + } + if (ht_bucket == 2) { + uprbac_ht.vld2 = 1; + uprbac_ht.ht_other_index2 = ht_other_index; + uprbac_ht.kt_index2 = index; + } + if (ht_bucket == 3) { + uprbac_ht.vld3 = 1; + uprbac_ht.ht_other_index3 = ht_other_index; + uprbac_ht.kt_index3 = index; + } + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_HT(ht_table, ht_index), uprbac_ht.data, 16); +} + +static void nbl_phy_cfg_uipsec_em_kt(void *priv, u32 index, u32 *data) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_kt uprbac_kt; + + memset(&uprbac_kt, 0, sizeof(uprbac_kt)); + memcpy(uprbac_kt.key, data, 20); + uprbac_kt.sad_index = index; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_KT(index), (u8 *)&uprbac_kt, sizeof(uprbac_kt)); +} + +static void 
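+/* Each UPRBAC hash-table row holds four buckets; ht_bucket selects which
+ * (vldN, ht_other_indexN, kt_indexN) triple of the 16-byte row to fill.
+ */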
+static void nbl_phy_cfg_uipsec_em_ht(void *priv, u32 index, u16 ht_table, u16 ht_index,
+				     u16 ht_other_index, u16 ht_bucket)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union nbl_uprbac_ht uprbac_ht;
+
+	memset(&uprbac_ht, 0, sizeof(uprbac_ht));
+
+	nbl_hw_read_regs(phy_mgt, NBL_UPRBAC_HT(ht_table, ht_index), uprbac_ht.data, 16);
+	if (ht_bucket == 0) {
+		uprbac_ht.vld0 = 1;
+		uprbac_ht.ht_other_index0 = ht_other_index;
+		uprbac_ht.kt_index0 = index;
+	}
+	if (ht_bucket == 1) {
+		uprbac_ht.vld1 = 1;
+		uprbac_ht.ht_other_index1 = ht_other_index;
+		uprbac_ht.kt_index1 = index;
+	}
+	if (ht_bucket == 2) {
+		uprbac_ht.vld2 = 1;
+		uprbac_ht.ht_other_index2 = ht_other_index;
+		uprbac_ht.kt_index2 = index;
+	}
+	if (ht_bucket == 3) {
+		uprbac_ht.vld3 = 1;
+		uprbac_ht.ht_other_index3 = ht_other_index;
+		uprbac_ht.kt_index3 = index;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_HT(ht_table, ht_index), uprbac_ht.data, 16);
+}
+
+static void nbl_phy_cfg_uipsec_em_kt(void *priv, u32 index, u32 *data)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_uprbac_kt uprbac_kt;
+
+	memset(&uprbac_kt, 0, sizeof(uprbac_kt));
+	memcpy(uprbac_kt.key, data, 20);
+	uprbac_kt.sad_index = index;
+	nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_KT(index), (u8 *)&uprbac_kt, sizeof(uprbac_kt));
+}
+
+static void nbl_phy_clear_uipsec_ht_kt(void *priv, u32 index, u16 ht_table,
+				       u16 ht_index, u16 ht_bucket)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union nbl_uprbac_ht uprbac_ht;
+	struct nbl_uprbac_kt uprbac_kt;
+
+	memset(&uprbac_ht, 0, sizeof(uprbac_ht));
+	memset(&uprbac_kt, 0, sizeof(uprbac_kt));
+	nbl_hw_read_regs(phy_mgt, NBL_UPRBAC_HT(ht_table, ht_index), uprbac_ht.data, 16);
+	if (ht_bucket == 0) {
+		uprbac_ht.vld0 = 0;
+		uprbac_ht.ht_other_index0 = 0;
+		uprbac_ht.kt_index0 = 0;
+	}
+	if (ht_bucket == 1) {
+		uprbac_ht.vld1 = 0;
+		uprbac_ht.ht_other_index1 = 0;
+		uprbac_ht.kt_index1 = 0;
+	}
+	if (ht_bucket == 2) {
+		uprbac_ht.vld2 = 0;
+		uprbac_ht.ht_other_index2 = 0;
+		uprbac_ht.kt_index2 = 0;
+	}
+	if (ht_bucket == 3) {
+		uprbac_ht.vld3 = 0;
+		uprbac_ht.ht_other_index3 = 0;
+		uprbac_ht.kt_index3 = 0;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_HT(ht_table, ht_index), uprbac_ht.data, 16);
+
+	nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_KT(index), (u8 *)&uprbac_kt, sizeof(uprbac_kt));
+}
+
+static u32 nbl_phy_read_uipsec_status(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	return nbl_hw_rd32(phy_mgt, NBL_UPRBAC_INT_STATUS);
+}
+
+static u32 nbl_phy_reset_uipsec_status(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u32 uipsec_status;
+
+	uipsec_status = nbl_hw_rd32(phy_mgt, NBL_UPRBAC_INT_STATUS);
+	nbl_hw_wr32(phy_mgt, NBL_UPRBAC_INT_STATUS, uipsec_status);
+
+	return uipsec_status;
+}
+
+static void nbl_phy_cfg_uipsec_lft_info(void *priv, u32 index, u32 lifetime_diff,
+					u32 flag_wen, u32 msb_wen)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union nbl_ipsec_lifetime_diff modify_lifetime;
+
+	memset(&modify_lifetime, 0, sizeof(modify_lifetime));
+
+	modify_lifetime.sad_index = index;
+	if (flag_wen) {
+		modify_lifetime.lifetime_diff = lifetime_diff;
+		nbl_hw_wr32(phy_mgt, NBL_UPRBAC_LIFETIME_DIFF, modify_lifetime.data[1]);
+		modify_lifetime.flag_wen = 1;
+		modify_lifetime.flag_value = 1;
+	}
+
+	if (msb_wen) {
+		modify_lifetime.msb_wen = 1;
+		modify_lifetime.msb_value = 1;
+	}
+	nbl_hw_wr32(phy_mgt, NBL_UPRBAC_SAD_LIFEDIFF, modify_lifetime.data[0]);
+}
+
+static u32 nbl_phy_read_uipsec_lft_info(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	return nbl_hw_rd32(phy_mgt, NBL_UPRBAC_LIFETIME_INFO);
+}
+
+static void nbl_phy_init_uprbac(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union nbl_uprbac_enable uprbac_enable = {0};
+	union nbl_uprbac_dbg_cnt_en dbg_cnt_en = {0};
+	struct nbl_uprbac_em_profile em_profile = {0};
+
+	uprbac_enable.prbac = 1;
+	uprbac_enable.padding_check = 1;
+	uprbac_enable.pad_err = 1;
+	uprbac_enable.icv_err = 1;
+	nbl_hw_wr32(phy_mgt, NBL_UPRBAC_ENABLE, uprbac_enable.data);
+
+	dbg_cnt_en.drop_prbac = 1;
+	dbg_cnt_en.right_prbac = 1;
+	dbg_cnt_en.replay = 1;
+	dbg_cnt_en.right_misc = 1;
+	dbg_cnt_en.error_misc = 1;
+	dbg_cnt_en.xoff_drop = 1;
+	dbg_cnt_en.intf_cell = 1;
+	dbg_cnt_en.sad_miss = 1;
+	nbl_hw_wr32(phy_mgt, NBL_UPRBAC_DBG_CNT_EN, dbg_cnt_en.data);
+
+	em_profile.vld = 1;
+	em_profile.hash_sel0 = 0;
+	em_profile.hash_sel1 = 3;
+	nbl_hw_write_regs(phy_mgt, LEONIS_UPRBAC_EM_PROFILE,
+			  (u8 *)&em_profile, sizeof(em_profile));
+}
+
+static u32 nbl_phy_get_fw_ping(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u32 ping;
+
+	nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_HEARTBEAT_PING, (u8 *)&ping, sizeof(ping));
+
+	return ping;
+}
+
+static void nbl_phy_set_fw_ping(void *priv, u32 ping)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	nbl_hw_write_mbx_regs(phy_mgt, NBL_FW_HEARTBEAT_PING, (u8 *)&ping, sizeof(ping));
+}
+
+static u32 nbl_phy_get_fw_pong(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u32 pong;
+
+	nbl_hw_read_regs(phy_mgt, NBL_FW_HEARTBEAT_PONG, (u8 *)&pong, sizeof(pong));
+
+	return pong;
+}
+
+static void nbl_phy_set_fw_pong(void *priv, u32 pong)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	nbl_hw_write_regs(phy_mgt, NBL_FW_HEARTBEAT_PONG, (u8 *)&pong, sizeof(pong));
+}
+
+static void nbl_phy_load_p4(void *priv, u32 addr, u32 size, u8 *data)
+{
+	nbl_hw_write_be_regs(priv, addr, data, size);
+}
+
+static void nbl_phy_ipro_chksum_err_ctrl(void *priv, u8 status)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union ipro_errcode_tbl_u errcode;
+	u8 index = NBL_ERROR_CODE_L3_CHKSUM;
+
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_ERRCODE_TBL_REG(index),
+			 (u8 *)errcode.data, sizeof(errcode));
+	errcode.info.vld = status;
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_ERRCODE_TBL_REG(index),
+			  (u8 *)errcode.data, sizeof(errcode));
+
+	index = NBL_ERROR_CODE_L4_CHKSUM;
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_ERRCODE_TBL_REG(index),
+			 (u8 *)errcode.data, sizeof(errcode));
+	errcode.info.vld = status;
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_ERRCODE_TBL_REG(index),
+			  (u8 *)errcode.data, sizeof(errcode));
+}
+
+static int nbl_phy_init_offload_fwd(void *priv, u16 vsi_id)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union epro_no_dport_redirect_u epro_no_dport = {.info = {0}};
+	union nbl_action_data set_dport = {.data = 0};
+	union epro_vpt_u vpt;
+
+	memset(&vpt, 0, sizeof(vpt));
+
+	set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_UPCALL;
+	set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST;
+	set_dport.dport.up.port_id = vsi_id;
+
+	epro_no_dport.info.dport = set_dport.data;
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_NO_DPORT_REDIRECT_ADDR,
+			  (u8 *)epro_no_dport.data, sizeof(epro_no_dport));
+
+	nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)vpt.data,
+			 NBL_EPRO_VPT_DWLEN * NBL_BYTES_IN_REG);
+	vpt.info.rss_alg_sel = NBL_SYM_TOEPLITZ_INT;
+	vpt.info.rss_key_type_btm = NBL_KEY_IP4_L4_RSS_BIT | NBL_KEY_IP6_L4_RSS_BIT;
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)vpt.data,
+			  NBL_EPRO_VPT_DWLEN * NBL_BYTES_IN_REG);
+
+	/* drop packets with wrong chksums, to prevent PED from correcting them */
+	nbl_phy_ipro_chksum_err_ctrl(phy_mgt, 1);
+
+	return 0;
+}
+
+static int nbl_phy_cmdq_init(void *priv, void *param, u16 func_id)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_chan_cmdq_init_info *cmdq_param =
+		(struct nbl_chan_cmdq_init_info *)param;
+	union pcompleter_host_cfg_funtion_id_cmdq_u cfg_func_id = {
+		.info.dbg = func_id,
+		.info.vld = 1,
+	};
+	u32 value = 0;
+
+	/* disable the queue, this will reset queue head to 0 */
+	nbl_warn(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		 "CMDQ start init: size %u %llu %u\n",
+		 cmdq_param->len, cmdq_param->pa, cmdq_param->bdf_num);
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_EN_ADDR, value);
+
+	/* write registers */
+	value = 0;
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_TAIL_ADDR, value);
+	value = cmdq_param->len;
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_SIZE_ADDR, value);
+	value = NBL_CMDQ_HI_DWORD(cmdq_param->pa);
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_BADDR_H_ADDR, value);
+	value = NBL_CMDQ_LO_DWORD(cmdq_param->pa);
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_BADDR_L_ADDR, value);
+
+	nbl_hw_wr32(phy_mgt, NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CMDQ_ADDR,
+		    *(u32 *)&cfg_func_id);
+
+	/* enable the queue */
+	value = 1;
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_EN_ADDR, value);
+	/* write dif registers (mode and bdf) for receive queue */
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_DIF_BDF_ADDR,
+		    cmdq_param->bdf_num);
+	value = NBL_CMDQ_DIF_MODE_VALUE;
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_DIF_MODE_ADDR, value);
+	value = 0x1fffff;
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_FLOW_EN_ADDR, value);
+	return 0;
+}
+
+static int nbl_phy_cmdq_destroy(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u32 value = 0;
+
+	nbl_hw_wr32(phy_mgt, NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_CMDQ_ADDR,
+		    value);
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_EN_ADDR, value);
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_SIZE_ADDR, value);
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_BADDR_H_ADDR, value);
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_BADDR_L_ADDR, value);
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_DIF_INT_ADDR, value);
+
+	return NBL_OK;
+}
+
+static int nbl_phy_cmdq_reset(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u32 value = 0;
+	u32 delay_count = 0;
+	u32 r_head = 0;
+	u32 r_tail = 0;
+
+	/* disable the command queue */
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_EN_ADDR, value);
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "CMDQ resetting now...\n");
+
+	/* wait until tail equals head, then reset tail */
+	while (true) {
+		usleep_range(NBL_CMDQ_DELAY_200US, NBL_CMDQ_DELAY_300US);
+		r_head = nbl_hw_rd32(phy_mgt, NBL_CMDQ_HOST_CMDQ_CURR_ADDR);
+		r_tail = nbl_hw_rd32(phy_mgt, NBL_CMDQ_HOST_CMDQ_TAIL_ADDR);
+		if (r_head == r_tail)
+			break;
+
+		delay_count++;
+		if (delay_count >= NBL_CMDQ_RESET_MAX_WAIT)
+			return -EBADRQC;
+	}
+
+	/* enable the queue, and resend the command */
+	value = 0;
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_TAIL_ADDR, value);
+	value = 1;
+	nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_EN_ADDR, value);
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "CMDQ finished resetting!\n");
+	return 0;
+}
+
+static void nbl_phy_update_cmdq_tail(void *priv, u32 doorbell)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	nbl_hw_wr32(phy_mgt, NBL_CMD_NOTIFY_ADDR, doorbell);
+}
+
+static int nbl_acl_set_act_pri(struct nbl_phy_mgt *phy_mgt)
+{
+	union acl_action_priority0_u act0_pri = {
+		.info.action_id9_pri = 3,
+	};
+
+	union acl_action_priority4_u act4_pri = {
+		.info.action_id9_pri = 3,
+	};
+
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_PRIORITY0_ADDR,
+			  (u8 *)act0_pri.data, sizeof(act0_pri));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_PRIORITY4_ADDR,
+			  (u8 *)act4_pri.data, sizeof(act4_pri));
+	return NBL_OK;
+}
+
+static int nbl_acl_check_init(struct nbl_phy_mgt *phy_mgt)
+{
+	int ret = NBL_OK;
+	union acl_init_done_u acl_init;
+
+	nbl_hw_read_regs(phy_mgt, NBL_ACL_INIT_DONE_ADDR, (u8 *)acl_init.data,
+			 sizeof(acl_init));
+	if (!acl_init.info.done)
+		ret = NBL_FAIL;
+	if (ret == NBL_OK)
+		nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			 "NBL ACL init start success");
+	else
+		nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			 "NBL ACL init start fail");
+
+	return ret;
+}
+
+static int nbl_acl_flow_stat_on(struct nbl_phy_mgt *phy_mgt)
+{
+	union acl_flow_id_stat_act_u flow_id_act = {
+		.info.flow_id_en = 1,
+	};
+
+	union acl_stat_id_act_u stat_id_act = {
+		.info.act_en = 1,
+		.info.act_id = NBL_ACT_SET_SPECIAL_FLOW_STAT,
+	};
+
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_FLOW_ID_STAT_ACT_ADDR,
+			  (u8 *)flow_id_act.data, sizeof(flow_id_act));
+
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_STAT_ID_ACT_ADDR,
+			  (u8 *)stat_id_act.data, sizeof(stat_id_act));
+	return NBL_OK;
+}
+
+static int nbl_acl_set_tcam_info_regs(struct nbl_phy_mgt *phy_mgt,
+				      struct nbl_acl_cfg_param *acl_param)
+{
+	u8 *acl_key_cfg_ptr = (u8 *)(acl_param->tcam_cfg);
+	u8 *act_cfg_ptr = (u8 *)(acl_param->action_cfg);
+
+	nbl_hw_write_regs(phy_mgt,
+			  NBL_ACL_TCAM_CFG_REG(acl_param->acl_stage),
+			  acl_key_cfg_ptr, sizeof(union acl_tcam_cfg_u));
+	nbl_hw_write_regs(phy_mgt,
+			  NBL_ACL_ACTION_RAM_CFG_REG(acl_param->acl_stage),
+			  act_cfg_ptr, sizeof(union acl_action_ram_cfg_u));
+
+	return NBL_OK;
+}
+
+static int nbl_acl_set_tcam_info(struct nbl_phy_mgt *phy_mgt,
+				 struct nbl_acl_cfg_param *acl_param)
+{
+	int ret = 0;
+
+	ret = nbl_acl_set_tcam_info_regs(phy_mgt, acl_param);
+	if (ret)
+		return ret;
+
+	return nbl_acl_set_tcam_info_regs(phy_mgt, acl_param + 1);
+}
+
+static int nbl_acl_flow_stat_clear(struct nbl_phy_mgt *phy_mgt)
+{
+	union acl_flow_id_stat_glb_clr_u flow_stat_clear = {
+		.info.glb_clr = 1,
+	};
+	union acl_stat_id_stat_glb_clr_u stat_stat_clear = {
+		.info.glb_clr = 1,
+	};
+	union acl_flow_id_stat_done_u flow_done_info = {.info = {0}};
+	u32 rd_retry = 0;
+
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_FLOW_ID_STAT_GLB_CLR_ADDR,
+			  (u8 *)flow_stat_clear.data, sizeof(flow_stat_clear));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_STAT_ID_STAT_GLB_CLR_ADDR,
+			  (u8 *)stat_stat_clear.data, sizeof(stat_stat_clear));
+	while (1) {
+		nbl_hw_read_regs(phy_mgt, NBL_ACL_FLOW_ID_STAT_DONE_ADDR,
+				 (u8 *)flow_done_info.data,
+				 sizeof(flow_done_info));
+		if (flow_done_info.info.glb_clr_done)
+			break;
+		if (rd_retry++ == NBL_ACL_RD_RETRY) {
+			nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+				  "NBL ACL flow stat clear fail");
			return NBL_FAIL;
+		}
+		usleep_range(NBL_ACL_RD_WAIT_100US, NBL_ACL_RD_WAIT_200US);
+	}
+
+	return NBL_OK;
+}
+
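+/* ACL TCAM rows are written through an indirect-access window: stage the
+ * data/valid registers, kick NBL_ACL_INDIRECT_CTRL_ADDR, then poll the
+ * ack register until hardware reports done.
+ */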
+static int nbl_acl_flow_tcam_clear(struct nbl_phy_mgt *phy_mgt, u16 tcam_btm,
+				   u16 tcam_start_idx, u16 tcam_end_idx)
+{
+	struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt);
+	union acl_indirect_ctrl_u indirect_ctrl = {
+		.info.tcam_addr = 0,
+		.info.cpu_acl_cfg_start = 1,
+		.info.acc_btm = tcam_btm,
+		.info.cpu_acl_cfg_rw = NBL_ACL_CPU_WRITE,
+	};
+	union acl_indirect_access_ack_u indirect_ack = {.info = {0}};
+	/* set invalid in each tcam */
+	union acl_valid_bit_u tcam_data_valid = {.info = {0}};
+	int try_time = NBL_ACL_RD_RETRY;
+
+	for (; tcam_start_idx < tcam_end_idx; ++tcam_start_idx) {
+		nbl_hw_write_regs(phy_mgt, NBL_ACL_VALID_BIT_ADDR,
+				  (u8 *)tcam_data_valid.data,
+				  sizeof(tcam_data_valid));
+		indirect_ctrl.info.tcam_addr = tcam_start_idx;
+		nbl_hw_write_regs(phy_mgt, NBL_ACL_INDIRECT_CTRL_ADDR,
+				  (u8 *)indirect_ctrl.data,
+				  sizeof(indirect_ctrl));
+
+		while (try_time--) {
+			nbl_hw_read_regs(phy_mgt,
+					 NBL_ACL_INDIRECT_ACCESS_ACK_ADDR,
+					 (u8 *)indirect_ack.data,
+					 sizeof(indirect_ack));
+			if (indirect_ack.info.done)
+				break;
+			usleep_range(NBL_ACL_RD_WAIT_100US, NBL_ACL_RD_WAIT_200US);
+		}
+
+		if (!indirect_ack.info.done) {
+			nbl_info(common, NBL_DEBUG_FLOW, "indirect access failed(%u-%u), done: %u, status: %08x.",
+				 tcam_start_idx, try_time + 1, 0, indirect_ack.info.status);
+			return NBL_FAIL;
+		}
+
+		indirect_ack.info.done = 0;
+		try_time = NBL_ACL_RD_RETRY;
+	}
+	nbl_debug(common, NBL_DEBUG_FLOW, "-----clear acl flow:idx(depth):%d(%d)-----\n",
+		  tcam_start_idx, tcam_end_idx);
+	return NBL_OK;
+}
+
+static int nbl_acl_init_regs(struct nbl_phy_mgt *phy_mgt,
+			     struct nbl_chan_flow_init_info *param)
+{
+	/* set act priority */
+	nbl_acl_set_act_pri(phy_mgt);
+
+	/* read acl init done */
+	if (nbl_acl_check_init(phy_mgt))
+		return NBL_FAIL;
+
+	/* set flow-stat enable */
+	nbl_acl_flow_stat_on(phy_mgt);
+
+	/* set tcam info */
+	nbl_acl_set_tcam_info(phy_mgt, param->acl_cfg);
+
+	/* clear flow stat */
+	if (nbl_acl_flow_stat_clear(phy_mgt))
+		return NBL_FAIL;
+
+	/* clear key/mask/act tcam tab */
+	if (nbl_acl_flow_tcam_clear(phy_mgt, NBL_ACL_FLUSH_FLOW_BTM, 0, NBL_ACL_TCAM_DEPTH))
+		return NBL_FAIL;
+	return NBL_OK;
+}
+
+static int nbl_phy_init_acl_stats(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	/* init acl stat */
+	nbl_acl_flow_stat_on(phy_mgt);
+	/* clear flow stat */
+	if (nbl_acl_flow_stat_clear(phy_mgt))
+		return NBL_FAIL;
+	nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, "flow stat init: finished");
+	return 0;
+}
+
+static int nbl_phy_acl_unset_upcall_rule(void *priv, u8 idx)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	return nbl_acl_flow_tcam_clear(phy_mgt, NBL_ACL_FLUSH_UPCALL_BTM,
+				       idx * 2, idx * 2 + 2);
+}
+
+static void nbl_phy_acl_set_dport(int *action, u16 vsi_id)
+{
+	union nbl_action_data set_dport = {.data = 0};
+
+	set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST;
+	set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_UPCALL;
+	set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_NONE;
+	set_dport.dport.up.port_id = vsi_id;
+
+	*action = set_dport.data + (NBL_ACT_SET_DPORT << NBL_16BIT);
+}
+
+static int nbl_phy_acl_set_upcall_rule(void *priv, u8 idx, u16 vsi_id)
+{
+	int tcam_entry = idx << 1;
+	int fwd_act = 0;
+	int rd_retry = NBL_ACL_RD_RETRY;
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt);
+	union acl_action_ram15_u action_ram;
+	union acl_indirect_ctrl_u indirect_ctrl = {
+		.info.tcam_addr = tcam_entry,
+		.info.cpu_acl_cfg_start = 1,
+		.info.cpu_acl_cfg_rw = NBL_ACL_INDIRECT_ACCESS_WRITE,
+		.info.acc_btm = NBL_ACL_FLUSH_UPCALL_BTM,
+	};
+	union acl_indirect_access_ack_u indirect_ack;
+	union acl_valid_bit_u tcam_data_valid = {
+		.info.valid_bit = NBL_ACL_FLUSH_UPCALL_BTM,
+	};
+	union nbl_acl_tcam_upcall_data_u eth_data = {
+		.eth_pt_id = NBL_ACL_ETH_PF_UPCALL,
+	};
+	union nbl_acl_tcam_upcall_data_u eth_mask;
+	union nbl_acl_tcam_upcall_data_u vsi_data = {
+		.vsi_pt_id = NBL_ACL_VSI_PF_UPCALL,
+	};
+	union nbl_acl_tcam_upcall_data_u vsi_mask;
+
+	memset(&action_ram, 0, sizeof(action_ram));
+	memset(&indirect_ack, 0, sizeof(indirect_ack));
+	nbl_info(common, NBL_DEBUG_FLOW, "-----set acl tcam_cfg and act_cfg:%d-----\n", idx);
+	/* mask all fields default */
+	memset(&eth_mask, 0xff, sizeof(eth_mask));
+	eth_mask.eth_pt_id = 0;
+	eth_mask.eth_id = 0;
+
+	memset(&vsi_mask, 0xff, sizeof(vsi_mask));
+	vsi_mask.sw_id = 0;
+	vsi_mask.vsi_pt_id = 0;
+	/* eth acl rule */
+	nbl_phy_acl_set_dport(&fwd_act, NBL_GET_PF_VSI_ID(idx));
+	NBL_ACL_GET_ACTION_DATA(fwd_act, action_ram.info.Action0);
+	indirect_ctrl.info.tcam_addr = tcam_entry;
+	nbl_info(common, NBL_DEBUG_FLOW, "---addr:%d, size:%lu---\n",
+		 tcam_entry, sizeof(action_ram));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_TBL(NBL_ACL_TCAM_UPCALL_IDX, tcam_entry),
+			  (u8 *)action_ram.data, sizeof(action_ram));
+
+	eth_data.eth_id = NBL_GET_PF_ETH_ID(idx);
+	nbl_info(common, NBL_DEBUG_FLOW, "-----key(mask): %d(%d), %d(%d)\n",
+		 eth_data.eth_pt_id, eth_mask.eth_pt_id, eth_data.eth_id, eth_mask.eth_id);
+	nbl_tcam_truth_value_convert(&eth_data.tcam_data, &eth_mask.tcam_data);
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_X(NBL_ACL_TCAM_UPCALL_IDX),
+			  eth_data.data, sizeof(eth_data));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_Y(NBL_ACL_TCAM_UPCALL_IDX),
+			  eth_mask.data, sizeof(eth_mask));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_VALID_BIT_ADDR,
+			  (u8 *)&tcam_data_valid, sizeof(tcam_data_valid));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_INDIRECT_CTRL_ADDR,
+			  (u8 *)&indirect_ctrl, sizeof(indirect_ctrl));
+	do {
+		nbl_hw_read_regs(phy_mgt, NBL_ACL_INDIRECT_ACCESS_ACK_ADDR,
+				 (u8 *)&indirect_ack, sizeof(indirect_ack));
+		if (!indirect_ack.info.done) {
+			rd_retry--;
+			usleep_range(NBL_ACL_RD_WAIT_100US, NBL_ACL_RD_WAIT_200US);
+		} else {
+			break;
+		}
+	} while (rd_retry);
+
+	if (!indirect_ack.info.done) {
+		nbl_err(common, NBL_DEBUG_FLOW, "acl init flows error in pf%d\n", idx);
+		return -EIO;
+	}
+	memset(indirect_ack.data, 0, sizeof(indirect_ack));
+
+	/* vsi acl rule */
+	nbl_phy_acl_set_dport(&fwd_act, vsi_id);
+	NBL_ACL_GET_ACTION_DATA(fwd_act, action_ram.info.Action0);
+	indirect_ctrl.info.tcam_addr = ++tcam_entry;
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_TBL(NBL_ACL_TCAM_UPCALL_IDX, tcam_entry),
+			  (u8 *)&action_ram, sizeof(action_ram));
+
+	vsi_data.sw_id = idx;
+	nbl_info(common, NBL_DEBUG_FLOW, "-----key(mask):%d(%d), %d(%d)\n",
+		 vsi_data.vsi_pt_id, vsi_mask.vsi_pt_id, vsi_data.sw_id, vsi_mask.sw_id);
+	nbl_tcam_truth_value_convert(&vsi_data.tcam_data, &vsi_mask.tcam_data);
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_X(NBL_ACL_TCAM_UPCALL_IDX),
+			  vsi_data.data, sizeof(vsi_data));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_Y(NBL_ACL_TCAM_UPCALL_IDX),
+			  vsi_mask.data, sizeof(vsi_mask));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_VALID_BIT_ADDR,
+			  (u8 *)&tcam_data_valid, sizeof(tcam_data_valid));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_INDIRECT_CTRL_ADDR,
+			  (u8 *)&indirect_ctrl, sizeof(indirect_ctrl));
+	do {
+		nbl_hw_read_regs(phy_mgt, NBL_ACL_INDIRECT_ACCESS_ACK_ADDR,
+				 (u8 *)&indirect_ack, sizeof(indirect_ack));
+		if (!indirect_ack.info.done) {
+			rd_retry--;
+			usleep_range(NBL_ACL_RD_WAIT_100US, NBL_ACL_RD_WAIT_200US);
+		} else {
+			break;
+		}
+	} while (rd_retry);
+
+	if (!indirect_ack.info.done) {
+		nbl_err(common, NBL_DEBUG_FLOW, "acl init flows error in pf%d\n", idx);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void nbl_phy_uninit_acl(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt);
+	union acl_tcam_cfg_u acl_key_cfg;
+	union acl_action_ram_cfg_u acl_act_cfg;
+	union acl_loop_back_en_u loop_en;
+
+	memset(&acl_key_cfg, 0, sizeof(acl_key_cfg));
+	memset(&acl_act_cfg, 0, sizeof(acl_act_cfg));
+	memset(&loop_en, 0, sizeof(loop_en));
+
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_LOOP_BACK_EN_ADDR, (u8 *)&loop_en,
+			  sizeof(loop_en));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_ACL_VSI_PF_UPCALL),
+			  (u8 *)&acl_key_cfg, sizeof(union acl_tcam_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_ACL_VSI_PF_UPCALL),
+			  (u8 *)&acl_act_cfg, sizeof(union acl_action_ram_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_ACL_ETH_PF_UPCALL),
+			  (u8 *)&acl_key_cfg, sizeof(union acl_tcam_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_ACL_ETH_PF_UPCALL),
+			  (u8 *)&acl_act_cfg, sizeof(union acl_action_ram_cfg_u));
+	nbl_info(common, NBL_DEBUG_FLOW, "nbl uninit acl done\n");
+}
+
+static void nbl_phy_init_acl(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt);
+	union acl_tcam_cfg_u acl_key_cfg = {
+		.info.startcompare15 = 1,
+		.info.startset15 = 1,
+		.info.tcam15_enable = 1,
+		.info.key_id15 = 0,
+	};
+	union acl_action_ram_cfg_u acl_act_cfg = {
+		.info.action_ram15_enable = 1,
+		.info.action_ram15_alloc_id = NBL_ACL_TCAM_UPCALL_IDX,
+	};
+	union acl_loop_back_en_u loop_en = {
+		.info.loop_back_en = 1,
+	};
+
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_LOOP_BACK_EN_ADDR, (u8 *)&loop_en,
+			  sizeof(loop_en));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_ACL_VSI_PF_UPCALL),
+			  (u8 *)&acl_key_cfg, sizeof(union acl_tcam_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_ACL_VSI_PF_UPCALL),
+			  (u8 *)&acl_act_cfg, sizeof(union acl_action_ram_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_ACL_ETH_PF_UPCALL),
+			  (u8 *)&acl_key_cfg, sizeof(union acl_tcam_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_ACL_ETH_PF_UPCALL),
+			  (u8 *)&acl_act_cfg, sizeof(union acl_action_ram_cfg_u));
+	nbl_info(common, NBL_DEBUG_FLOW, "nbl init acl done\n");
+}
+
+static int nbl_ipro_init_regs(struct nbl_phy_mgt *phy_mgt)
+{
+	/* write error code for smac-spoof and vlan check */
+	union ipro_anti_fake_addr_errcode_u errcode_def = {
+		.info.num = NBL_ERROR_CODE_DN_SMAC,
+		.info.rsv = 0,
+	};
+	union ipro_anti_fake_addr_action_u default_drop = {
+		.info.dqueue = 0,
+		.info.dqueue_en = 0,
+		.info.proc_done = 1,
+		.info.set_dport_en = 1,
+		.info.set_dport = NBL_SET_DPORT(AUX_FWD_TYPE_UPCALL,
+						NEXT_STG_SEL_BYPASS,
+						SET_DPORT_TYPE_SP_PORT,
+						PORT_TYPE_SP_DROP),
+		.info.rsv = 0,
+	};
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: ipro errcode & actions");
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_ADDR,
+			  (u8 *)errcode_def.data, sizeof(errcode_def));
+	errcode_def.info.num = NBL_ERROR_CODE_VLAN;
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_VLAN_NUM_CHK_ERRCODE_ADDR,
+			  (u8 *)errcode_def.data, sizeof(errcode_def));
+
+	/* default drop for underlay pkt flt, smac-spoof and vlan check */
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_UDL_PKT_FLT_ACTION_ADDR,
+			  (u8 *)default_drop.data, sizeof(default_drop));
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_ANTI_FAKE_ADDR_ACTION_ADDR,
+			  (u8 *)default_drop.data, sizeof(default_drop));
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_VLAN_NUM_CHK_ACTION_ADDR,
+			  (u8 *)default_drop.data, sizeof(default_drop));
+
+	return NBL_OK;
+}
+
+static int nbl_pp_init_regs(struct nbl_phy_mgt *phy_mgt)
+{
+	u32 action_dport_pri = 0x3000;
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: pp action priority");
+	nbl_hw_write_regs(phy_mgt, NBL_PP0_ACTION_PRIORITY0_ADDR,
+			  (u8 *)&action_dport_pri, sizeof(action_dport_pri));
+	nbl_hw_write_regs(phy_mgt, NBL_PP0_ACTION_PRIORITY4_ADDR,
+			  (u8 *)&action_dport_pri, sizeof(action_dport_pri));
+
+	nbl_hw_write_regs(phy_mgt, NBL_PP1_ACTION_PRIORITY0_ADDR,
+			  (u8 *)&action_dport_pri, sizeof(action_dport_pri));
+	nbl_hw_write_regs(phy_mgt, NBL_PP1_ACTION_PRIORITY4_ADDR,
+			  (u8 *)&action_dport_pri, sizeof(action_dport_pri));
+
+	nbl_hw_write_regs(phy_mgt, NBL_PP2_ACTION_PRIORITY0_ADDR,
+			  (u8 *)&action_dport_pri, sizeof(action_dport_pri));
+	nbl_hw_write_regs(phy_mgt, NBL_PP2_ACTION_PRIORITY4_ADDR,
+			  (u8 *)&action_dport_pri, sizeof(action_dport_pri));
+	return NBL_OK;
+}
+
+static void nbl_fem_profile_table_action_set(struct nbl_phy_mgt *phy_mgt, u32 pp_id,
+					     u32 pt_idx, u16 vsi_id, bool is_set_upcall)
+{
+	union fem_em0_profile_table_u em_pt_tbl;
+	union fem_em0_profile_table_u em_pt_tbl_tmp;
+	union nbl_action_data set_dport = {.data = 0};
+
+	memset(&em_pt_tbl, 0, sizeof(em_pt_tbl));
+	memset(&em_pt_tbl_tmp, 0, sizeof(em_pt_tbl_tmp));
+	if (is_set_upcall) {
+		set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_ACL_S0;
+		set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_UPCALL;
+		set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST;
+		set_dport.dport.up.port_id = vsi_id;
+		em_pt_tbl_tmp.info.action0 = set_dport.data +
+					     (NBL_ACT_SET_DPORT << NBL_ACT_DATA_BITS);
+	}
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: pt upcall: %u %u %u", pp_id, pt_idx, vsi_id);
+	/* read profile table configured with P4 ELF, set the upcall action */
+	switch (pp_id) {
+	case NBL_PP_TYPE_0:
+		nbl_hw_read_regs(phy_mgt, NBL_FEM_EM0_PROFILE_TABLE_REG(pt_idx),
+				 (u8 *)em_pt_tbl.data,
+				 NBL_FEM_EM0_PROFILE_TABLE_DWLEN *
+				 NBL_BYTES_IN_REG);
+		em_pt_tbl.info.action0 = em_pt_tbl_tmp.info.action0;
+		nbl_hw_write_regs(phy_mgt, NBL_FEM_EM0_PROFILE_TABLE_REG(pt_idx),
+				  (u8 *)em_pt_tbl.data,
+				  NBL_FEM_EM0_PROFILE_TABLE_DWLEN *
+				  NBL_BYTES_IN_REG);
+		break;
+	case NBL_PP_TYPE_1:
+		nbl_hw_read_regs(phy_mgt, NBL_FEM_EM1_PROFILE_TABLE_REG(pt_idx),
+				 (u8 *)em_pt_tbl.data,
+				 NBL_FEM_EM0_PROFILE_TABLE_DWLEN *
+				 NBL_BYTES_IN_REG);
+		em_pt_tbl.info.action0 = em_pt_tbl_tmp.info.action0;
+		nbl_hw_write_regs(phy_mgt, NBL_FEM_EM1_PROFILE_TABLE_REG(pt_idx),
+				  (u8 *)em_pt_tbl.data,
+				  NBL_FEM_EM0_PROFILE_TABLE_DWLEN *
+				  NBL_BYTES_IN_REG);
+		break;
+	case NBL_PP_TYPE_2:
+		nbl_hw_read_regs(phy_mgt, NBL_FEM_EM2_PROFILE_TABLE_REG(pt_idx),
+				 (u8 *)em_pt_tbl.data,
+				 NBL_FEM_EM0_PROFILE_TABLE_DWLEN *
+				 NBL_BYTES_IN_REG);
+		em_pt_tbl.info.action0 = em_pt_tbl_tmp.info.action0;
+		nbl_hw_write_regs(phy_mgt, NBL_FEM_EM2_PROFILE_TABLE_REG(pt_idx),
+				  (u8 *)em_pt_tbl.data,
+				  NBL_FEM_EM0_PROFILE_TABLE_DWLEN *
+				  NBL_BYTES_IN_REG);
+		break;
+	default:
+		nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			  "wrong pp id for this profile");
+	}
+}
+
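+/* FEM init: distribute the HT/KT/AT memories across their banks per port
+ * and point the P4-configured profile entries at the upcall action.
+ */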
+ union nbl_action_data set_dport = {.data = 0}; + + memset(&em_pt_tbl, 0, sizeof(em_pt_tbl)); + memset(&em_pt_tbl_tmp, 0, sizeof(em_pt_tbl_tmp)); + if (is_set_upcall) { + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_ACL_S0; + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_UPCALL; + set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; + set_dport.dport.up.port_id = vsi_id; + em_pt_tbl_tmp.info.action0 = set_dport.data + + (NBL_ACT_SET_DPORT << NBL_ACT_DATA_BITS); + } + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: pt upcall: %u %u %u", pp_id, pt_idx, vsi_id); + /* read profile table configured with P4 ELF, set the upcall action */ + switch (pp_id) { + case NBL_PP_TYPE_0: + nbl_hw_read_regs(phy_mgt, NBL_FEM_EM0_PROFILE_TABLE_REG(pt_idx), + (u8 *)em_pt_tbl.data, + NBL_FEM_EM0_PROFILE_TABLE_DWLEN * + NBL_BYTES_IN_REG); + em_pt_tbl.info.action0 = em_pt_tbl_tmp.info.action0; + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM0_PROFILE_TABLE_REG(pt_idx), + (u8 *)em_pt_tbl.data, + NBL_FEM_EM0_PROFILE_TABLE_DWLEN * + NBL_BYTES_IN_REG); + break; + case NBL_PP_TYPE_1: + nbl_hw_read_regs(phy_mgt, NBL_FEM_EM1_PROFILE_TABLE_REG(pt_idx), + (u8 *)em_pt_tbl.data, + NBL_FEM_EM0_PROFILE_TABLE_DWLEN * + NBL_BYTES_IN_REG); + em_pt_tbl.info.action0 = em_pt_tbl_tmp.info.action0; + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM1_PROFILE_TABLE_REG(pt_idx), + (u8 *)em_pt_tbl.data, + NBL_FEM_EM0_PROFILE_TABLE_DWLEN * + NBL_BYTES_IN_REG); + break; + case NBL_PP_TYPE_2: + nbl_hw_read_regs(phy_mgt, NBL_FEM_EM2_PROFILE_TABLE_REG(pt_idx), + (u8 *)&em_pt_tbl.data, + NBL_FEM_EM0_PROFILE_TABLE_DWLEN * + NBL_BYTES_IN_REG); + em_pt_tbl.info.action0 = em_pt_tbl_tmp.info.action0; + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM2_PROFILE_TABLE_REG(pt_idx), + (u8 *)em_pt_tbl.data, + NBL_FEM_EM0_PROFILE_TABLE_DWLEN * + NBL_BYTES_IN_REG); + break; + default: + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "wrong pp id for this profile"); + } +} + +static int nbl_fem_init_regs(struct nbl_phy_mgt *phy_mgt, + struct nbl_chan_flow_init_info *param) +{ + u8 i = 0; + u32 bank_sel = 0; + struct nbl_flow_prf_data *prf_data; + union fem_ht_bank_sel_btm_u ht_bank_sel = {.info = {0}}; + + /* HT bank sel */ + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: fem bank selection"); + bank_sel = HT_PORT0_BANK_SEL | HT_PORT1_BANK_SEL << NBL_8BIT | + HT_PORT2_BANK_SEL << NBL_16BIT; + nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_BANK_SEL_BITMAP, + (u8 *)&bank_sel, sizeof(bank_sel)); + + /* KT bank sel */ + bank_sel = KT_PORT0_BANK_SEL | KT_PORT1_BANK_SEL << NBL_8BIT | + KT_PORT2_BANK_SEL << NBL_16BIT; + nbl_hw_write_regs(phy_mgt, NBL_FEM_KT_BANK_SEL_BITMAP, + (u8 *)&bank_sel, sizeof(bank_sel)); + + /* AT bank sel */ + bank_sel = AT_PORT0_BANK_SEL | AT_PORT1_BANK_SEL << NBL_16BIT; + nbl_hw_write_regs(phy_mgt, NBL_FEM_AT_BANK_SEL_BITMAP, + (u8 *)&bank_sel, sizeof(bank_sel)); + bank_sel = AT_PORT2_BANK_SEL; + nbl_hw_write_regs(phy_mgt, NBL_FEM_AT_BANK_SEL_BITMAP2, + (u8 *)&bank_sel, sizeof(bank_sel)); + + ht_bank_sel.info.port0_ht_depth = HT_PORT0_BTM; + ht_bank_sel.info.port1_ht_depth = HT_PORT1_BTM; + ht_bank_sel.info.port2_ht_depth = HT_PORT2_BTM; + nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_BANK_SEL_BTM_ADDR, + (u8 *)ht_bank_sel.data, sizeof(ht_bank_sel)); + + for (i = 0; i < param->flow_cfg.item_cnt; i++) { + prf_data = ¶m->flow_cfg.prf_data[i]; + nbl_fem_profile_table_action_set(phy_mgt, prf_data->pp_id, + prf_data->prf_id, param->vsi_id, true); + } + + return NBL_OK; +} + +static int nbl_mcc_init_regs(struct 
+static int nbl_mcc_init_regs(struct nbl_phy_mgt *phy_mgt)
+{
+	union mcc_action_priority_u act_pri = {
+		.info.dport_act_pri = 3,
+		.info.statidx_act_pri = 3,
+		.info.dqueue_act_pri = 3,
+	};
+
+	nbl_hw_write_regs(phy_mgt, NBL_MCC_ACTION_PRIORITY_ADDR,
+			  (u8 *)act_pri.data, sizeof(act_pri));
+	return NBL_OK;
+}
+
+static void nbl_ped_vlan_type_init(struct nbl_phy_mgt *phy_mgt)
+{
+	union dped_vlan_type0_u vlan_type = {.info = {0}};
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: vlan type init");
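+	/* the four TPID slots are programmed identically on UPED and DPED:
+	 * slot 0 = 0x8100 (802.1Q), slot 1 = 0x88a8 (802.1ad QinQ),
+	 * slots 2/3 = 0x9100/0x9200 (legacy QinQ TPIDs)
+	 */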
+	vlan_type.info.vau = RTE_ETHER_TYPE_VLAN;
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_VLAN_TYPE0_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_VLAN_TYPE0_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	vlan_type.info.vau = RTE_ETHER_TYPE_QINQ;
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_VLAN_TYPE1_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_VLAN_TYPE1_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	vlan_type.info.vau = RTE_ETHER_TYPE_QINQ1;
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_VLAN_TYPE2_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_VLAN_TYPE2_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	vlan_type.info.vau = RTE_ETHER_TYPE_QINQ2;
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_VLAN_TYPE3_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_VLAN_TYPE3_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+}
+
+static void nbl_ped_csum_cmd_init(struct nbl_phy_mgt *phy_mgt)
+{
+	union uped_l4_ck_cmd_50_u l4_ck_cmd_50 = {.info = {0}};
+	union uped_l4_ck_cmd_51_u l4_ck_cmd_51 = {.info = {0}};
+	union uped_l4_ck_cmd_60_u l4_ck_cmd_60 = {.info = {0}};
+	union uped_l4_ck_cmd_61_u l4_ck_cmd_61 = {.info = {0}};
+
+	l4_ck_cmd_50.info.len_in_oft = 0x2;
+	l4_ck_cmd_50.info.len_phid = 0x2;
+	l4_ck_cmd_50.info.data_vld = 0x1;
+	l4_ck_cmd_50.info.in_oft = 0x2;
+	l4_ck_cmd_50.info.phid = 0x3;
+	l4_ck_cmd_50.info.en = 0x1;
+
+	l4_ck_cmd_51.info.ck_start0 = 0xc;
+	l4_ck_cmd_51.info.ck_phid0 = 0x2;
+	l4_ck_cmd_51.info.ck_len0 = 0x8;
+	l4_ck_cmd_51.info.ck_phid1 = 0x3;
+	l4_ck_cmd_51.info.ck_vld1 = 0x1;
+
+	l4_ck_cmd_60.info.value = 0x62;
+	l4_ck_cmd_60.info.len_in_oft = 0x4;
+	l4_ck_cmd_60.info.len_phid = 0x2;
+	l4_ck_cmd_60.info.len_vld = 0x1;
+	l4_ck_cmd_60.info.data_vld = 0x1;
+	l4_ck_cmd_60.info.in_oft = 0x2;
+	l4_ck_cmd_60.info.phid = 0x3;
+	l4_ck_cmd_60.info.en = 0x1;
+
+	l4_ck_cmd_61.info.ck_start0 = 0x8;
+	l4_ck_cmd_61.info.ck_phid0 = 0x2;
+	l4_ck_cmd_61.info.ck_len0 = 0x20;
+	l4_ck_cmd_61.info.ck_vld0 = 0x1;
+	l4_ck_cmd_61.info.ck_phid1 = 0x3;
+	l4_ck_cmd_61.info.ck_vld1 = 0x1;
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: ped checksum commands");
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_L4_CK_CMD_50_ADDR,
+			  (u8 *)l4_ck_cmd_50.data, sizeof(l4_ck_cmd_50));
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_L4_CK_CMD_51_ADDR,
+			  (u8 *)l4_ck_cmd_51.data, sizeof(l4_ck_cmd_51));
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_L4_CK_CMD_60_ADDR,
+			  (u8 *)l4_ck_cmd_60.data, sizeof(l4_ck_cmd_60));
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_L4_CK_CMD_61_ADDR,
+			  (u8 *)l4_ck_cmd_61.data, sizeof(l4_ck_cmd_61));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_L4_CK_CMD_50_ADDR,
+			  (u8 *)l4_ck_cmd_50.data, sizeof(l4_ck_cmd_50));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_L4_CK_CMD_51_ADDR,
+			  (u8 *)l4_ck_cmd_51.data, sizeof(l4_ck_cmd_51));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_L4_CK_CMD_60_ADDR,
+			  (u8 *)l4_ck_cmd_60.data, sizeof(l4_ck_cmd_60));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_L4_CK_CMD_61_ADDR,
+			  (u8 *)l4_ck_cmd_61.data, sizeof(l4_ck_cmd_61));
+}
+
+static int nbl_ped_init_regs(struct nbl_phy_mgt *phy_mgt)
+{
+	nbl_ped_vlan_type_init(phy_mgt);
+	nbl_ped_csum_cmd_init(phy_mgt);
+	return NBL_OK;
+}
+
+static void nbl_flow_clear_tcam_ad(struct nbl_phy_mgt *phy_mgt)
+{
+	union fem_em0_tcam_table_u tcam_table;
+	union fem_em0_ad_table_u ad_table;
+	u8 *tcam_ptr = (u8 *)tcam_table.data;
+	u8 *ad_ptr = (u8 *)ad_table.data;
+	u16 i = 0;
+
+	memset(&tcam_table, 0, sizeof(tcam_table));
+	memset(&ad_table, 0, sizeof(ad_table));
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: to clear flow pp tcam");
+	for (; i < NBL_FEM_TCAM_MAX_NUM; i++) {
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM0_TCAM_TABLE_REG(i),
+				  tcam_ptr, sizeof(tcam_table));
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM0_AD_TABLE_REG(i),
+				  ad_ptr, sizeof(ad_table));
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM1_TCAM_TABLE_REG(i),
+				  tcam_ptr, sizeof(tcam_table));
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM1_AD_TABLE_REG(i),
+				  ad_ptr, sizeof(ad_table));
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM2_TCAM_TABLE_REG(i),
+				  tcam_ptr, sizeof(tcam_table));
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM2_AD_TABLE_REG(i),
+				  ad_ptr, sizeof(ad_table));
+		nbl_hw_rd32(phy_mgt, NBL_FEM_EM2_AD_TABLE_REG(i));
+	}
+}
+
+static union __maybe_unused epro_aft_u aft_def[NBL_FWD_TYPE_MAX] = {
+	[NBL_FWD_TYPE_NORMAL] = {
+		.data = BIT(NBL_ACT_SET_MCC) | BIT(NBL_ACT_SET_TAB_INDEX) |
+			BIT(NBL_ACT_SET_MIRROR),
+	},
+	[NBL_FWD_TYPE_CPU_ASSIGNED] = {
+		.data = BIT(NBL_ACT_SET_MCC) | BIT(NBL_ACT_SET_TAB_INDEX) |
+			BIT(NBL_ACT_SET_MIRROR),
+	},
+	[NBL_FWD_TYPE_UPCALL] = {
+		.data = BIT(NBL_ACT_SET_MCC) | BIT(NBL_ACT_SET_TAB_INDEX) |
+			BIT(NBL_ACT_SET_MIRROR) | BIT(NBL_ACT_SET_VNI0) |
+			BIT(NBL_ACT_SET_VNI1) | BIT(NBL_ACT_REP_IPV4_SIP) |
+			BIT(NBL_ACT_REP_IPV4_DIP) | BIT(NBL_ACT_REP_IPV6_SIP) |
+			BIT(NBL_ACT_REP_IPV6_DIP) | BIT(NBL_ACT_REP_DPORT) |
+			BIT(NBL_ACT_REP_SPORT) | BIT(NBL_ACT_REP_DMAC) |
+			BIT(NBL_ACT_REP_SMAC) | BIT(NBL_ACT_REP_IPV4_DSCP) |
+			BIT(NBL_ACT_REP_IPV6_DSCP) | BIT(NBL_ACT_REP_IPV4_TTL) |
+			BIT(NBL_ACT_REP_IPV6_TTL) | BIT(NBL_ACT_DEL_SVLAN) |
+			BIT(NBL_ACT_DEL_CVLAN) | BIT(NBL_ACT_REP_SVLAN) |
+			BIT(NBL_ACT_REP_CVLAN) | BIT(NBL_ACT_ADD_CVLAN) |
+			BIT(NBL_ACT_ADD_SVLAN) | BIT(NBL_ACT_TNL_ENCAP) |
+			BIT(NBL_ACT_TNL_DECAP) | BIT(NBL_ACT_REP_OUTER_SPORT) |
+			BIT(NBL_ACT_SET_PRI_MDF0),
+	},
+	[NBL_FWD_TYPE_SRC_MIRROR] = {
+		.data = BIT(NBL_ACT_SET_FLOW_STAT0) | BIT(NBL_ACT_SET_FLOW_STAT1) |
+			BIT(NBL_ACT_SET_RSS) | BIT(NBL_ACT_SET_TAB_INDEX) |
+			BIT(NBL_ACT_SET_MCC) | BIT(NBL_ACT_SET_VNI0) |
+			BIT(NBL_ACT_SET_VNI1) | BIT(NBL_ACT_SET_PRBAC) |
+			BIT(NBL_ACT_SET_DP_HASH0) | BIT(NBL_ACT_SET_DP_HASH1) |
+			BIT(NBL_ACT_SET_PRI_MDF0) | BIT(NBL_ACT_SET_FLOW_CAR) |
+			((u64)0xffffffff << 32),
+	},
+	[NBL_FWD_TYPE_OTHER_MIRROR] = {
+		.data = BIT(NBL_ACT_SET_FLOW_STAT0) | BIT(NBL_ACT_SET_FLOW_STAT1) |
+			BIT(NBL_ACT_SET_RSS) | BIT(NBL_ACT_SET_TAB_INDEX) |
+			BIT(NBL_ACT_SET_MCC) | BIT(NBL_ACT_SET_VNI0) |
+			BIT(NBL_ACT_SET_VNI1) | BIT(NBL_ACT_SET_PRBAC) |
+			BIT(NBL_ACT_SET_DP_HASH0) | BIT(NBL_ACT_SET_DP_HASH1) |
+			BIT(NBL_ACT_SET_PRI_MDF0),
+	},
+	[NBL_FWD_TYPE_MNG] = {.data = 0,},
+	[NBL_FWD_TYPE_GLB_LB] = {.data = 0,},
+	[NBL_FWD_TYPE_DROP] = {.data = 0,},
+};
+
+static void nbl_epro_act_pri_cfg(struct nbl_phy_mgt *phy_mgt)
+{
+	union epro_action_priority_u act_pri = {
+		.info.mirroridx = EPRO_ACT_MIRRORIDX_PRI,
+		.info.car = EPRO_ACT_CARIDX_PRI,
+		.info.dqueue = EPRO_ACT_DQUEUE_PRI,
+		.info.dport = EPRO_ACT_DPORT_PRI,
+		.info.pop_8021q = EPRO_ACT_POP_IVLAN_PRI,
+		.info.pop_qinq = EPRO_ACT_POP_OVLAN_PRI,
+		.info.replace_inner_vlan = EPRO_ACT_REPLACE_IVLAN_PRI,
+		.info.replace_outer_vlan = EPRO_ACT_REPLACE_OVLAN_PRI,
+		.info.push_inner_vlan = EPRO_ACT_PUSH_IVLAN_PRI,
+		.info.push_outer_vlan = EPRO_ACT_PUSH_OVLAN_PRI,
+		.info.outer_sport_mdf = EPRO_ACT_OUTER_SPORT_MDF_PRI,
+		.info.pri_mdf = EPRO_ACT_PRI_MDF_PRI,
+		.info.dp_hash0 = EPRO_ACT_DP_HASH0_PRI,
+		.info.dp_hash1 = EPRO_ACT_DP_HASH1_PRI,
+		.info.rsv = 0,
+	};
+	union epro_mirror_action_priority_u mir_act_pri = {
+		.info.car = EPRO_MIRROR_ACT_CARIDX_PRI,
+		.info.dqueue = EPRO_MIRROR_ACT_DQUEUE_PRI,
+		.info.dport = EPRO_MIRROR_ACT_DPORT_PRI,
+		.info.rsv = 0,
+	};
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: epro action priority");
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_ACTION_PRIORITY_ADDR,
+			  (u8 *)act_pri.data, sizeof(act_pri));
+
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_MIRROR_ACTION_PRIORITY_ADDR,
+			  (u8 *)mir_act_pri.data, sizeof(mir_act_pri));
+}
+
+static void nbl_epro_act_sel_en_cfg(struct nbl_phy_mgt *phy_mgt)
+{
+	union epro_act_sel_en_u act_sel_en = {
+		.info.rssidx_en = 1,
+		.info.dport_en = 1,
+		.info.mirroridx_en = 1,
+		.info.dqueue_en = 1,
+		.info.encap_en = 1,
+		.info.pop_8021q_en = 1,
+		.info.pop_qinq_en = 1,
+		.info.push_cvlan_en = 1,
+		.info.push_svlan_en = 1,
+		.info.replace_cvlan_en = 1,
+		.info.replace_svlan_en = 1,
+		.info.rsv = 0,
+	};
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: epro action enable");
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_ACT_SEL_EN_ADDR,
+			  (u8 *)act_sel_en.data, sizeof(act_sel_en));
+}
+
+static void nbl_epro_act_cfg_init(struct nbl_phy_mgt *phy_mgt)
+{
+	union epro_am_act_id0_u am_act_id0 = {.info = {0}};
+	union epro_am_act_id1_u am_act_id1 = {.info = {0}};
+	union epro_am_act_id2_u am_act_id2 = {.info = {0}};
+	union epro_am_act_id3_u am_act_id3 = {.info = {0}};
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: epro action id");
+	am_act_id0.info.replace_cvlan = NBL_ACT_REP_CVLAN;
+	am_act_id0.info.replace_svlan = NBL_ACT_REP_SVLAN;
+	am_act_id0.info.push_cvlan = NBL_ACT_ADD_CVLAN;
+	am_act_id0.info.push_svlan = NBL_ACT_ADD_SVLAN;
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_AM_ACT_ID0_ADDR,
+			  (u8 *)am_act_id0.data, sizeof(am_act_id0));
+	am_act_id1.info.pop_qinq = NBL_ACT_DEL_CVLAN;
+	am_act_id1.info.pop_8021q = NBL_ACT_DEL_SVLAN;
+	am_act_id1.info.dport = NBL_ACT_SET_DPORT;
+	am_act_id1.info.dqueue = NBL_ACT_SET_QUE_IDX;
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_AM_ACT_ID1_ADDR,
+			  (u8 *)am_act_id1.data, sizeof(am_act_id1));
+	am_act_id2.info.rssidx = NBL_ACT_SET_RSS;
+	am_act_id2.info.mirroridx = NBL_ACT_SET_MIRROR;
+	am_act_id2.info.car = NBL_ACT_SET_CAR;
+	am_act_id2.info.encap = NBL_ACT_TNL_ENCAP;
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_AM_ACT_ID2_ADDR,
+			  (u8 *)am_act_id2.data, sizeof(am_act_id2));
+	am_act_id3.info.outer_sport_mdf = NBL_ACT_REP_OUTER_SPORT;
+	am_act_id3.info.pri_mdf = NBL_ACT_SET_PRI_MDF0;
+	am_act_id3.info.dp_hash0 = NBL_ACT_SET_DP_HASH0;
+	am_act_id3.info.dp_hash1 = NBL_ACT_SET_DP_HASH1;
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_AM_ACT_ID3_ADDR,
+			  (u8 *)am_act_id3.data, sizeof(am_act_id3));
+
+	nbl_epro_act_pri_cfg(phy_mgt);
+	nbl_epro_act_sel_en_cfg(phy_mgt);
+}
+
+static int nbl_epro_init_regs(struct nbl_phy_mgt *phy_mgt)
+{
+	u32 fwd_type = 0;
+	union epro_rss_sk_u rss_sk_def;
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: epro rss");
+	/* init default rss toeplitz hash key */
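+	/* sk_arr holds 10 x 32-bit words, i.e. the usual 40-byte Toeplitz
+	 * key, filled here with a repeated default pattern
+	 */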
+	rss_sk_def.info.sk_arr[0] = NBL_EPRO_RSS_KEY_32;
+	rss_sk_def.info.sk_arr[1] = NBL_EPRO_RSS_KEY_32;
+	rss_sk_def.info.sk_arr[2] = NBL_EPRO_RSS_KEY_32;
+	rss_sk_def.info.sk_arr[3] = NBL_EPRO_RSS_KEY_32;
+	rss_sk_def.info.sk_arr[4] = NBL_EPRO_RSS_KEY_32;
+	rss_sk_def.info.sk_arr[5] = NBL_EPRO_RSS_KEY_32;
+	rss_sk_def.info.sk_arr[6] = NBL_EPRO_RSS_KEY_32;
+	rss_sk_def.info.sk_arr[7] = NBL_EPRO_RSS_KEY_32;
+	rss_sk_def.info.sk_arr[8] = NBL_EPRO_RSS_KEY_32;
+	rss_sk_def.info.sk_arr[9] = NBL_EPRO_RSS_KEY_32;
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_SK_ADDR, (u8 *)rss_sk_def.data,
+			  sizeof(rss_sk_def));
+
+	nbl_epro_act_cfg_init(phy_mgt);
+
+	for (fwd_type = 0; fwd_type < NBL_FWD_TYPE_MAX; fwd_type++)
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_AFT_REG(fwd_type),
+				  (u8 *)&aft_def[fwd_type].data, sizeof(union epro_aft_u));
+
+	return NBL_OK;
+}
+
+static int nbl_phy_flow_init(void *priv, void *param)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_chan_flow_init_info *info =
+		(struct nbl_chan_flow_init_info *)param;
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: start");
+	nbl_hw_wr32(phy_mgt, NBL_FEM_INIT_START_ADDR, NBL_FEM_INIT_START_VALUE);
+	nbl_flow_clear_tcam_ad(phy_mgt);
+	nbl_ipro_init_regs(phy_mgt);
+	nbl_pp_init_regs(phy_mgt);
+	nbl_fem_init_regs(phy_mgt, info);
+	nbl_mcc_init_regs(phy_mgt);
+	nbl_acl_init_regs(phy_mgt, info);
+	nbl_epro_init_regs(phy_mgt);
+	nbl_ped_init_regs(phy_mgt);
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: finished");
+
+	return NBL_OK;
+}
+
+static void nbl_phy_clear_profile_table_action(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u8 i = 0;
+	u8 pp_id = 0;
+	u8 prf_id = 0;
+
+	for (i = NBL_PP1_PROFILE_ID_MIN; i <= NBL_PP2_PROFILE_ID_MAX; i++) {
+		pp_id = i / NBL_PP_PROFILE_NUM;
+		prf_id = i % NBL_PP_PROFILE_NUM;
+		nbl_fem_profile_table_action_set(phy_mgt, pp_id,
+						 prf_id, 0, false);
+	}
+}
+
+static int nbl_phy_flow_deinit(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow deinit: start");
+
+	nbl_phy_clear_profile_table_action(phy_mgt);
+	/* clear FEM & ACL tcams */
+	nbl_flow_clear_tcam_ad(phy_mgt);
+	nbl_acl_flow_tcam_clear(phy_mgt, NBL_ACL_FLUSH_FLOW_BTM, 0, NBL_ACL_TCAM_DEPTH);
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow deinit: finished");
+	return NBL_OK;
+}
+
+static int nbl_phy_flow_get_acl_switch(void *priv, u8 *acl_enable)
+{
+	union acl_init_done_u init_done;
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	init_done.data[0] = nbl_hw_rd32(phy_mgt, NBL_ACL_INIT_DONE_ADDR);
+	*acl_enable = init_done.info.done;
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "acl switch: %u", *acl_enable);
+	return 0;
+}
+
+static void nbl_phy_get_line_rate_info(void *priv, void *data, void *result)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_rep_line_rate_info *req = (struct nbl_rep_line_rate_info *)data;
+	struct nbl_rep_line_rate_info *resp = (struct nbl_rep_line_rate_info *)result;
+	u16 table_id = req->func_id;
+	union epro_vpt_u *vpt = (union epro_vpt_u *)resp->data;
+
+	struct dsch_vn_sha2net_map_tbl *sha2net =
+		(struct dsch_vn_sha2net_map_tbl *)(resp->data + NBL_EPRO_VPT_DWLEN);
+
+	nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(req->vsi_id),
+			 (u8 *)vpt->data,
+			 NBL_EPRO_VPT_DWLEN * NBL_BYTES_IN_REG);
+	nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TBL_REG(table_id),
+			 (u8 *)sha2net,
+			 NBL_DSCH_VN_SHA2NET_MAP_TBL_DWLEN * NBL_BYTES_IN_REG);
+}
+
+static void nbl_and_parsed_reg(u32 *ptr, u32 *value, u32 reg_len)
+{
+	u32 idx = 0;
+
+	for (idx = 0; idx < reg_len; idx++) {
+		*value = (*value) & (*ptr);
+		value++;
+		ptr++;
+	}
+}
+
+static void nbl_or_parsed_reg(u32 *ptr, u32 *value, u32 reg_len)
+{
+	u32 idx = 0;
+
+	for (idx = 0; idx < reg_len; idx++) {
+		*value = (*value) | (*ptr);
+		value++;
+		ptr++;
+	}
+}
+
+static void nbl_write_parsed_reg(struct nbl_phy_mgt *phy_mgt,
+				 struct nbl_chan_regs_info *reg_info, u32 *value)
+{
+	u32 *ptr = (u32 *)reg_info->data;
+	u32 reg_len = reg_info->data_len;
+
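+	/* for the read-modify-write modes, value already holds the current
+	 * register contents (filled by nbl_read_parsed_reg); the payload is
+	 * OR-ed in, AND-ed in, or - for OR_AND - split in halves: first
+	 * half OR data, second half AND mask
+	 */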
+	if (reg_info->mode == NBL_FLOW_READ_OR_WRITE_MODE) {
+		nbl_or_parsed_reg(ptr, value, reg_len);
+	} else if (reg_info->mode == NBL_FLOW_READ_AND_WRITE_MODE) {
+		nbl_and_parsed_reg(ptr, value, reg_len);
+	} else if (reg_info->mode == NBL_FLOW_READ_OR_AND_WRITE_MODE) {
+		reg_len = reg_len / 2;
+		nbl_or_parsed_reg(ptr, value, reg_len);
+		nbl_and_parsed_reg(ptr + reg_len, value, reg_len);
+	} else {
+		/* point the value to mailbox received data */
+		value = reg_info->data;
+	}
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "send regs to write(%u): size %u, depth %u, data %u",
+		  reg_info->tbl_name, reg_len, reg_info->depth, reg_info->data[0]);
+
+	switch (reg_info->tbl_name) {
+	case NBL_FLOW_EPRO_ECPVPT_REG:
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_ECPVPT_REG(reg_info->depth),
+				  (u8 *)value, NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EPRO_ECPIPT_REG:
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_ECPIPT_REG(reg_info->depth),
+				  (u8 *)value, NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_DPED_TAB_TNL_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_DPED_TAB_TNL_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_DPED_REPLACE:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_DPED_TAB_REPLACE_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_UPED_REPLACE:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_UPED_TAB_REPLACE_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_DPED_MIRROR_TABLE:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_DPED_TAB_MIR_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_DPED_MIR_CMD_0_TABLE:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_DPED_MIR_CMD_0_TABLE(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EPRO_MT_REG:
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_MT_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EM0_TCAM_TABLE_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM0_TCAM_TABLE_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EM1_TCAM_TABLE_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM1_TCAM_TABLE_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EM2_TCAM_TABLE_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM2_TCAM_TABLE_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EM0_AD_TABLE_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM0_AD_TABLE_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EM1_AD_TABLE_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM1_AD_TABLE_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EM2_AD_TABLE_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_FEM_EM2_AD_TABLE_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_IPRO_UDL_PKT_FLT_DMAC_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_IPRO_UDL_PKT_FLT_DMAC_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_IPRO_UDL_PKT_FLT_CTRL_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_IPRO_UDL_PKT_FLT_CTRL_ADDR,
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_ACTION_RAM_TBL:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_ACL_ACTION_RAM_TBL(reg_info->ram_id,
+							 reg_info->s_depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_MCC_TBL_REG:
+		nbl_hw_write_regs(phy_mgt, NBL_MCC_TBL_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EPRO_EPT_REG:
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_IPRO_UP_SRC_PORT_TBL_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_IPRO_UP_SRC_PORT_TBL_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_UCAR_FLOW_REG:
+		nbl_hw_write_regs(phy_mgt, NBL_UCAR_FLOW_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EPRO_VPT_REG:
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_UCAR_FLOW_TIMMING_ADD_ADDR:
+		nbl_hw_write_regs(phy_mgt, NBL_UCAR_FLOW_TIMMING_ADD_ADDR,
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_SHAPING_GRP_TIMMING_ADD_ADDR:
+		nbl_hw_write_regs(phy_mgt, NBL_SHAPING_GRP_TIMMING_ADD_ADDR,
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_SHAPING_GRP_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_SHAPING_GRP_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_DSCH_VN_SHA2GRP_MAP_TBL_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_DSCH_VN_SHA2GRP_MAP_TBL_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_DSCH_VN_GRP2SHA_MAP_TBL_REG:
+		nbl_hw_write_regs(phy_mgt,
+				  NBL_DSCH_VN_GRP2SHA_MAP_TBL_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_SHAPING_DPORT_TIMMING_ADD_ADDR:
+		nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DPORT_TIMMING_ADD_ADDR,
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_SHAPING_DPORT_REG:
+		nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DPORT_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_DSCH_PSHA_EN_ADDR:
+		nbl_hw_write_regs(phy_mgt, NBL_DSCH_PSHA_EN_ADDR,
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_UCAR_FLOW_4K_REG:
+		nbl_hw_write_regs(phy_mgt, NBL_UCAR_FLOW_4K_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_UCAR_FLOW_4K_TIMMING_ADD_ADDR:
+		nbl_hw_write_regs(phy_mgt, NBL_UCAR_FLOW_4K_TIMMING_ADD_ADDR,
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_SHAPING_NET_TIMMING_ADD_ADDR:
+		nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET_TIMMING_ADD_ADDR,
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_SHAPING_NET_REG:
+	case NBL_FLOW_DSCH_VN_NET2SHA_MAP_TBL_REG:
+	case NBL_FLOW_DSCH_VN_SHA2NET_MAP_TBL_REG:
+		nbl_phy_set_offload_shaping(phy_mgt, reg_info, value);
+		break;
+	case NBL_FLOW_UCAR_CAR_CTRL_ADDR:
+		nbl_hw_write_regs(phy_mgt, NBL_UCAR_CAR_CTRL_ADDR,
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_UPED_VSI_TYPE_REG:
+		nbl_hw_write_regs(phy_mgt, NBL_UPED_TAB_VSI_TYPE_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_DPED_VSI_TYPE_REG:
+		nbl_hw_write_regs(phy_mgt, NBL_DPED_TAB_VSI_TYPE_REG(reg_info->depth),
+				  (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	default:
+		nbl_err(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			"send regs: unrecognized register(%u) to write, will not handle",
+			reg_info->tbl_name);
+		break;
+	}
+}
+
+static void nbl_read_parsed_reg(struct nbl_phy_mgt *phy_mgt,
+				struct nbl_chan_regs_info *reg_info, u32 *value)
+{
+	u32 reg_len = reg_info->data_len;
+
+	/* in this mode, both or-data and and-data are sent */
+	if (reg_info->mode == NBL_FLOW_READ_OR_AND_WRITE_MODE)
+		reg_len = reg_len / 2;
+
+	if (reg_len > NBL_CHAN_REG_MAX_LEN) {
+		nbl_err(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			"send regs: read length longer than data allocated");
+		return;
+	}
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "send regs to read(%u): size %u, depth %u, data %u",
+		  reg_info->tbl_name, reg_len, reg_info->depth, reg_info->data[0]);
+
+	switch (reg_info->tbl_name) {
+	case NBL_FLOW_EPRO_ECPVPT_REG:
+		nbl_hw_read_regs(phy_mgt, NBL_EPRO_ECPVPT_REG(reg_info->depth),
+				 (u8 *)value, NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EPRO_ECPIPT_REG:
+		nbl_hw_read_regs(phy_mgt, NBL_EPRO_ECPIPT_REG(reg_info->depth),
+				 (u8 *)value, NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EPRO_EPT_REG:
+		nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_REG(reg_info->depth),
+				 (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_IPRO_UP_SRC_PORT_TBL_REG:
+		nbl_hw_read_regs(phy_mgt,
+				 NBL_IPRO_UP_SRC_PORT_TBL_REG(reg_info->depth),
+				 (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_EPRO_VPT_REG:
+		nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(reg_info->depth),
+				 (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_DSCH_PSHA_EN_ADDR:
+		nbl_hw_read_regs(phy_mgt, NBL_DSCH_PSHA_EN_ADDR,
+				 (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_UCAR_CAR_CTRL_ADDR:
+		nbl_hw_read_regs(phy_mgt, NBL_UCAR_CAR_CTRL_ADDR,
+				 (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_UCAR_GREEN_CELL_ADDR:
+		nbl_hw_read_regs(phy_mgt,
+				 (NBL_UCAR_GREEN_CELL_ADDR + reg_info->depth),
+				 (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	case NBL_FLOW_UCAR_GREEN_PKT_ADDR:
+		nbl_hw_read_regs(phy_mgt,
+				 (NBL_UCAR_GREEN_PKT_ADDR + reg_info->depth),
+				 (u8 *)value, reg_len * NBL_BYTES_IN_REG);
+		break;
+	default:
+		nbl_err(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			"send regs: unrecognized register(%u) to read, will not handle",
+			reg_info->tbl_name);
+		break;
+	}
+}
+
+static int nbl_phy_offload_flow_rule(void *priv, void *param)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_chan_bulk_regs_info *hdr_info =
+		(struct nbl_chan_bulk_regs_info *)param;
+	struct nbl_chan_regs_info *reg_info =
+		(struct nbl_chan_regs_info *)(hdr_info + 1);
+	u8 regs_count = hdr_info->item_cnt;
+	u32 value[NBL_CHAN_REG_MAX_LEN] = { 0 };
+	u8 i;
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "send regs: flow regs received: to parse and read/write: "
+		  "regs info: count %u, total size %u, "
+		  "1st reg: table %u, mode %u, size %u, depth %u, data %u",
+		  hdr_info->item_cnt, hdr_info->data_len,
+		  reg_info->tbl_name, reg_info->mode, reg_info->data_len,
+		  reg_info->depth, reg_info->data[0]);
+
+	if (reg_info->data_len == 0 || regs_count == 0) {
+		nbl_err(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			"send regs: reg count or data length invalid");
+		return -EINVAL;
+	}
+
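+	/* the bulk message is a packed stream: each nbl_chan_regs_info
+	 * header is expected to be followed by its data_len dwords of
+	 * payload, repeated item_cnt times
+	 */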
+	for (i = 0; i < regs_count; i++) {
+		if (reg_info->mode == NBL_FLOW_READ_MODE) {
+			nbl_read_parsed_reg(phy_mgt, reg_info, value);
+		} else if (reg_info->mode == NBL_FLOW_WRITE_MODE) {
+			nbl_write_parsed_reg(phy_mgt, reg_info, value);
+		} else if (reg_info->mode == NBL_FLOW_READ_OR_WRITE_MODE ||
+			   reg_info->mode == NBL_FLOW_READ_AND_WRITE_MODE ||
+			   reg_info->mode == NBL_FLOW_READ_OR_AND_WRITE_MODE) {
+			nbl_read_parsed_reg(phy_mgt, reg_info, value);
+			nbl_write_parsed_reg(phy_mgt, reg_info, value);
+		} else {
+			nbl_err(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+				"failed parsing reg info: unrecognized mode: "
+				"tab %u, mode %u, size %u", reg_info->tbl_name,
+				reg_info->mode, reg_info->data_len);
+		}
+
+		/* step past the item header plus its data_len dwords of
+		 * payload (assumes data[] is the trailing member)
+		 */
+		reg_info = (struct nbl_chan_regs_info *)
+			   ((u8 *)(reg_info + 1) +
+			    reg_info->data_len * NBL_BYTES_IN_REG);
+	}
+
+	return NBL_OK;
+}
+
+static void
+nbl_repr_eth_dev_ipro_dn_init(struct nbl_phy_mgt *phy_mgt, u16 vsi_id)
+{
+	union ipro_dn_src_port_tbl_u dn_src_port = {.info = {0}};
+
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id),
+			 (u8 *)dn_src_port.data,
+			 NBL_IPRO_DN_SRC_PORT_TBL_DWLEN * NBL_BYTES_IN_REG);
+	dn_src_port.info.phy_flow = 0;
+	dn_src_port.info.set_dport_en = 0;
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id),
+			  (u8 *)dn_src_port.data,
+			  NBL_IPRO_DN_SRC_PORT_TBL_DWLEN * NBL_BYTES_IN_REG);
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "init rep: ipro dn src written");
+}
+
+static void
+nbl_repr_eth_dev_ipro_up_src_init(struct nbl_phy_mgt *phy_mgt, u16 eth_id)
+{
+	union ipro_up_src_port_tbl_u up_src_port = {.info = {0}};
+
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_UP_SRC_PORT_TBL_REG(eth_id),
+			 (u8 *)up_src_port.data,
+			 NBL_IPRO_UP_SRC_PORT_TBL_DWLEN * NBL_BYTES_IN_REG);
+	up_src_port.info.phy_flow = 0;
+	up_src_port.info.set_dport_en = 0;
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_UP_SRC_PORT_TBL_REG(eth_id),
+			  (u8 *)up_src_port.data,
+			  NBL_IPRO_UP_SRC_PORT_TBL_DWLEN * NBL_BYTES_IN_REG);
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "init rep: ipro up src written");
+}
+
+static void
+nbl_ped_port_vlan_type_cfg(struct nbl_phy_mgt *phy_mgt, u32 port_id,
+			   enum nbl_ped_vlan_type_e type,
+			   enum nbl_ped_vlan_tpid_e tpid)
+{
+	union nbl_ped_port_vlan_type_u cfg = {.info = {0}};
+
+	if (port_id >= NBL_DPED_VLAN_TYPE_PORT_NUM || tpid >= PED_VLAN_TYPE_NUM) {
+		nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			  "port_id %u exceeds the max num %u.",
+			  port_id, NBL_DPED_VLAN_TYPE_PORT_NUM);
+		return;
+	}
+
+	nbl_hw_read_regs(phy_mgt, NBL_DPED_TAB_VSI_TYPE_REG(port_id),
+			 (u8 *)cfg.data, NBL_BYTES_IN_REG);
+	switch (type) {
+	case INNER_VLAN_TYPE:
+		cfg.info.i_vlan_sel = tpid & 0b11;
+		break;
+	case OUTER_VLAN_TYPE:
+		cfg.info.o_vlan_sel = tpid & 0b11;
+		break;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_TAB_VSI_TYPE_REG(port_id),
+			  (u8 *)cfg.data, NBL_BYTES_IN_REG);
+
+	nbl_hw_read_regs(phy_mgt, NBL_UPED_TAB_VSI_TYPE_REG(port_id),
+			 (u8 *)cfg.data, NBL_BYTES_IN_REG);
+	switch (type) {
+	case INNER_VLAN_TYPE:
+		cfg.info.i_vlan_sel = tpid & 0b11;
+		break;
+	case OUTER_VLAN_TYPE:
+		cfg.info.o_vlan_sel = tpid & 0b11;
+		break;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_TAB_VSI_TYPE_REG(port_id),
+			  (u8 *)cfg.data, NBL_BYTES_IN_REG);
+}
+
+static int nbl_phy_init_rep(void *priv, u16 vsi_id, u8 inner_type,
+			    u8 outer_type, u8 rep_type)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union epro_vpt_u vpt = {.info = {0}};
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "init rep: vsi id %u, rep type %u", vsi_id, rep_type);
+	if (rep_type == NBL_ETHDEV_PF_REP ||
+	    rep_type == NBL_ETHDEV_VIRTIO_REP) {
+		nbl_repr_eth_dev_ipro_dn_init(phy_mgt, vsi_id);
+		/* configure vlan tpid type for vsi */
+		nbl_ped_port_vlan_type_cfg(phy_mgt, vsi_id, INNER_VLAN_TYPE,
+					   inner_type);
+		nbl_ped_port_vlan_type_cfg(phy_mgt, vsi_id, OUTER_VLAN_TYPE,
+					   outer_type);
+	} else if (rep_type == NBL_ETHDEV_ETH_REP) {
+		vsi_id = vsi_id - NBL_ETH_REP_INFO_BASE;
+		nbl_repr_eth_dev_ipro_up_src_init(phy_mgt, vsi_id);
+		/* configure vlan tpid type for eth */
+		nbl_ped_port_vlan_type_cfg(phy_mgt,
+					   (vsi_id + NBL_PED_VSI_TYPE_ETH_BASE),
+					   INNER_VLAN_TYPE, inner_type);
+		nbl_ped_port_vlan_type_cfg(phy_mgt,
+					   (vsi_id + NBL_PED_VSI_TYPE_ETH_BASE),
+					   OUTER_VLAN_TYPE, outer_type);
+	}
+
+	/* init rss l4 */
+	if (rep_type == NBL_ETHDEV_PF_REP || rep_type == NBL_ETHDEV_VIRTIO_REP) {
+		nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)vpt.data,
+				 NBL_EPRO_VPT_DWLEN * NBL_BYTES_IN_REG);
+		vpt.info.rss_alg_sel = NBL_SYM_TOEPLITZ_INT;
+		vpt.info.rss_key_type_btm = NBL_KEY_IP4_L4_RSS_BIT | NBL_KEY_IP6_L4_RSS_BIT;
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)vpt.data,
+				  NBL_EPRO_VPT_DWLEN * NBL_BYTES_IN_REG);
+		nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			  "init rep: epro rss written");
+	}
+
+	return NBL_OK;
+}
+
+static int nbl_phy_init_vdpaq(void *priv, u16 func_id, u16 bdf, u64 pa, u32 size)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union pcompleter_host_cfg_funtion_id_vdpa_net_u cfg_func_id = {
+		.info.dbg = func_id,
+		.info.vld = 1,
+	};
+
+	/* disable vdpa queue */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_EN_ADDR, 0);
+
+	/* cfg vdpa queue base */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_BASE_ADDR_L_ADDR, (u32)pa);
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_BASE_ADDR_H_ADDR, (u32)(pa >> 32));
+
+	/* cfg vdpa queue size */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_SIZE_MASK_ADDR, size - 1);
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_TPNTR_ADDR, size);
+
+	/* reset vdpa queue head */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_HPNTR_RST_ADDR, 1);
+
+	/* cfg vdpa queue bdf */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_DIF_BDF_ADDR, bdf);
+
+	nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_HOST_CFG_FUNTION_ID_VDPA_NET_ADDR,
+			  (u8 *)&cfg_func_id, sizeof(cfg_func_id));
+
+	/* all registers set, enable vdpa queue again */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_EN_ADDR, 1);
+
+	return 0;
+}
+
+static void nbl_phy_destroy_vdpaq(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_SIZE_MASK_ADDR, 0);
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_BASE_ADDR_L_ADDR, 0);
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_BASE_ADDR_H_ADDR, 0);
+
+	/* reset the head */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_HPNTR_RST_ADDR, 1);
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_EN_ADDR, 0);
+}
+
+static const u32 nbl_phy_reg_dump_list[] = {
+	NBL_TOP_CTRL_VERSION_INFO,
+	NBL_TOP_CTRL_VERSION_DATE,
+};
+
+static void nbl_phy_get_reg_dump(void *priv, u32 *data, u32 len)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nbl_phy_reg_dump_list) && i < len; i++)
+		nbl_hw_read_regs(phy_mgt, nbl_phy_reg_dump_list[i],
+				 (u8 *)&data[i], sizeof(data[i]));
+}
+
+static int nbl_phy_get_reg_dump_len(void *priv)
+{
+	return ARRAY_SIZE(nbl_phy_reg_dump_list) * sizeof(u32);
+}
+
+/* return value needs converting to millidegrees Celsius (1/1000 deg C) */
+static u32 nbl_phy_get_chip_temperature(void *priv, enum nbl_hwmon_type type, u32 senser_id)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u32 temp = 0;
+
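+	/* TVSENSOR0 packs two readings: bits [8:0] hold the current die
+	 * temperature in degrees C and bits [31:16] the highest recorded
+	 * value; both are scaled by 1000 for the hwmon interface
+	 */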
+	switch (type) {
+	case NBL_HWMON_TEMP_INPUT:
+		temp = nbl_hw_rd32(phy_mgt, NBL_TOP_CTRL_TVSENSOR0);
+		temp = (temp & 0x1ff) * 1000;
+		break;
+	case NBL_HWMON_TEMP_MAX:
+		temp = NBL_LEONIS_TEMP_MAX * 1000;
+		break;
+	case NBL_HWMON_TEMP_CRIT:
+		temp = NBL_LEONIS_TEMP_CRIT * 1000;
+		break;
+	case NBL_HWMON_TEMP_HIGHEST:
+		temp = nbl_hw_rd32(phy_mgt, NBL_TOP_CTRL_TVSENSOR0);
+		temp = (temp >> 16) * 1000;
+		break;
+	default:
+		break;
+	}
+	return temp;
+}
+
+static struct nbl_phy_ped_tbl ped_tbl[NBL_FLOW_PED_RECORD_MAX] = {
+	[NBL_FLOW_PED_UMAC_TYPE] = {.addr = NBL_UPED_TAB_REPLACE_ADDR,
+				    .addr_len = NBL_UPED_TAB_REPLACE_DWLEN,},
+	[NBL_FLOW_PED_DMAC_TYPE] = {.addr = NBL_DPED_TAB_REPLACE_ADDR,
+				    .addr_len = NBL_DPED_TAB_REPLACE_DWLEN,},
+	[NBL_FLOW_PED_UIP_TYPE] = {.addr = NBL_UPED_TAB_REPLACE_ADDR,
+				   .addr_len = NBL_UPED_TAB_REPLACE_DWLEN,},
+	[NBL_FLOW_PED_DIP_TYPE] = {.addr = NBL_DPED_TAB_REPLACE_ADDR,
+				   .addr_len = NBL_DPED_TAB_REPLACE_DWLEN,},
+	[NBL_FLOW_PED_UIP6_TYPE] = {.addr = NBL_UPED_TAB_REPLACE_ADDR,
+				    .addr_len = NBL_UPED_TAB_REPLACE_DWLEN,},
+	[NBL_FLOW_PED_DIP6_TYPE] = {.addr = NBL_DPED_TAB_REPLACE_ADDR,
+				    .addr_len = NBL_DPED_TAB_REPLACE_DWLEN,},
+};
+
+static void nbl_phy_write_ped_tbl(void *priv, u8 *data, u16 idx, enum nbl_flow_ped_type ped_type)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u64 reg;
+
+#define NBL_PHY_PED_ADDR_REG(addr, idx, size) ((addr) + (idx) * (size) * 4)
+	/* for the IPv6 ped types we also need to write the high half (ped_h) */
+	if (ped_type == NBL_FLOW_PED_UIP6_TYPE || ped_type == NBL_FLOW_PED_DIP6_TYPE) {
+		/* write high 64-bit first then update data and idx for common write */
+		data += ped_tbl[ped_type].addr_len * 4;
+		reg = NBL_PHY_PED_ADDR_REG(ped_tbl[ped_type].addr, idx,
+					   ped_tbl[ped_type].addr_len);
+		nbl_hw_write_regs(phy_mgt, reg, data, ped_tbl[ped_type].addr_len * 4);
+		idx += NBL_TC_MAX_PED_H_IDX;
+		data -= ped_tbl[ped_type].addr_len * 4;
+	}
+
+	reg = NBL_PHY_PED_ADDR_REG(ped_tbl[ped_type].addr, idx, ped_tbl[ped_type].addr_len);
+	nbl_hw_write_regs(phy_mgt, reg, data, ped_tbl[ped_type].addr_len * 4);
+}
+
+static int nbl_phy_set_mtu(void *priv, u16 mtu_index, u16 mtu)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_ipro_mtu_sel ipro_mtu_sel = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_MTU_SEL_REG(mtu_index / 2),
+			 (u8 *)&ipro_mtu_sel, sizeof(ipro_mtu_sel));
+
+	if (mtu_index % 2 == 0)
+		ipro_mtu_sel.mtu_0 = mtu;
+	else
+		ipro_mtu_sel.mtu_1 = mtu;
+
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_MTU_SEL_REG(mtu_index / 2),
+			  (u8 *)&ipro_mtu_sel, sizeof(ipro_mtu_sel));
+
+	return 0;
+}
+
+static u16 nbl_phy_get_mtu_index(void *priv, u16 vsi_id)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_ipro_dn_src_port_tbl ipro_dn_src_port_tbl = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id),
+			 (u8 *)&ipro_dn_src_port_tbl, sizeof(ipro_dn_src_port_tbl));
+	return ipro_dn_src_port_tbl.mtu_sel;
+}
+
+static int nbl_phy_process_abnormal_queue(struct nbl_phy_mgt *phy_mgt, u16 queue_id, int type,
+					  struct nbl_abnormal_details *detail)
+{
+	struct nbl_ipro_queue_tbl ipro_queue_tbl = {0};
+	struct nbl_host_vnet_qinfo host_vnet_qinfo = {0};
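+	/* a DVN abnormal event selects the TX half of the queue pair,
+	 * anything else the RX half
+	 */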
+	u32 qinfo_id = type == NBL_ABNORMAL_EVENT_DVN ? NBL_PAIR_ID_GET_TX(queue_id) :
+		       NBL_PAIR_ID_GET_RX(queue_id);
+
+	if (type >= NBL_ABNORMAL_EVENT_MAX)
+		return -EINVAL;
+
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_QUEUE_TBL(queue_id),
+			 (u8 *)&ipro_queue_tbl, sizeof(ipro_queue_tbl));
+
+	detail->abnormal = true;
+	detail->qid = queue_id;
+	detail->vsi_id = ipro_queue_tbl.vsi_id;
+
+	nbl_hw_read_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(qinfo_id),
+			 (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo));
+	host_vnet_qinfo.valid = 1;
+	nbl_hw_write_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(qinfo_id),
+			  (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo));
+
+	return 0;
+}
+
+static int nbl_phy_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnomal_info)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct device *dev = NBL_PHY_MGT_TO_DEV(phy_mgt);
+	struct dvn_desc_dif_err_info desc_dif_err_info = {0};
+	struct dvn_pkt_dif_err_info pkt_dif_err_info = {0};
+	struct dvn_err_queue_id_get err_queue_id_get = {0};
+	struct uvn_queue_err_info queue_err_info = {0};
+	struct nbl_abnormal_details *detail;
+	u32 int_status = 0, rdma_other_abn = 0, tlp_out_drop_cnt = 0;
+	u32 desc_dif_err_cnt = 0, pkt_dif_err_cnt = 0;
+	u32 queue_err_cnt;
+	int ret = 0;
+
+	nbl_hw_read_regs(phy_mgt, NBL_DVN_INT_STATUS, (u8 *)&int_status, sizeof(u32));
+	if (int_status == U32_MAX)
+		dev_info(dev, "dvn int_status:0x%x", int_status);
+
+	if (int_status && int_status != U32_MAX) {
+		if (int_status & BIT(NBL_DVN_INT_DESC_DIF_ERR)) {
+			nbl_hw_read_regs(phy_mgt, NBL_DVN_DESC_DIF_ERR_CNT,
+					 (u8 *)&desc_dif_err_cnt, sizeof(u32));
+			nbl_hw_read_regs(phy_mgt, NBL_DVN_DESC_DIF_ERR_INFO,
+					 (u8 *)&desc_dif_err_info,
+					 sizeof(struct dvn_desc_dif_err_info));
+			dev_info(dev, "dvn int_status:0x%x, desc_dif_mf_cnt:%d, queue_id:%d\n",
+				 int_status, desc_dif_err_cnt, desc_dif_err_info.queue_id);
+			detail = &abnomal_info->details[NBL_ABNORMAL_EVENT_DVN];
+			nbl_phy_process_abnormal_queue(phy_mgt, desc_dif_err_info.queue_id,
+						       NBL_ABNORMAL_EVENT_DVN, detail);
+
+			ret |= BIT(NBL_ABNORMAL_EVENT_DVN);
+		}
+
+		if (int_status & BIT(NBL_DVN_INT_PKT_DIF_ERR)) {
+			nbl_hw_read_regs(phy_mgt, NBL_DVN_PKT_DIF_ERR_CNT,
+					 (u8 *)&pkt_dif_err_cnt, sizeof(u32));
+			nbl_hw_read_regs(phy_mgt, NBL_DVN_PKT_DIF_ERR_INFO,
+					 (u8 *)&pkt_dif_err_info,
+					 sizeof(struct dvn_pkt_dif_err_info));
+			dev_info(dev, "dvn int_status:0x%x, pkt_dif_mf_cnt:%d, queue_id:%d\n",
+				 int_status, pkt_dif_err_cnt, pkt_dif_err_info.queue_id);
+		}
+
+		/* clear dvn abnormal irq */
+		nbl_hw_write_regs(phy_mgt, NBL_DVN_INT_STATUS,
+				  (u8 *)&int_status, sizeof(int_status));
+
+		/* enable new queue error irq */
+		err_queue_id_get.desc_flag = 1;
+		err_queue_id_get.pkt_flag = 1;
+		nbl_hw_write_regs(phy_mgt, NBL_DVN_ERR_QUEUE_ID_GET,
+				  (u8 *)&err_queue_id_get, sizeof(err_queue_id_get));
+	}
+
+	int_status = 0;
+	nbl_hw_read_regs(phy_mgt, NBL_UVN_INT_STATUS, (u8 *)&int_status, sizeof(u32));
+	if (int_status == U32_MAX)
+		dev_info(dev, "uvn int_status:0x%x", int_status);
+	if (int_status && int_status != U32_MAX) {
+		nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_ERR_CNT,
+				 (u8 *)&queue_err_cnt, sizeof(u32));
+		nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_ERR_INFO,
+				 (u8 *)&queue_err_info, sizeof(struct uvn_queue_err_info));
+		dev_info(dev, "uvn int_status:%x queue_err_cnt: 0x%x qid 0x%x\n",
+			 int_status, queue_err_cnt, queue_err_info.queue_id);
+
+		if (int_status & BIT(NBL_UVN_INT_QUEUE_ERR)) {
+			detail = &abnomal_info->details[NBL_ABNORMAL_EVENT_UVN];
+			nbl_phy_process_abnormal_queue(phy_mgt, queue_err_info.queue_id,
+						       NBL_ABNORMAL_EVENT_UVN, detail);
+
+			ret |= BIT(NBL_ABNORMAL_EVENT_UVN);
+		}
+
+		/* clear uvn abnormal irq */
+		nbl_hw_write_regs(phy_mgt, NBL_UVN_INT_STATUS,
+				  (u8 *)&int_status, sizeof(int_status));
+	}
+
+	int_status = 0;
+	nbl_hw_read_regs(phy_mgt, NBL_DSCH_INT_STATUS, (u8 *)&int_status, sizeof(u32));
+	nbl_hw_read_regs(phy_mgt, NBL_DSCH_RDMA_OTHER_ABN, (u8 *)&rdma_other_abn, sizeof(u32));
+	if (int_status == U32_MAX)
+		dev_info(dev, "dsch int_status:0x%x", int_status);
+	if (int_status && int_status != U32_MAX &&
+	    (int_status != NBL_DSCH_RDMA_OTHER_ABN_BIT ||
+	     rdma_other_abn != NBL_DSCH_RDMA_DPQM_DB_LOST)) {
+		dev_info(dev, "dsch int_status:%x\n", int_status);
+
+		/* clear dsch abnormal irq */
+		nbl_hw_write_regs(phy_mgt, NBL_DSCH_INT_STATUS,
+				  (u8 *)&int_status, sizeof(int_status));
+	}
+
+	int_status = 0;
+	nbl_hw_read_regs(phy_mgt, NBL_PCOMPLETER_INT_STATUS, (u8 *)&int_status, sizeof(u32));
+	if (int_status == U32_MAX)
+		dev_info(dev, "pcompleter int_status:0x%x", int_status);
+	if (int_status && int_status != U32_MAX) {
+		nbl_hw_read_regs(phy_mgt, NBL_PCOMPLETER_TLP_OUT_DROP_CNT,
+				 (u8 *)&tlp_out_drop_cnt, sizeof(u32));
+		dev_info(dev, "pcompleter int_status:0x%x tlp_out_drop_cnt 0x%x\n",
+			 int_status, tlp_out_drop_cnt);
+
+		/* clear pcompleter abnormal irq */
+		nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_INT_STATUS,
+				  (u8 *)&int_status, sizeof(int_status));
+	}
+
+	return ret;
+}
+
+static u32 nbl_phy_get_uvn_desc_entry_stats(void *priv)
+{
+	return nbl_hw_rd32(priv, NBL_UVN_DESC_RD_ENTRY);
+}
+
+static void nbl_phy_set_uvn_desc_wr_timeout(void *priv, u16 timeout)
+{
+	struct uvn_desc_wr_timeout wr_timeout = {0};
+
+	wr_timeout.num = timeout;
+	nbl_hw_write_regs(priv, NBL_UVN_DESC_WR_TIMEOUT, (u8 *)&wr_timeout, sizeof(wr_timeout));
+}
+
+static int nbl_phy_cfg_lag_algorithm(void *priv, u16 eth_id, u16 lag_id,
+				     enum netdev_lag_hash hash_type)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_epro_ept_tbl ept_tbl = {0};
+	u8 hw_hash_type = NBL_EPRO_LAG_ALG_L2_HASH;
+
+	switch (hash_type) {
+	case NETDEV_LAG_HASH_L23:
+	case NETDEV_LAG_HASH_E23:
+		hw_hash_type = NBL_EPRO_LAG_ALG_L23_HASH;
+		break;
+	case NETDEV_LAG_HASH_L34:
+	case NETDEV_LAG_HASH_E34:
+		hw_hash_type = NBL_EPRO_LAG_ALG_LINUX_L34_HASH;
+		break;
+	default:
+		break;
+	}
+
+	nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_TABLE(lag_id + NBL_EPRO_EPT_LAG_OFFSET),
+			 (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl));
+	ept_tbl.lag_alg_sel = hw_hash_type;
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(lag_id + NBL_EPRO_EPT_LAG_OFFSET),
+			  (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl));
+
+	nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_PHY,
+		 "Nbl phy set lag hash type %d", hw_hash_type);
+	return 0;
+}
+
+static int nbl_phy_cfg_lag_member_list(void *priv, struct nbl_lag_member_list_param *param)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_epro_ept_tbl ept_tbl = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_TABLE(param->lag_id + NBL_EPRO_EPT_LAG_OFFSET),
+			 (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl));
+	if (param->lag_num) {
+		ept_tbl.fwd = 1;
+		ept_tbl.vld = 1;
+	} else {
+		ept_tbl.fwd = 0;
+		ept_tbl.vld = 0;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(param->lag_id + NBL_EPRO_EPT_LAG_OFFSET),
+			  (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl));
+
+	nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_PHY,
+		 "Nbl phy set port lag member list done, lag_id:%d, port0:%d, port1:%d\n",
+		 param->lag_id, param->port_list[0], param->port_list[1]);
+
+	return 0;
+}
+
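+/* each eth port's lag membership is one bit in ept_tbl.lag_port_btm:
+ * enabling forwarding for eth_id sets bit eth_id, disabling clears it,
+ * and the up-direction source-port table is pointed back at the lag
+ */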
+static int nbl_phy_cfg_lag_member_fwd(void *priv, u16 eth_id, u16 lag_id, u8 fwd)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_epro_ept_tbl ept_tbl = {0};
+	struct nbl_ipro_upsport_tbl upsport = {0};
+	u8 lag_btm = 0, lag_btm_new = 0;
+
+	nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_TABLE(lag_id + NBL_EPRO_EPT_LAG_OFFSET),
+			 (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl));
+	lag_btm = ept_tbl.lag_port_btm;
+	lag_btm_new = fwd ? lag_btm | (1 << eth_id) : lag_btm & ~(1 << eth_id);
+
+	ept_tbl.lag_port_btm = lag_btm_new;
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(lag_id + NBL_EPRO_EPT_LAG_OFFSET),
+			  (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl));
+
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), (u8 *)&upsport, sizeof(upsport));
+
+	upsport.lag_id = fwd ? lag_id : 0;
+	upsport.lag_vld = fwd;
+
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id),
+			  (u8 *)&upsport, sizeof(upsport));
+
+	return 0;
+}
+
+static bool nbl_phy_get_lag_fwd(void *priv, u16 eth_id)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_ipro_upsport_tbl upsport = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), (u8 *)&upsport, sizeof(upsport));
+	return upsport.lag_vld;
+}
+
+static int nbl_phy_cfg_lag_member_up_attr(void *priv, u16 eth_id, u16 lag_id, bool enable)
+{
+	return 0;
+}
+
+static void nbl_phy_get_board_info(void *priv, struct nbl_board_port_info *board_info)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union nbl_fw_board_cfg_dw3 dw3 = {.info = {0}};
+
+	nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW3_OFFSET, (u8 *)&dw3, sizeof(dw3));
+	board_info->eth_num = dw3.info.port_num;
+	board_info->eth_speed = dw3.info.port_speed;
+	board_info->p4_version = dw3.info.p4_version;
+}
+
+static u32 nbl_phy_get_fw_eth_num(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union nbl_fw_board_cfg_dw3 dw3 = {.info = {0}};
+
+	nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW3_OFFSET, (u8 *)&dw3, sizeof(dw3));
+	return dw3.info.port_num;
+}
+
+static u32 nbl_phy_get_fw_eth_map(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union nbl_fw_board_cfg_dw6 dw6 = {.info = {0}};
+
+	nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW6_OFFSET, (u8 *)&dw6, sizeof(dw6));
+	return dw6.info.eth_bitmap;
+}
+
+static int nbl_phy_cfg_bond_shaping(void *priv, u8 eth_id, u8 speed, bool enable)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_shaping_dport dport = {0};
+	struct nbl_shaping_dvn_dport dvn_dport = {0};
+	struct nbl_shaping_rdma_dport rdma_dport = {0};
+	u32 rate, dvn_rate, rdma_rate;
+
+	if (!enable) {
+		nbl_shaping_eth_init(phy_mgt, eth_id, speed);
+		return 0;
+	}
+
+	if (speed == NBL_FW_PORT_SPEED_100G) {
+		rate = NBL_SHAPING_DPORT_100G_RATE * 2;
+		dvn_rate = NBL_SHAPING_DPORT_HALF_100G_RATE;
+		rdma_rate = NBL_SHAPING_DPORT_100G_RATE;
+	} else {
+		rate = NBL_SHAPING_DPORT_25G_RATE * 2;
+		dvn_rate = NBL_SHAPING_DPORT_HALF_25G_RATE;
+		rdma_rate = NBL_SHAPING_DPORT_25G_RATE;
+	}
+
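+	/* a bonded pair shares one shaper: the dport shaper gets twice the
+	 * per-port rate, the dvn shaper half of it and the rdma shaper the
+	 * full per-port rate; bucket depths are 2 * cir, floored at
+	 * NBL_LR_LEONIS_NET_BUCKET_DEPTH
+	 */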
+	dport.cir = rate;
+	dport.pir = rate;
+	dport.depth = max(dport.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH);
+	dport.cbs = dport.depth;
+	dport.pbs = dport.depth;
+	dport.valid = 1;
+	nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DPORT_REG(eth_id), (u8 *)&dport, sizeof(dport));
+
+	dvn_dport.cir = dvn_rate;
+	dvn_dport.pir = dvn_rate;
+	dvn_dport.depth = max(dvn_dport.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH);
+	dvn_dport.cbs = dvn_dport.depth;
+	dvn_dport.pbs = dvn_dport.depth;
+	dvn_dport.valid = 1;
+	nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DVN_DPORT_REG(eth_id),
+			  (u8 *)&dvn_dport, sizeof(dvn_dport));
+
+	rdma_dport.cir = rdma_rate;
+	rdma_dport.pir = rdma_rate;
+	rdma_dport.depth = max(rdma_dport.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH);
+	rdma_dport.cbs = rdma_dport.depth;
+	rdma_dport.pbs = rdma_dport.depth;
+	rdma_dport.valid = 1;
+	nbl_hw_write_regs(phy_mgt, NBL_SHAPING_RDMA_DPORT_REG(eth_id),
+			  (u8 *)&rdma_dport, sizeof(rdma_dport));
+
+	return 0;
+}
+
+static void nbl_phy_set_bond_fc_th(struct nbl_phy_mgt *phy_mgt,
+				   u8 main_eth_id, u8 other_eth_id, u8 speed)
+{
+	struct dstore_d_dport_fc_th fc_th = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(main_eth_id),
+			 (u8 *)&fc_th, sizeof(fc_th));
+	if (speed == NBL_FW_PORT_SPEED_100G) {
+		fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G_BOND_MAIN;
+		fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G_BOND_MAIN;
+	} else {
+		fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_BOND_MAIN;
+		fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_BOND_MAIN;
 	}
+	nbl_hw_write_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(main_eth_id),
+			  (u8 *)&fc_th, sizeof(fc_th));
-	int_status = 0;
-	nbl_hw_read_regs(phy_mgt, NBL_UVN_INT_STATUS, (u8 *)&int_status, sizeof(u32));
-	if (int_status) {
-		nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_ERR_CNT,
-				 (u8 *)&queue_err_cnt, sizeof(u32));
-		nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_ERR_INFO,
-				 (u8 *)&queue_err_info, sizeof(struct uvn_queue_err_info));
-		dev_info(dev, "uvn int_status:%x queue_err_cnt: 0x%x qid 0x%x\n",
-			 int_status, queue_err_cnt, queue_err_info.queue_id);
+	nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(other_eth_id),
+			 (u8 *)&fc_th, sizeof(fc_th));
+	if (speed == NBL_FW_PORT_SPEED_100G) {
+		fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G_BOND_OTHER;
+		fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G_BOND_OTHER;
+	} else {
+		fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_BOND_OTHER;
+		fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_BOND_OTHER;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(other_eth_id),
+			  (u8 *)&fc_th, sizeof(fc_th));
+}
-	if (int_status & BIT(NBL_UVN_INT_QUEUE_ERR)) {
-		detail = &abnomal_info->details[NBL_ABNORMAL_EVENT_UVN];
-		nbl_phy_process_abnormal_queue(phy_mgt, queue_err_info.queue_id,
-					       NBL_ABNORMAL_EVENT_UVN, detail);
+static void nbl_phy_remove_bond_fc_th(struct nbl_phy_mgt *phy_mgt,
+				      u8 main_eth_id, u8 other_eth_id, u8 speed)
+{
+	struct dstore_d_dport_fc_th fc_th = {0};
-		ret |= BIT(NBL_ABNORMAL_EVENT_UVN);
-	}
+	nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(main_eth_id),
+			 (u8 *)&fc_th, sizeof(fc_th));
+	if (speed == NBL_FW_PORT_SPEED_100G) {
+		fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G;
+		fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G;
+	} else {
+		fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH;
+		fc_th.xon_th = NBL_DSTORE_DROP_XON_TH;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(main_eth_id),
+			  (u8 *)&fc_th, sizeof(fc_th));
-	/* clear uvn abnormal irq */
-	nbl_hw_write_regs(phy_mgt, NBL_UVN_INT_STATUS,
-			  (u8 *)&int_status, sizeof(int_status));
+	nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(other_eth_id),
+			 (u8 *)&fc_th, sizeof(fc_th));
+	if (speed == NBL_FW_PORT_SPEED_100G) {
+		fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G;
+		fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G;
+	} else {
+		fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH;
+		fc_th.xon_th = NBL_DSTORE_DROP_XON_TH;
 	}
+	nbl_hw_write_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(other_eth_id),
+			  (u8 *)&fc_th, sizeof(fc_th));
+}
-	int_status = 0;
-	nbl_hw_read_regs(phy_mgt, NBL_DSCH_INT_STATUS, (u8 *)&int_status, sizeof(u32));
-	nbl_hw_read_regs(phy_mgt, NBL_DSCH_RDMA_OTHER_ABN, (u8 *)&rdma_other_abn, sizeof(u32));
-	if (int_status && (int_status != NBL_DSCH_RDMA_OTHER_ABN_BIT ||
-			   rdma_other_abn != NBL_DSCH_RDMA_DPQM_DB_LOST)) {
-		dev_info(dev, "dsch int_status:%x\n", int_status);
+static void nbl_phy_cfg_bgid_back_pressure(void *priv, u8 main_eth_id, u8 other_eth_id,
+					   bool enable, u8 speed)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
-		/* clear dsch abnormal irq */
-		nbl_hw_write_regs(phy_mgt, NBL_DSCH_INT_STATUS,
-				  (u8 *)&int_status, sizeof(int_status));
+	struct dvn_back_pressure_mask mask = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_DVN_BACK_PRESSURE_MASK, (u8 *)&mask, sizeof(mask));
+	nbl_phy_cfg_dvn_bp_mask(&mask, main_eth_id, enable);
+	nbl_phy_cfg_dvn_bp_mask(&mask, other_eth_id, enable);
+	nbl_hw_write_regs(phy_mgt, NBL_DVN_BACK_PRESSURE_MASK, (u8 *)&mask, sizeof(mask));
+
+	if (enable)
+		nbl_phy_set_bond_fc_th(phy_mgt, main_eth_id, other_eth_id, speed);
+	else
+		nbl_phy_remove_bond_fc_th(phy_mgt, main_eth_id, other_eth_id, speed);
+}
+
+static void nbl_phy_set_tc_kgen_cvlan_zero(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union pp1_kgen_key_prf_u kgen_key_prf = {.info = {0}};
+
+	nbl_hw_read_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(2), (u8 *)&kgen_key_prf,
+			 sizeof(kgen_key_prf));
+	kgen_key_prf.info.ext16_2_src = 0x19;
+	nbl_hw_write_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(2), (u8 *)&kgen_key_prf,
+			  sizeof(kgen_key_prf));
+
+	nbl_hw_read_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(3), (u8 *)&kgen_key_prf,
+			 sizeof(kgen_key_prf));
+	kgen_key_prf.info.ext16_2_src = 0x19;
+	nbl_hw_write_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(3), (u8 *)&kgen_key_prf,
+			  sizeof(kgen_key_prf));
+}
+
+static void nbl_phy_unset_tc_kgen_cvlan(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union pp1_kgen_key_prf_u kgen_key_prf = {.info = {0}};
+
+	nbl_hw_read_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(2), (u8 *)&kgen_key_prf,
+			 sizeof(kgen_key_prf));
+	kgen_key_prf.info.ext16_2_src = 0x99;
+	nbl_hw_write_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(2), (u8 *)&kgen_key_prf,
+			  sizeof(kgen_key_prf));
+
+	nbl_hw_read_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(3), (u8 *)&kgen_key_prf,
+			 sizeof(kgen_key_prf));
+	kgen_key_prf.info.ext16_2_src = 0x99;
+	nbl_hw_write_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(3), (u8 *)&kgen_key_prf,
+			  sizeof(kgen_key_prf));
+}
+
+static void nbl_phy_set_ped_tab_vsi_type(void *priv, u32 port_id, u16 eth_proto)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union dped_tab_vsi_type_u dped_vsi_type = {.info = {0}};
+	union uped_tab_vsi_type_u uped_vsi_type = {.info = {0}};
+
+	dped_vsi_type.info.sel = eth_proto;
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_TAB_VSI_TYPE_REG(port_id), (u8 *)&dped_vsi_type,
+			  sizeof(dped_vsi_type));
+
+	uped_vsi_type.info.sel = eth_proto;
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_TAB_VSI_TYPE_REG(port_id), (u8 *)&uped_vsi_type,
+			  sizeof(uped_vsi_type));
+}
+
+static void nbl_phy_clear_acl(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	nbl_acl_flow_tcam_clear(phy_mgt, NBL_ACL_FLUSH_FLOW_BTM, 0, NBL_ACL_TCAM_DEPTH);
+}
+
+static int nbl_phy_clr_fd_udf_l2(struct nbl_phy_mgt *phy_mgt)
+{
+	union upa_ext_conf_table_u clear = {{0}};
+	u8 index[] = {0, 1, 2, 3, 4, 5};
+	u8 entry[] = {2, 3, 12};
+	u8 i = 0;
+	u8 j = 0;
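+
+	/* each parser profile owns 16 extractor slots, so extractor n of
+	 * profile p lives at entry 16 * p + n of the UPA extraction table
+	 */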
+	for (i = 0; i < ARRAY_SIZE(index); i++)
+		for (j = 0; j < ARRAY_SIZE(entry); j++)
+			nbl_hw_write_regs(phy_mgt,
+					  NBL_UPA_EXT_CONF_TABLE_REG(16 * index[i] + entry[j]),
+					  (u8 *)&clear, sizeof(clear));
+
+	return 0;
+}
+
+static int nbl_phy_clr_fd_udf_l3(struct nbl_phy_mgt *phy_mgt)
+{
+	union upa_ext_conf_table_u clear = {{0}};
+	u8 index0[] = {8, 10};
+	u8 entry0[] = {9, 10, 13};
+	u8 index1[] = {9, 11, 12};
+	u8 entry1[] = {9, 10, 11};
+	u8 i = 0;
+	u8 j = 0;
+
+	for (i = 0; i < ARRAY_SIZE(index0); i++)
+		for (j = 0; j < ARRAY_SIZE(entry0); j++)
+			nbl_hw_write_regs(phy_mgt,
+					  NBL_UPA_EXT_CONF_TABLE_REG(16 * index0[i] + entry0[j]),
+					  (u8 *)&clear, sizeof(clear));
+
+	for (i = 0; i < ARRAY_SIZE(index1); i++)
+		for (j = 0; j < ARRAY_SIZE(entry1); j++)
+			nbl_hw_write_regs(phy_mgt,
+					  NBL_UPA_EXT_CONF_TABLE_REG(16 * index1[i] + entry1[j]),
+					  (u8 *)&clear, sizeof(clear));
+
+	return 0;
+}
+
+static int nbl_phy_clr_fd_udf_l4(struct nbl_phy_mgt *phy_mgt)
+{
+	union upa_ext_conf_table_u clear = {{0}};
+	u8 index[] = {16, 17, 18, 19, 21, 22, 24};
+	u8 entry[] = {2, 10, 11, 13};
+	u8 entry1[] = {2, 10, 11, 7, 8, 9}; /* for index = 20 */
+	u8 entry2[] = {2, 10, 11, 14, 15}; /* for index = 25 */
+	u8 i = 0;
+	u8 j = 0;
+
+	for (i = 0; i < ARRAY_SIZE(index); i++)
+		for (j = 0; j < ARRAY_SIZE(entry); j++)
+			nbl_hw_write_regs(phy_mgt,
+					  NBL_UPA_EXT_CONF_TABLE_REG(16 * index[i] + entry[j]),
+					  (u8 *)&clear, sizeof(clear));
+
+	i = 20;
+	for (j = 0; j < ARRAY_SIZE(entry1); j++)
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * i + entry1[j]),
+				  (u8 *)&clear, sizeof(clear));
+
+	i = 25;
+	for (j = 0; j < ARRAY_SIZE(entry2); j++)
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * i + entry2[j]),
+				  (u8 *)&clear, sizeof(clear));
+
+	return 0;
+}
+
+static int nbl_phy_clr_fd_udf(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	nbl_phy_clr_fd_udf_l2(phy_mgt);
+	nbl_phy_clr_fd_udf_l3(phy_mgt);
+	nbl_phy_clr_fd_udf_l4(phy_mgt);
+
+	return 0;
+}
+
+static int nbl_phy_set_fd_udf_l2(struct nbl_phy_mgt *phy_mgt, u8 offset)
+{
+	union upa_ext_conf_table_u ext32 = {{0}};
+	union upa_ext_conf_table_u ext32_0 = {{0}}; /* used for half length extraction */
+	union upa_ext_conf_table_u ext32_1 = {{0}}; /* used for half length extraction */
+	union upa_ext_conf_table_u ext8 = {{0}};
+	u8 index = 0; /* extractors profile index */
+	u8 entry = 0; /* extractor index */
+
+	if (offset % 4 == 0) {
+		/* use 4B extractor */
+		ext32.info.dst_offset = 40;
+		ext32.info.source_offset = offset / 4;
+		ext32.info.mode_sel = 0;
+		ext32.info.mode_start_off = 0;
+		ext32.info.lx_sel = 1;
+		ext32.info.op_en = 1;
+
+		index = 0;
+		entry = 2;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32, sizeof(ext32));
+		index = 1;
+		entry = 2;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32, sizeof(ext32));
+		index = 2;
+		entry = 2;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32, sizeof(ext32));
+		index = 3;
+		entry = 2;
+		ext32.info.dst_offset = 44;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32, sizeof(ext32));
+		index = 4;
+		entry = 2;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32, sizeof(ext32));
+		index = 5;
+		entry = 2;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32, sizeof(ext32));
+	} else if (offset % 4 == 2) {
+		/* use 2 * 4B extractor, all use half length extraction mode */
+		ext32_0.info.dst_offset = 40;
+		ext32_0.info.source_offset = offset / 4;
+		ext32_0.info.mode_sel = 1;
+		ext32_0.info.mode_start_off = 0b10; /* low-2-high */
+		ext32_0.info.lx_sel = 1;
+		ext32_0.info.op_en = 1;
+
+		ext32_1.info.dst_offset = 40;
+		ext32_1.info.source_offset = offset / 4 + 1;
+		ext32_1.info.mode_sel = 1;
+		ext32_1.info.mode_start_off = 0b01; /* high-2-low */
+		ext32_1.info.lx_sel = 1;
+		ext32_1.info.op_en = 1;
+
+		/* tunnel cases */
+		index = 0;
+		entry = 2;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_0, sizeof(ext32_0));
+		entry = 3;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_1, sizeof(ext32_1));
+		index = 1;
+		entry = 2;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_0, sizeof(ext32_0));
+		entry = 3;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_1, sizeof(ext32_1));
+		index = 2;
+		entry = 2;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_0, sizeof(ext32_0));
+		entry = 3;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_1, sizeof(ext32_1));
+
+		/* non-tunnel cases */
+		ext32_0.info.dst_offset = 44;
+		ext32_1.info.dst_offset = 44;
+		index = 3;
+		entry = 2;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_0, sizeof(ext32_0));
+		entry = 3;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_1, sizeof(ext32_1));
+		index = 4;
+		entry = 2;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_0, sizeof(ext32_0));
+		entry = 3;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_1, sizeof(ext32_1));
+		index = 5;
+		entry = 2;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_0, sizeof(ext32_0));
+		entry = 3;
+		nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry),
+				  (u8 *)&ext32_1, sizeof(ext32_1));
+	} else if (offset % 4 == 1 || offset % 4 == 3) {
+		/* use 4B extractor & 1B extractor for overwritten */
+		/* tunnel cases */
+		ext32.info.dst_offset = 40;
+		ext32.info.source_offset = (offset + 2) / 4;
+		ext32.info.mode_sel = 0;
+		ext32.info.mode_start_off = 0;
+		ext32.info.lx_sel = 1;
+		ext32.info.op_en = 1;
+
+		ext8.info.dst_offset = (offset % 4 == 3) ? 43 : 40;
offset : offset + 3; + ext8.info.mode_sel = 0; + ext8.info.mode_start_off = 0; + ext8.info.lx_sel = 1; + ext8.info.op_en = 1; + + index = 0; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + index = 1; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + index = 2; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + /* non-tunnel cases */ + ext32.info.dst_offset = 44; + ext8.info.dst_offset = (offset % 4 == 3) ? 47 : 44; + index = 3; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + index = 4; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + index = 5; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); } - int_status = 0; - nbl_hw_read_regs(phy_mgt, NBL_PCOMPLETER_INT_STATUS, (u8 *)&int_status, sizeof(u32)); - if (int_status) { - nbl_hw_read_regs(phy_mgt, NBL_PCOMPLETER_TLP_OUT_DROP_CNT, - (u8 *)&tlp_out_drop_cnt, sizeof(u32)); - dev_info(dev, "pcomleter int_status:0x%x tlp_out_drop_cnt 0x%x\n", - int_status, tlp_out_drop_cnt); + return 0; +} - /* clear pcomleter abnormal irq */ - nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_INT_STATUS, - (u8 *)&int_status, sizeof(int_status)); +static int nbl_phy_set_fd_udf_l3(struct nbl_phy_mgt *phy_mgt, u8 offset) +{ + union upa_ext_conf_table_u ext16_0 = {{0}}; + union upa_ext_conf_table_u ext16_1 = {{0}}; + union upa_ext_conf_table_u ext16_2 = {{0}}; /* used in half extraction mode */ + union upa_ext_conf_table_u ext8 = {{0}}; /* for overwritten 4B extraction */ + u8 index = 0; /* extractors profile index */ + u8 entry = 0; /* extractor index */ + + if (offset % 4 == 0 || offset % 4 == 2) { + /* tunnel cases */ + /* use 2 * 2B extractor */ + ext16_0.info.dst_offset = 40; + ext16_0.info.source_offset = offset / 2; + ext16_0.info.mode_sel = 0; + ext16_0.info.mode_start_off = 0; + ext16_0.info.lx_sel = 2; + ext16_0.info.op_en = 1; + ext16_1.info.dst_offset = 42; + ext16_1.info.source_offset = offset / 2 + 1; + ext16_1.info.mode_sel = 0; + ext16_1.info.mode_start_off = 0; + ext16_1.info.lx_sel = 2; + ext16_1.info.op_en = 1; + + index = 8; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 9; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, 
sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + /* non-tunnel cases */ + ext16_0.info.dst_offset = 44; + ext16_1.info.dst_offset = 46; + index = 10; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 11; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 12; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + } else if (offset % 4 == 1) { + /* tunnel cases */ + /* use 2*2B extractors & 1B extractor for overwritten */ + ext16_0.info.dst_offset = 42; + ext16_0.info.source_offset = offset / 2 + 1; + ext16_0.info.mode_sel = 0; + ext16_0.info.mode_start_off = 0; + ext16_0.info.lx_sel = 2; + ext16_0.info.op_en = 1; + + /* half mode extractor */ + ext16_1.info.dst_offset = 40; + ext16_1.info.source_offset = offset / 2; + ext16_1.info.mode_sel = 1; + ext16_1.info.mode_start_off = 0b11; /* low-2-low */ + ext16_1.info.lx_sel = 2; + ext16_1.info.op_en = 1; + + ext8.info.dst_offset = 40; + ext8.info.source_offset = offset + 3; + ext8.info.mode_sel = 0; + ext8.info.mode_start_off = 0; + ext8.info.lx_sel = 2; + ext8.info.op_en = 1; + + /* half mode extractor */ + ext16_2.info.dst_offset = 40; + ext16_2.info.source_offset = offset / 2 + 2; + ext16_2.info.mode_sel = 1; + ext16_2.info.mode_start_off = 0; /* high-2-high */ + ext16_2.info.lx_sel = 2; + ext16_2.info.op_en = 1; + + index = 8; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 9; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + + /* for non-tunnel cases */ + ext16_0.info.dst_offset = 46; + ext16_1.info.dst_offset = 44; + ext16_2.info.dst_offset = 44; + ext8.info.dst_offset = 44; + + index = 10; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 11; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 
* index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + + index = 12; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + } else if (offset % 4 == 3) { + /* tunnel cases */ + /* use 2*2B extractors & 1B extractor for overwritten */ + ext16_0.info.dst_offset = 40; + ext16_0.info.source_offset = offset / 2 + 1; + ext16_0.info.mode_sel = 0; + ext16_0.info.mode_start_off = 0; + ext16_0.info.lx_sel = 2; + ext16_0.info.op_en = 1; + + ext16_1.info.dst_offset = 42; + ext16_1.info.source_offset = offset / 2; + ext16_1.info.mode_sel = 1; + ext16_1.info.mode_start_off = 0b11; /* low-2-low */ + ext16_1.info.lx_sel = 2; + ext16_1.info.op_en = 1; + + ext8.info.dst_offset = 42; + ext8.info.source_offset = offset + 3; + ext8.info.mode_sel = 0; + ext8.info.mode_start_off = 0; + ext8.info.lx_sel = 2; + ext8.info.op_en = 1; + + /* half mode extractor */ + ext16_2.info.dst_offset = 42; + ext16_2.info.source_offset = offset / 2 + 2; + ext16_2.info.mode_sel = 1; + ext16_2.info.mode_start_off = 0b00; /* high-2-high */ + ext16_2.info.lx_sel = 2; + ext16_2.info.op_en = 1; + + index = 8; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 9; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + + /* for non-tunnel cases */ + ext16_0.info.dst_offset = 44; + ext16_1.info.dst_offset = 46; + ext16_2.info.dst_offset = 46; + ext8.info.dst_offset = 46; + + index = 10; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 11; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + + index = 12; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + 
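+ /* entry 11 carries the second half-mode extractor (ext16_2); together
+ * with entries 9 and 10 it reassembles the 4B field that straddles
+ * three 16-bit words when offset % 4 == 1. */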
nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + } + + return 0; +} + +static int nbl_phy_set_fd_udf_l4(struct nbl_phy_mgt *phy_mgt, u8 offset) +{ + union upa_ext_conf_table_u ext32 = {{0}}; /* entry = 2 */ + union upa_ext_conf_table_u ext16_0 = {{0}}; /* entry = 10 */ + union upa_ext_conf_table_u ext16_1 = {{0}}; /* entry = 11 */ + union upa_ext_conf_table_u ext16_2 = {{0}}; /* entry for 2B = 7 8 9 */ + union upa_ext_conf_table_u ext8 = {{0}}; /* entry = 12 */ + union upa_ext_conf_table_u ext4_0 = {{0}}; /* entry = 14 */ + union upa_ext_conf_table_u ext4_1 = {{0}}; /* entry = 15 */ + u8 index = 0; /* extractors profile index */ + u8 entry = 0; /* extractor index */ + + if (offset % 4 == 0) { + /* use 1 * 4B extractor */ + ext32.info.dst_offset = 40; + ext32.info.source_offset = offset / 4 + 2; + ext32.info.mode_sel = 0; + ext32.info.mode_start_off = 0; + ext32.info.lx_sel = 2; + ext32.info.op_en = 1; + + /* tunnel vxlan & geneve case: plus UDP length 8B */ + index = 16; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + + /* tunnel geneve-ovn case: plus UDP length 8B */ + index = 17; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + + /* non-tunnel case */ + /* use 1 * 4B extractor */ + ext32.info.source_offset = offset / 4; + ext32.info.dst_offset = 44; + ext32.info.lx_sel = 3; + index = 20; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 21; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 22; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 24; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 25; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + } else if (offset % 4 == 2) { + /* use 2 * 2B extractors */ + ext16_0.info.dst_offset = 40; + ext16_0.info.source_offset = offset / 2 + 4; + ext16_0.info.mode_sel = 0; + ext16_0.info.mode_start_off = 0; + ext16_0.info.lx_sel = 2; + ext16_0.info.op_en = 1; + + ext16_1.info.dst_offset = 42; + ext16_1.info.source_offset = offset / 2 + 5; + ext16_1.info.mode_sel = 0; + ext16_1.info.mode_start_off = 0; + ext16_1.info.lx_sel = 2; + ext16_1.info.op_en = 1; + + /* tunnel vxlan & geneve case: plus UDP length 8B */ + index = 16; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + /* tunnel geneve-ovn case: plus UDP length 8B */ + index = 17; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + /* non-tunnel case */ + ext16_0.info.source_offset = offset / 2; + ext16_1.info.source_offset = offset / 2 + 1; + ext16_0.info.dst_offset = 44; + ext16_1.info.dst_offset = 46; + ext16_0.info.lx_sel = 3; + ext16_1.info.lx_sel = 3; + index = 20; + entry = 
10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 21; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 22; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 24; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 25; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + } else if (offset % 4 == 1 || offset % 4 == 3) { + /* use 4B extractor & 1B extractor for overwritten */ + ext32.info.dst_offset = 40; + ext32.info.source_offset = 2 + (offset + 2) / 4; + ext32.info.mode_sel = 0; + ext32.info.mode_start_off = 0; + ext32.info.lx_sel = 2; + ext32.info.op_en = 1; + + ext8.info.dst_offset = (offset % 4 == 1) ? 40 : 43; + ext8.info.source_offset = 8 + ((offset % 4 == 1) ? offset + 3 : offset); + ext8.info.mode_sel = 0; + ext8.info.mode_start_off = 0; + ext8.info.lx_sel = 2; + ext8.info.op_en = 1; + + /* tunnel vxlan & geneve case: plus UDP length 8B */ + index = 16; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + /* tunnel geneve-ovn case: plus UDP length 8B */ + index = 17; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + /* for non-tunnel cases */ + ext32.info.source_offset = (offset + 2) / 4; + ext8.info.source_offset = (offset % 4 == 1) ? offset + 3 : offset; + ext32.info.dst_offset = 44; + ext8.info.dst_offset = (offset % 4 == 1) ? 
44 : 47; + ext32.info.lx_sel = 3; + ext8.info.lx_sel = 3; + + index = 21; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 22; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 24; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + /* non-tunnel for icmp: use 32bit & 4bit & 4bit extractors */ + /* currently disabled! */ + ext32.info.op_en = 0; + ext4_0.info.dst_offset = (offset % 4 == 1) ? 44 : 47; + ext4_0.info.source_offset = (offset % 4 == 1) ? offset + 3 : offset; + ext4_0.info.mode_sel = 1; + ext4_0.info.mode_start_off = 0b00; + ext4_0.info.lx_sel = 3; + ext4_0.info.op_en = 0; + + ext4_1.info.dst_offset = (offset % 4 == 1) ? 44 : 47; + ext4_1.info.source_offset = (offset % 4 == 1) ? offset + 3 : offset; + ext4_1.info.mode_sel = 1; + ext4_1.info.mode_start_off = 0b11; + ext4_1.info.lx_sel = 3; + ext4_1.info.op_en = 0; + + index = 25; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 14; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext4_0, sizeof(ext4_0)); + entry = 15; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext4_1, sizeof(ext4_1)); + + /* non-tunnel for icmpv6: use 32bit & 4bit & 4bit extractors */ + /* currently disabled! */ + ext16_0.info.dst_offset = (offset % 4 == 1) ? 42 : 40; + ext16_0.info.source_offset = offset / 2 + 1; + ext16_0.info.mode_sel = 0; + ext16_0.info.mode_start_off = 0; + ext16_0.info.lx_sel = 3; + ext16_0.info.op_en = 0; + + ext16_1.info.dst_offset = (offset % 4 == 1) ? 40 : 42; + ext16_1.info.source_offset = offset / 2; + ext16_1.info.mode_sel = 1; + ext16_1.info.mode_start_off = 0b00; + ext16_1.info.lx_sel = 3; + ext16_1.info.op_en = 0; + + ext16_2.info.dst_offset = (offset % 4 == 1) ? 
40 : 42; + ext16_2.info.source_offset = offset / 2 + 2; + ext16_2.info.mode_sel = 1; + ext16_2.info.mode_start_off = 0b11; + ext16_2.info.lx_sel = 3; + ext16_2.info.op_en = 0; + + index = 20; + entry = 7; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 8; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + } + + return 0; +} + +static int nbl_phy_set_fd_udf(void *priv, u8 lxmode, u8 offset) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + int ret = 0; + + switch (lxmode) { + case 0: + ret = nbl_phy_set_fd_udf_l2(phy_mgt, offset); + break; + case 1: + ret = nbl_phy_set_fd_udf_l3(phy_mgt, offset); + break; + case 2: + ret = nbl_phy_set_fd_udf_l4(phy_mgt, offset); + break; + default: + break; } return ret; } -static u32 nbl_phy_get_uvn_desc_entry_stats(void *priv) +static int nbl_phy_set_fd_tcam_cfg_default(void *priv) { - return nbl_hw_rd32(priv, NBL_UVN_DESC_RD_ENTRY); + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union acl_tcam_cfg_u acl_key_cfg = {{0}}; + union acl_action_ram_cfg_u acl_action_cfg = {{0}}; + union acl_kgen_tcam_u acl_kgen_tcam = {{0}}; + int i; + + nbl_hw_read_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_FD_PROFILE_DEFAULT), + (u8 *)&acl_action_cfg, sizeof(acl_action_cfg)); + + nbl_hw_read_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_FD_PROFILE_DEFAULT), + (u8 *)&acl_key_cfg, sizeof(acl_key_cfg)); + + acl_key_cfg.info.startcompare0 = 1; + acl_key_cfg.info.startset0 = 1; + acl_key_cfg.info.key_id0 = 11; + acl_key_cfg.info.tcam0_enable = 1; + + acl_key_cfg.info.startcompare1 = 0; + acl_key_cfg.info.startset1 = 0; + acl_key_cfg.info.key_id1 = 10; + acl_key_cfg.info.tcam1_enable = 1; + + acl_key_cfg.info.startcompare2 = 0; + acl_key_cfg.info.startset2 = 0; + acl_key_cfg.info.key_id2 = 9; + acl_key_cfg.info.tcam2_enable = 1; + + acl_key_cfg.info.startcompare3 = 0; + acl_key_cfg.info.startset3 = 0; + acl_key_cfg.info.key_id3 = 8; + acl_key_cfg.info.tcam3_enable = 1; + + acl_key_cfg.info.startcompare4 = 0; + acl_key_cfg.info.startset4 = 0; + acl_key_cfg.info.key_id4 = 7; + acl_key_cfg.info.tcam4_enable = 1; + + acl_key_cfg.info.startcompare5 = 0; + acl_key_cfg.info.startset5 = 0; + acl_key_cfg.info.key_id5 = 6; + acl_key_cfg.info.tcam5_enable = 1; + + acl_key_cfg.info.startcompare6 = 0; + acl_key_cfg.info.startset6 = 0; + acl_key_cfg.info.key_id6 = 5; + acl_key_cfg.info.tcam6_enable = 1; + + acl_key_cfg.info.startcompare7 = 0; + acl_key_cfg.info.startset7 = 0; + acl_key_cfg.info.key_id7 = 4; + acl_key_cfg.info.tcam7_enable = 1; + + acl_key_cfg.info.startcompare8 = 0; + acl_key_cfg.info.startset8 = 0; + acl_key_cfg.info.key_id8 = 3; + acl_key_cfg.info.tcam8_enable = 1; + + acl_key_cfg.info.startcompare9 = 0; + acl_key_cfg.info.startset9 = 0; + acl_key_cfg.info.key_id9 = 2; + acl_key_cfg.info.tcam9_enable = 1; + + acl_key_cfg.info.startcompare10 = 0; + acl_key_cfg.info.startset10 = 0; + acl_key_cfg.info.key_id10 = 1; + acl_key_cfg.info.tcam10_enable = 1; + + acl_key_cfg.info.startcompare11 = 0; + acl_key_cfg.info.startset11 = 0; + acl_key_cfg.info.key_id11 = 0; + acl_key_cfg.info.tcam11_enable = 1; + + /* Although we don't use it, startcompare and startset must be 1, to identify the end. */
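+ /* Slots 0-11 chain key_id 11..0 into one search key, so only slot 0
+ * raises startcompare/startset; slot 12 below stays disabled but keeps
+ * both bits set to mark the end of the chain, and slot 15 is a
+ * separate single-slot key for the ovs-tc upcall profile. */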
+ acl_key_cfg.info.startcompare12 = 1; + acl_key_cfg.info.startset12 = 1; + acl_key_cfg.info.key_id12 = 0; + acl_key_cfg.info.tcam12_enable = 0; + + acl_key_cfg.info.startcompare13 = 0; + acl_key_cfg.info.startset13 = 0; + acl_key_cfg.info.key_id13 = 0; + acl_key_cfg.info.tcam13_enable = 0; + + acl_key_cfg.info.startcompare14 = 0; + acl_key_cfg.info.startset14 = 0; + acl_key_cfg.info.key_id14 = 0; + acl_key_cfg.info.tcam14_enable = 0; + + /* For ovs-tc upcall */ + acl_key_cfg.info.startcompare15 = 1; + acl_key_cfg.info.startset15 = 1; + acl_key_cfg.info.key_id15 = 0; + acl_key_cfg.info.tcam15_enable = 1; + + acl_action_cfg.info.action_ram0_enable = 1; + acl_action_cfg.info.action_ram0_alloc_id = 11; + + nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_FD_PROFILE_DEFAULT), + (u8 *)&acl_action_cfg, sizeof(acl_action_cfg)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_FD_PROFILE_DEFAULT + 1), + (u8 *)&acl_action_cfg, sizeof(acl_action_cfg)); + + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_FD_PROFILE_DEFAULT), + (u8 *)&acl_key_cfg, sizeof(acl_key_cfg)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_FD_PROFILE_DEFAULT + 1), + (u8 *)&acl_key_cfg, sizeof(acl_key_cfg)); + + for (i = NBL_FD_PROFILE_IPV4; i < NBL_FD_PROFILE_DEFAULT; i++) { + nbl_hw_read_regs(phy_mgt, NBL_ACL_KGEN_TCAM_REG(i), + (u8 *)&acl_kgen_tcam, sizeof(acl_kgen_tcam)); + acl_kgen_tcam.info.valid_bit = 0; + nbl_hw_write_regs(phy_mgt, NBL_ACL_KGEN_TCAM_REG(i), + (u8 *)&acl_kgen_tcam, sizeof(acl_kgen_tcam)); + } + + return 0; } -static void nbl_phy_set_uvn_desc_wr_timeout(void *priv, u16 timeout) +static int nbl_phy_set_fd_tcam_cfg_lite(void *priv) { - struct uvn_desc_wr_timeout wr_timeout = {0}; + return 0; +} - wr_timeout.num = timeout; - nbl_hw_write_regs(priv, NBL_UVN_DESC_WR_TIMEOUT, (u8 *)&wr_timeout, sizeof(wr_timeout)); +static int nbl_phy_set_fd_tcam_cfg_full(void *priv) +{ + return 0; } -static void nbl_phy_get_board_info(void *priv, struct nbl_board_port_info *board_info) +static int nbl_phy_set_fd_tcam_ram(void *priv, struct nbl_acl_tcam_param *data, + struct nbl_acl_tcam_param *mask, u16 ram_index, u32 depth_index) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; - union nbl_fw_board_cfg_dw3 dw3 = {.info = {0}}; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + union acl_indirect_ctrl_u indirect_ctrl = { + .info.cpu_acl_cfg_start = 1, + .info.cpu_acl_cfg_rw = NBL_ACL_INDIRECT_ACCESS_WRITE, + }; + union acl_valid_bit_u tcam_data_valid = {{0}}; + union acl_indirect_access_ack_u indirect_ack = {{0}}; + struct nbl_acl_tcam_common_data_u tcam_data = {{0}}, tcam_mask = {{0}}; + int i, rd_retry = NBL_ACL_RD_RETRY; + + for (i = 0; i < data->len / NBL_ACL_TCAM_KEY_LEN; i++) { + memset(&tcam_data, 0, sizeof(tcam_data)); + memset(&tcam_mask, 0, sizeof(tcam_mask)); + + memcpy(&tcam_data.data, &data->info.key[i], sizeof(tcam_data.data)); + memcpy(&tcam_mask.data, &mask->info.key[i], sizeof(tcam_mask.data)); + + *(u64 *)(&tcam_mask) = ~(*(u64 *)(&tcam_mask)); + + nbl_tcam_truth_value_convert((u64 *)&tcam_data, (u64 *)&tcam_mask); + + indirect_ctrl.info.acc_btm |= 1 << (ram_index + i); + tcam_data_valid.info.valid_bit |= 1 << (ram_index + i); + + nbl_debug(common, NBL_DEBUG_FLOW, "Set key tcam %d: 0x%02x%02x%02x%02x%02x", + ram_index + i, tcam_data.data[4], tcam_data.data[3], tcam_data.data[2], + tcam_data.data[1], tcam_data.data[0]); + nbl_debug(common, NBL_DEBUG_FLOW, "Set key tcam mask %d: 0x%02x%02x%02x%02x%02x", + ram_index + i, tcam_mask.data[4], tcam_mask.data[3], tcam_mask.data[2], + tcam_mask.data[1], tcam_mask.data[0]); + + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_X(ram_index + i), + (u8 *)&tcam_data, sizeof(tcam_data)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_Y(ram_index + i), + (u8 *)&tcam_mask, sizeof(tcam_mask)); + }
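+ /* The inverted mask and nbl_tcam_truth_value_convert() produce the X/Y
+ * plane pair written above, the usual TCAM encoding for don't-care
+ * bits; the enabled slices are then committed in one indirect access
+ * at depth_index below. */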
- nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW3_OFFSET, (u8 *)&dw3, sizeof(dw3)); - board_info->eth_num = dw3.info.port_num; - board_info->eth_speed = dw3.info.port_speed; + indirect_ctrl.info.tcam_addr = depth_index; + + nbl_debug(common, NBL_DEBUG_FLOW, "Set valid bit %08x", *(u32 *)&tcam_data_valid); + nbl_debug(common, NBL_DEBUG_FLOW, "Set ctrl %08x", *(u32 *)&indirect_ctrl); + + nbl_hw_write_regs(phy_mgt, NBL_ACL_VALID_BIT_ADDR, + (u8 *)&tcam_data_valid, sizeof(tcam_data_valid)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_INDIRECT_CTRL_ADDR, + (u8 *)&indirect_ctrl, sizeof(indirect_ctrl)); + do { + nbl_hw_read_regs(phy_mgt, NBL_ACL_INDIRECT_ACCESS_ACK_ADDR, + (u8 *)&indirect_ack, sizeof(indirect_ack)); + if (!indirect_ack.info.done) { + rd_retry--; + usleep_range(NBL_ACL_RD_WAIT_100US, NBL_ACL_RD_WAIT_200US); + } else { + break; + } + } while (rd_retry); + + if (!indirect_ack.info.done) { + nbl_err(common, NBL_DEBUG_FLOW, "Set fd acl tcam fail\n"); + return -EIO; + } + + return 0; } -static u32 nbl_phy_get_fw_eth_num(void *priv) +static int nbl_phy_set_fd_action_ram(void *priv, u32 action, u16 ram_index, u32 depth_index) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; - union nbl_fw_board_cfg_dw3 dw3 = {.info = {0}}; + union acl_action_ram15_u acl_action_ram = {{0}}; - nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW3_OFFSET, (u8 *)&dw3, sizeof(dw3)); - return dw3.info.port_num; + acl_action_ram.info.Action0 = action; + + nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_TBL(ram_index, depth_index), + (u8 *)&acl_action_ram, sizeof(acl_action_ram)); + + return 0; } -static u32 nbl_phy_get_fw_eth_map(void *priv) +static void nbl_phy_set_hw_status(void *priv, enum nbl_hw_status hw_status) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; - union nbl_fw_board_cfg_dw6 dw6 = {.info = {0}}; - nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW6_OFFSET, (u8 *)&dw6, sizeof(dw6)); - return dw6.info.eth_bitmap; + phy_mgt->hw_status = hw_status; +} + +static enum nbl_hw_status nbl_phy_get_hw_status(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + return phy_mgt->hw_status; +} + +static u32 nbl_phy_get_perf_dump_length(void *priv) +{ + return sizeof(nbl_phy_dump_registers); +} + +static u32 nbl_phy_get_perf_dump_data(void *priv, u8 *buffer, u32 length) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 copy_len = min_t(u32, length, sizeof(nbl_phy_dump_registers)); + int i; + + for (i = 0; i < copy_len / 4; i++) { + nbl_hw_read_regs(phy_mgt, nbl_phy_dump_registers[i], buffer, 4); + buffer += 4; + } + + return copy_len; +} + +static int nbl_phy_get_mirror_table_id(void *priv, u16 vsi_id, int dir, + bool mirror_en, u8 *mt_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union ipro_dn_src_port_tbl_u ipro_dn_src_port_tbl = {{0}}; + union epro_vpt_u epro_vpt = {{0}}; + union epro_mt_u epro_mt = {{0}}; + int index = 0; + + if (dir == 0) { + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, + sizeof(ipro_dn_src_port_tbl)); + if (!mirror_en && !ipro_dn_src_port_tbl.info.mirror_en) { + *mt_id = NBL_EPRO_MT_MAX; + } else if (!mirror_en && ipro_dn_src_port_tbl.info.mirror_en) { + *mt_id =
ipro_dn_src_port_tbl.info.mirror_id; + } else if (mirror_en && ipro_dn_src_port_tbl.info.mirror_en) { + *mt_id = ipro_dn_src_port_tbl.info.mirror_id; + } else if (mirror_en && !ipro_dn_src_port_tbl.info.mirror_en) { + for (; index < NBL_EPRO_MT_MAX; index++) { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_MT_REG(index), + (u8 *)&epro_mt, sizeof(epro_mt)); + if (epro_mt.info.vld == 0) { + *mt_id = index; + return 0; + } + } + *mt_id = NBL_EPRO_MT_MAX; + } + } else { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), + (u8 *)&epro_vpt, sizeof(epro_vpt)); + if (!mirror_en && !epro_vpt.info.mirror_en) { + *mt_id = NBL_EPRO_MT_MAX; + } else if (!mirror_en && epro_vpt.info.mirror_en) { + *mt_id = epro_vpt.info.mirror_id; + } else if (mirror_en && epro_vpt.info.mirror_en) { + *mt_id = epro_vpt.info.mirror_id; + } else if (mirror_en && !epro_vpt.info.mirror_en) { + for (; index < NBL_EPRO_MT_MAX; index++) { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_MT_REG(index), + (u8 *)&epro_mt, sizeof(epro_mt)); + if (epro_mt.info.vld == 0) { + *mt_id = index; + return 0; + } + } + *mt_id = NBL_EPRO_MT_MAX; + } + } + + return 0; +} + +static int nbl_phy_configure_mirror(void *priv, u16 vsi_id, bool mirror_en, + int dir, u8 mt_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union ipro_dn_src_port_tbl_u ipro_dn_src_port_tbl = {{0}}; + union epro_vpt_u epro_vpt = {{0}}; + + if (!mirror_en) { + if (dir == 0) { + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, + sizeof(ipro_dn_src_port_tbl)); + ipro_dn_src_port_tbl.info.mirror_en = 0; + ipro_dn_src_port_tbl.info.mirror_pr = 0; + ipro_dn_src_port_tbl.info.mirror_id = 0; + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, + sizeof(ipro_dn_src_port_tbl)); + } else { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), + (u8 *)&epro_vpt, sizeof(epro_vpt)); + epro_vpt.info.mirror_en = 0; + epro_vpt.info.mirror_id = 0; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)&epro_vpt, + sizeof(epro_vpt)); + } + } else { + if (dir == 0) { + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, sizeof(ipro_dn_src_port_tbl)); + ipro_dn_src_port_tbl.info.mirror_en = mirror_en; + ipro_dn_src_port_tbl.info.mirror_pr = 3; + ipro_dn_src_port_tbl.info.mirror_id = mt_id; + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, + sizeof(ipro_dn_src_port_tbl)); + } else { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), + (u8 *)&epro_vpt, sizeof(epro_vpt)); + epro_vpt.info.mirror_en = mirror_en; + epro_vpt.info.mirror_id = mt_id; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)&epro_vpt, + sizeof(epro_vpt)); + } + } + return 0; +} + +static int nbl_phy_configure_mirror_table(void *priv, bool mirror_en, + u16 mirror_vsi_id, u16 mirror_queue_id, u8 mt_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union epro_mt_u epro_mt = {{0}}; + + if (!mirror_en) { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_MT_REG(mt_id), (u8 *)&epro_mt, + sizeof(epro_mt)); + epro_mt.info.dport = 0; + epro_mt.info.dqueue = 0; + epro_mt.info.vld = mirror_en; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_MT_REG(mt_id), (u8 *)&epro_mt, + sizeof(epro_mt)); + } else { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_MT_REG(mt_id), (u8 *)&epro_mt, + sizeof(epro_mt)); + epro_mt.info.dport = mirror_vsi_id; + epro_mt.info.dqueue = mirror_queue_id; + epro_mt.info.vld = mirror_en; + nbl_hw_write_regs(phy_mgt, 
NBL_EPRO_MT_REG(mt_id), (u8 *)&epro_mt, + sizeof(epro_mt)); + } + + return 0; +} + +static int nbl_phy_clear_mirror_cfg(void *priv, u16 vsi_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union ipro_dn_src_port_tbl_u ipro_dn_src_port_tbl = {{0}}; + union epro_vpt_u epro_vpt = {{0}}; + union epro_mt_u epro_mt = {{0}}; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, sizeof(ipro_dn_src_port_tbl)); + if (ipro_dn_src_port_tbl.info.mirror_en) { + nbl_hw_write_regs(phy_mgt, NBL_EPRO_MT_REG(ipro_dn_src_port_tbl.info.mirror_id), + (u8 *)&epro_mt, sizeof(epro_mt)); + ipro_dn_src_port_tbl.info.mirror_en = 0; + ipro_dn_src_port_tbl.info.mirror_pr = 0; + ipro_dn_src_port_tbl.info.mirror_id = 0; + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, + sizeof(ipro_dn_src_port_tbl)); + } + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), + (u8 *)&epro_vpt, sizeof(epro_vpt)); + if (epro_vpt.info.mirror_en) { + nbl_hw_write_regs(phy_mgt, NBL_EPRO_MT_REG(epro_vpt.info.mirror_id), + (u8 *)&epro_mt, sizeof(epro_mt)); + epro_vpt.info.mirror_en = 0; + epro_vpt.info.mirror_id = 0; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)&epro_vpt, + sizeof(epro_vpt)); + } + + return 0; +} + +static int nbl_phy_get_dstat_vsi_stat(void *priv, u16 vsi_id, u64 *fwd_pkt, u64 *fwd_byte) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_dstat_vsi_stat dstat_vsi_stat = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DSTAT_VSI_STAT(vsi_id), + (u8 *)&dstat_vsi_stat, sizeof(dstat_vsi_stat)); + + *fwd_pkt = dstat_vsi_stat.fwd_pkt_cnt_low + + ((u64)(dstat_vsi_stat.fwd_pkt_cnt_high) << 32); + *fwd_byte = dstat_vsi_stat.fwd_byte_cnt_low + + ((u64)(dstat_vsi_stat.fwd_byte_cnt_high) << 32); + + return 0; +} + +static int nbl_phy_get_ustat_vsi_stat(void *priv, u16 vsi_id, u64 *fwd_pkt, u64 *fwd_byte) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ustat_vsi_stat ustat_vsi_stat = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_USTAT_VSI_STAT(vsi_id), + (u8 *)&ustat_vsi_stat, sizeof(ustat_vsi_stat)); + + *fwd_pkt = ustat_vsi_stat.fwd_pkt_cnt_low + + ((u64)(ustat_vsi_stat.fwd_pkt_cnt_high) << 32); + *fwd_byte = ustat_vsi_stat.fwd_byte_cnt_low + + ((u64)(ustat_vsi_stat.fwd_byte_cnt_high) << 32); + + return 0; +} + +static int nbl_phy_get_uvn_pkt_drop_stats(void *priv, u16 global_queue_id, u32 *uvn_stat_pkt_drop) +{ + *uvn_stat_pkt_drop = nbl_hw_rd32(priv, NBL_UVN_STATIS_PKT_DROP(global_queue_id)); + return 0; +} + +static int nbl_phy_get_ustore_pkt_drop_stats(void *priv, u8 eth_id, + struct nbl_ustore_stats *ustore_stats) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + ustore_stats->rx_drop_packets = nbl_hw_rd32(phy_mgt, NBL_USTORE_BUF_PORT_DROP_PKT(eth_id)); + ustore_stats->rx_trun_packets = nbl_hw_rd32(phy_mgt, NBL_USTORE_BUF_PORT_TRUN_PKT(eth_id)); + + return 0; } static struct nbl_phy_ops phy_ops = { @@ -2704,11 +7176,13 @@ static struct nbl_phy_ops phy_ops = { .active_shaping = nbl_phy_active_shaping, .deactive_shaping = nbl_phy_deactive_shaping, .set_shaping = nbl_phy_set_shaping, + .set_ucar = nbl_phy_set_ucar, .cfg_dsch_net_to_group = nbl_phy_cfg_dsch_net_to_group, .init_epro_rss_key = nbl_phy_init_epro_rss_key, .read_rss_key = nbl_phy_read_epro_rss_key, .read_rss_indir = nbl_phy_read_rss_indir, .get_rss_alg_sel = nbl_phy_get_rss_alg_sel, + .set_rss_alg_sel = nbl_phy_set_rss_alg_sel, .init_epro_vpt_tbl = nbl_phy_init_epro_vpt_tbl, 
.set_epro_rss_default = nbl_phy_set_epro_rss_default, .cfg_epro_rss_ret = nbl_phy_cfg_epro_rss_ret, @@ -2725,7 +7199,13 @@ static struct nbl_phy_ops phy_ops = { .get_tx_queue_err_stats = nbl_phy_get_tx_queue_err_stats, .setup_queue_switch = nbl_phy_setup_queue_switch, .init_pfc = nbl_phy_init_pfc, + .cfg_phy_flow = nbl_phy_cfg_phy_flow, + .cfg_eth_port_priority_replace = nbl_phy_cfg_eth_port_priority_replace, .get_chip_temperature = nbl_phy_get_chip_temperature, + .write_ped_tbl = nbl_phy_write_ped_tbl, + .set_vsi_mtu = nbl_phy_set_vsi_mtu, + .set_mtu = nbl_phy_set_mtu, + .get_mtu_index = nbl_phy_get_mtu_index, .configure_msix_map = nbl_phy_configure_msix_map, .configure_msix_info = nbl_phy_configure_msix_info, @@ -2739,7 +7219,17 @@ static struct nbl_phy_ops phy_ops = { .del_tcam = nbl_phy_del_tcam, .add_mcc = nbl_phy_add_mcc, .del_mcc = nbl_phy_del_mcc, + .update_mcc_next_node = nbl_phy_update_mcc_next_node, + .add_tnl_encap = nbl_phy_add_tnl_encap, + .del_tnl_encap = nbl_phy_del_tnl_encap, .init_fem = nbl_phy_init_fem, + .init_acl = nbl_phy_init_acl, + .uninit_acl = nbl_phy_uninit_acl, + .set_upcall_rule = nbl_phy_acl_set_upcall_rule, + .unset_upcall_rule = nbl_phy_acl_unset_upcall_rule, + .set_shaping_dport_vld = nbl_phy_set_shaping_dport_vld, + .set_dport_fc_th_vld = nbl_phy_set_dport_fc_th_vld, + .init_acl_stats = nbl_phy_init_acl_stats, .update_mailbox_queue_tail_ptr = nbl_phy_update_mailbox_queue_tail_ptr, .config_mailbox_rxq = nbl_phy_config_mailbox_rxq, @@ -2750,6 +7240,9 @@ static struct nbl_phy_ops phy_ops = { .check_mailbox_dma_err = nbl_phy_check_mailbox_dma_err, .get_host_pf_mask = nbl_phy_get_host_pf_mask, .get_host_pf_fid = nbl_phy_get_host_pf_fid, + .get_real_bus = nbl_phy_get_real_bus, + .get_pf_bar_addr = nbl_phy_get_pf_bar_addr, + .get_vf_bar_addr = nbl_phy_get_vf_bar_addr, .cfg_mailbox_qinfo = nbl_phy_cfg_mailbox_qinfo, .enable_mailbox_irq = nbl_phy_enable_mailbox_irq, .enable_abnormal_irq = nbl_phy_enable_abnormal_irq, @@ -2773,20 +7266,114 @@ static struct nbl_phy_ops phy_ops = { .get_hw_addr = nbl_phy_get_hw_addr, + .cfg_ktls_tx_keymat = nbl_phy_cfg_ktls_tx_keymat, + .cfg_ktls_rx_keymat = nbl_phy_cfg_ktls_rx_keymat, + .cfg_ktls_rx_record = nbl_phy_cfg_ktls_rx_record, + + .cfg_dipsec_nat = nbl_phy_cfg_dipsec_nat, + .cfg_dipsec_sad_iv = nbl_phy_cfg_dipsec_sad_iv, + .cfg_dipsec_sad_esn = nbl_phy_cfg_dipsec_sad_esn, + .cfg_dipsec_sad_lifetime = nbl_phy_cfg_dipsec_sad_lifetime, + .cfg_dipsec_sad_crypto = nbl_phy_cfg_dipsec_sad_crypto, + .cfg_dipsec_sad_encap = nbl_phy_cfg_dipsec_sad_encap, + .read_dipsec_status = nbl_phy_read_dipsec_status, + .reset_dipsec_status = nbl_phy_reset_dipsec_status, + .read_dipsec_lft_info = nbl_phy_read_dipsec_lft_info, + .cfg_dipsec_lft_info = nbl_phy_cfg_dipsec_lft_info, + .init_dprbac = nbl_phy_init_dprbac, + .cfg_uipsec_nat = nbl_phy_cfg_uipsec_nat, + .cfg_uipsec_sad_esn = nbl_phy_cfg_uipsec_sad_esn, + .cfg_uipsec_sad_lifetime = nbl_phy_cfg_uipsec_sad_lifetime, + .cfg_uipsec_sad_crypto = nbl_phy_cfg_uipsec_sad_crypto, + .cfg_uipsec_sad_window = nbl_phy_cfg_uipsec_sad_window, + .cfg_uipsec_em_tcam = nbl_phy_cfg_uipsec_em_tcam, + .cfg_uipsec_em_ad = nbl_phy_cfg_uipsec_em_ad, + .clear_uipsec_tcam_ad = nbl_phy_clear_uipsec_tcam_ad, + .cfg_uipsec_em_ht = nbl_phy_cfg_uipsec_em_ht, + .cfg_uipsec_em_kt = nbl_phy_cfg_uipsec_em_kt, + .clear_uipsec_ht_kt = nbl_phy_clear_uipsec_ht_kt, + .read_uipsec_status = nbl_phy_read_uipsec_status, + .reset_uipsec_status = nbl_phy_reset_uipsec_status, + .read_uipsec_lft_info = nbl_phy_read_uipsec_lft_info, + 
.cfg_uipsec_lft_info = nbl_phy_cfg_uipsec_lft_info, + .init_uprbac = nbl_phy_init_uprbac, + .get_fw_ping = nbl_phy_get_fw_ping, .set_fw_ping = nbl_phy_set_fw_ping, .get_fw_pong = nbl_phy_get_fw_pong, .set_fw_pong = nbl_phy_set_fw_pong, + .load_p4 = nbl_phy_load_p4, + + .configure_qos = nbl_phy_configure_qos, + .configure_rdma_bw = nbl_phy_configure_rdma_bw, + .set_pfc_buffer_size = nbl_phy_set_pfc_buffer_size, + .get_pfc_buffer_size = nbl_phy_get_pfc_buffer_size, + .set_rate_limit = nbl_phy_set_rate_limit, + + .init_offload_fwd = nbl_phy_init_offload_fwd, + .init_cmdq = nbl_phy_cmdq_init, + .reset_cmdq = nbl_phy_cmdq_reset, + .destroy_cmdq = nbl_phy_cmdq_destroy, + .update_cmdq_tail = nbl_phy_update_cmdq_tail, + .init_flow = nbl_phy_flow_init, + .deinit_flow = nbl_phy_flow_deinit, + .get_flow_acl_switch = nbl_phy_flow_get_acl_switch, + .get_line_rate_info = nbl_phy_get_line_rate_info, + .offload_flow_rule = nbl_phy_offload_flow_rule, + .init_rep = nbl_phy_init_rep, + .clear_profile_table_action = nbl_phy_clear_profile_table_action, + .ipro_chksum_err_ctrl = nbl_phy_ipro_chksum_err_ctrl, + + .init_vdpaq = nbl_phy_init_vdpaq, + .destroy_vdpaq = nbl_phy_destroy_vdpaq, + .get_reg_dump = nbl_phy_get_reg_dump, .get_reg_dump_len = nbl_phy_get_reg_dump_len, .process_abnormal_event = nbl_phy_process_abnormal_event, .get_uvn_desc_entry_stats = nbl_phy_get_uvn_desc_entry_stats, .set_uvn_desc_wr_timeout = nbl_phy_set_uvn_desc_wr_timeout, + .cfg_lag_hash_algorithm = nbl_phy_cfg_lag_algorithm, + .cfg_lag_member_fwd = nbl_phy_cfg_lag_member_fwd, + .cfg_lag_member_list = nbl_phy_cfg_lag_member_list, + .cfg_lag_member_up_attr = nbl_phy_cfg_lag_member_up_attr, + .get_lag_fwd = nbl_phy_get_lag_fwd, + .cfg_bond_shaping = nbl_phy_cfg_bond_shaping, + .cfg_bgid_back_pressure = nbl_phy_cfg_bgid_back_pressure, + .get_fw_eth_num = nbl_phy_get_fw_eth_num, .get_fw_eth_map = nbl_phy_get_fw_eth_map, .get_board_info = nbl_phy_get_board_info, + .get_quirks = nbl_phy_get_quirks, + .set_tc_kgen_cvlan_zero = nbl_phy_set_tc_kgen_cvlan_zero, + .unset_tc_kgen_cvlan = nbl_phy_unset_tc_kgen_cvlan, + .set_ped_tab_vsi_type = nbl_phy_set_ped_tab_vsi_type, + + .clear_acl = nbl_phy_clear_acl, + .set_fd_udf = nbl_phy_set_fd_udf, + .clear_fd_udf = nbl_phy_clr_fd_udf, + .set_fd_tcam_cfg_default = nbl_phy_set_fd_tcam_cfg_default, + .set_fd_tcam_cfg_lite = nbl_phy_set_fd_tcam_cfg_lite, + .set_fd_tcam_cfg_full = nbl_phy_set_fd_tcam_cfg_full, + .set_fd_tcam_ram = nbl_phy_set_fd_tcam_ram, + .set_fd_action_ram = nbl_phy_set_fd_action_ram, + .set_hw_status = nbl_phy_set_hw_status, + .get_hw_status = nbl_phy_get_hw_status, + + .get_perf_dump_length = nbl_phy_get_perf_dump_length, + .get_perf_dump_data = nbl_phy_get_perf_dump_data, + + .get_mirror_table_id = nbl_phy_get_mirror_table_id, + .configure_mirror = nbl_phy_configure_mirror, + .configure_mirror_table = nbl_phy_configure_mirror_table, + .clear_mirror_cfg = nbl_phy_clear_mirror_cfg, + .set_dvn_desc_req = nbl_dvn_descreq_num_cfg, + .get_dvn_desc_req = nbl_dvn_descreq_num_get, + .get_dstat_vsi_stat = nbl_phy_get_dstat_vsi_stat, + .get_ustat_vsi_stat = nbl_phy_get_ustat_vsi_stat, + .get_uvn_pkt_drop_stats = nbl_phy_get_uvn_pkt_drop_stats, + .get_ustore_pkt_drop_stats = nbl_phy_get_ustore_pkt_drop_stats, }; /* Structure starts here, adding an op should not modify anything below */ @@ -2840,21 +7427,21 @@ static void nbl_phy_remove_ops(struct nbl_common_info *common, struct nbl_phy_op *phy_ops_tbl = NULL; } -static void nbl_phy_disable_rx_err_report(struct pci_dev *pdev) +static void 
__maybe_unused nbl_phy_disable_rx_err_report(struct pci_dev *pdev) { #define NBL_RX_ERR_BIT 0 #define NBL_BAD_TLP_BIT 6 #define NBL_BAD_DLLP_BIT 7 u8 mask = 0; - int pos = 0; + int aer_cap = 0; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); - if (!pos) + aer_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (!aer_cap) return; - pci_read_config_byte(pdev, pos + PCI_ERR_COR_MASK, &mask); + pci_read_config_byte(pdev, aer_cap + PCI_ERR_COR_MASK, &mask); mask |= BIT(NBL_RX_ERR_BIT) | BIT(NBL_BAD_TLP_BIT) | BIT(NBL_BAD_DLLP_BIT); - pci_write_config_byte(pdev, pos + PCI_ERR_COR_MASK, mask); + pci_write_config_byte(pdev, aer_cap + PCI_ERR_COR_MASK, mask); } int nbl_phy_init_leonis(void *p, struct nbl_init_param *param) @@ -2921,7 +7508,7 @@ int nbl_phy_init_leonis(void *p, struct nbl_init_param *param) if (ret) goto setup_ops_fail; - nbl_phy_disable_rx_err_report(pdev); + /* nbl_phy_disable_rx_err_report(pdev); */ (*phy_mgt_leonis)->ro_enable = pcie_relaxed_ordering_enabled(pdev); @@ -2965,3 +7552,4 @@ void nbl_phy_remove_leonis(void *p) nbl_phy_remove_ops(common, phy_ops_tbl); } + diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h index ad5c19ed1450c7eb53fc3330cf6909a1de2e1080..59c062e3196774de03dc056861d0f9019797e6dd 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h @@ -11,7 +11,6 @@ #include "nbl_hw.h" #include "nbl_phy.h" -#define NBL_NOTIFY_DELAY_MIN_TIME_FOR_REGS 200 /* 200us for palladium,3us for s2c */ #define NBL_NOTIFY_DELAY_MAX_TIME_FOR_REGS 300 /* 300us for palladium,5us for s2c */ #define NBL_DRAIN_WAIT_TIMES (30000) @@ -27,18 +26,19 @@ #define KT_MASK_LEN32_ACTION_INFO (0x0) #define KT_MASK_LEN12_ACTION_INFO (0xFFFFF000) #define NBL_FEM_SEARCH_KEY_LEN 44 +#define NBL_HW_DUMMY_REG (0x1300904) -#define HT_PORT0_BANK_SEL (0b01000000) -#define HT_PORT1_BANK_SEL (0b00110000) +#define HT_PORT0_BANK_SEL (0b01100000) +#define HT_PORT1_BANK_SEL (0b00011000) #define HT_PORT2_BANK_SEL (0b00000111) -#define KT_PORT0_BANK_SEL (0b11000000) -#define KT_PORT1_BANK_SEL (0b00110000) -#define KT_PORT2_BANK_SEL (0b00001111) +#define KT_PORT0_BANK_SEL (0b11100000) +#define KT_PORT1_BANK_SEL (0b00011000) +#define KT_PORT2_BANK_SEL (0b00000111) #define AT_PORT0_BANK_SEL (0b000000000000) -#define AT_PORT1_BANK_SEL (0b111000000000) -#define AT_PORT2_BANK_SEL (0b000111111111) -#define HT_PORT0_BTM 1 -#define HT_PORT1_BTM 3 +#define AT_PORT1_BANK_SEL (0b111110000000) +#define AT_PORT2_BANK_SEL (0b000001111111) +#define HT_PORT0_BTM 2 +#define HT_PORT1_BTM 6 #define HT_PORT2_BTM 16 #define NBL_1BIT 1 #define NBL_8BIT 8 @@ -74,8 +74,8 @@ #define NBL_KT_PHY_L2_DW_LEN 40 -#define NBL_ACL_VSI_PF_UPCALL 3 -#define NBL_ACL_ETH_PF_UPCALL 2 +#define NBL_ACL_VSI_PF_UPCALL 9 +#define NBL_ACL_ETH_PF_UPCALL 8 #define NBL_ACL_INDIRECT_ACCESS_WRITE (0) #define NBL_ACL_INDIRECT_ACCESS_READ (1) #define NBL_ETH_BASE_IDX 8 @@ -92,6 +92,11 @@ #define NBL_ACL_TCAM_DATA_X(t) (NBL_PPE_ACL_BASE + 0x00000904 + ((t) * 8)) #define NBL_ACL_TCAM_DATA_Y(t) (NBL_PPE_ACL_BASE + 0x00000990 + ((t) * 8)) +struct nbl_acl_tcam_common_data_u { + u8 data[5]; + u8 rsv[3]; +}; + /* ---------- MCC ---------- */ #define NBL_MCC_MODULE (0x00B44000) #define NBL_MCC_LEAF_NODE_TABLE(i) \ @@ -404,6 +409,8 @@ union nbl_fem_profile_tbl_u { #define NBL_LB_PF_CONFIGSPACE_BASE_ADDR 
(NBL_LB_PCIEX16_TOP_BASE + 0x00024000) #define NBL_LB_PCIEX16_TOP_AHB (NBL_LB_PCIEX16_TOP_BASE + 0x00000020) +#define NBL_SRIOV_CAPS_OFFSET (0x140) + /* -------- MAILBOX BAR2 ----- */ #define NBL_MAILBOX_NOTIFY_ADDR (0x00000000) #define NBL_MAILBOX_BAR_REG (0x00000000) @@ -452,7 +459,7 @@ struct nbl_adminq_qinfo_map_table { u32 devid:5; u32 bus:8; u32 msix_idx:13; - u32 msix_idx_vaild:1; + u32 msix_idx_valid:1; u32 rsv:2; }; @@ -476,7 +483,7 @@ struct nbl_mailbox_qinfo_map_table { u32 devid:5; u32 bus:8; u32 msix_idx:13; - u32 msix_idx_vaild:1; + u32 msix_idx_valid:1; u32 rsv:2; }; @@ -484,6 +491,7 @@ struct nbl_mailbox_qinfo_map_table { #define NBL_PCIE_HOST_K_PF_MASK_REG (NBL_INTF_HOST_PCIE_BASE + 0x00001004) #define NBL_PCIE_HOST_K_PF_FID(pf_id) \ (NBL_INTF_HOST_PCIE_BASE + 0x0000106C + 4 * (pf_id)) +#define NBL_PCIE_HOST_TL_CFG_BUSDEV (NBL_INTF_HOST_PCIE_BASE + 0x11040) /* -------- HOST_PADPT -------- */ #define NBL_HOST_PADPT_HOST_CFG_FC_PD_DN (NBL_INTF_HOST_PADPT_BASE + 0x00000160) @@ -675,12 +683,14 @@ struct nbl_ped_hw_edit_profile_cfg { #define NBL_LR_LEONIS_NET_SHAPING_DPETH 600 #define NBL_LR_LEONIS_NET_BUCKET_DEPTH 9600 -#define NBL_SHAPING_DPORT_25G_RATE 0x601E -#define NBL_SHAPING_DPORT_HALF_25G_RATE 0x300F +#define NBL_SHAPING_DPORT_25G_RATE 0x61A8 +#define NBL_SHAPING_DPORT_HALF_25G_RATE 0x30D4 #define NBL_SHAPING_DPORT_100G_RATE 0x1A400 #define NBL_SHAPING_DPORT_HALF_100G_RATE 0xD200 +#define NBL_UCAR_MAX_BUCKET_DEPTH 524287 + #define NBL_DSTORE_DROP_XOFF_TH 0xC8 #define NBL_DSTORE_DROP_XON_TH 0x64 @@ -728,6 +738,19 @@ struct dsch_vn_net2sha_map_tbl { u32 reserve:31; }; +#define NBL_NET_SHAPING_RDMA_BASE_ID (448) + +struct dsch_rdma_net2sha_map_tbl { + u32 net_shaping_id:10; + u32 reserve:21; + u32 vld:1; +}; + +struct dsch_rdma_sha2net_map_tbl { + u32 rdma_vf_id:31; + u32 vld:1; +}; + struct dsch_psha_en { u32 en:4; u32 rsv:28; @@ -901,6 +924,8 @@ struct nbl_dvn_stat_cnt { #define NBL_DVN_PKT_DIF_ERR_CNT (NBL_DP_DVN_BASE + 0x00000034) #define NBL_DVN_ERR_QUEUE_ID_GET (NBL_DP_DVN_BASE + 0x0000040C) #define NBL_DVN_BACK_PRESSURE_MASK (NBL_DP_DVN_BASE + 0x00000464) +#define NBL_DVN_DESCRD_L2_UNAVAIL_CNT (NBL_DP_DVN_BASE + 0x00000A1C) +#define NBL_DVN_DESCRD_L2_NOAVAIL_CNT (NBL_DP_DVN_BASE + 0x00000A20) #define DEFAULT_DVN_DESCREQ_NUMCFG (0x00080014) #define DEFAULT_DVN_100G_DESCREQ_NUMCFG (0x00080020) @@ -1030,9 +1055,17 @@ struct dvn_back_pressure_mask { #define NBL_UVN_QUEUE_ERR_MASK (NBL_DP_UVN_BASE + 0x00000224) #define NBL_UVN_ECPU_QUEUE_NUM (NBL_DP_UVN_BASE + 0x0000023C) #define NBL_UVN_DESC_WR_TIMEOUT (NBL_DP_UVN_BASE + 0x00000214) +#define NBL_UVN_DIF_DELAY_REQ (NBL_DP_UVN_BASE + 0x000010D0) +#define NBL_UVN_DIF_DELAY_TIME (NBL_DP_UVN_BASE + 0x000010D4) +#define NBL_UVN_DIF_DELAY_MAX (NBL_DP_UVN_BASE + 0x000010D8) +#define NBL_UVN_DESC_PRE_DESC_REQ_NULL (NBL_DP_UVN_BASE + 0x000012C8) +#define NBL_UVN_DESC_PRE_DESC_REQ_LACK (NBL_DP_UVN_BASE + 0x000012CC) #define NBL_UVN_DESC_RD_ENTRY (NBL_DP_UVN_BASE + 0x000012D0) +#define NBL_UVN_DESC_RD_DROP_DESC_LACK (NBL_DP_UVN_BASE + 0x000012E0) #define NBL_UVN_DIF_REQ_RO_FLAG (NBL_DP_UVN_BASE + 0x00000250) +#define NBL_UVN_DESC_PREFETCH_INIT (NBL_DP_UVN_BASE + 0x00000204) #define NBL_UVN_DESC_WR_TIMEOUT_4US (0x960) +#define NBL_UVN_DESC_PREFETCH_NUM (4) #define NBL_UVN_INT_QUEUE_ERR (5) @@ -1126,16 +1159,27 @@ struct uvn_queue_err_mask { u32 rsv1:26; }; +struct uvn_desc_prefetch_init { + u32 num:8; + u32 rsv1:8; + u32 sel:1; + u32 rsv:15; +}; + /* -------- USTORE -------- */ #define NBL_USTORE_PKT_LEN_ADDR (NBL_DP_USTORE_BASE + 
0x00000108) #define NBL_USTORE_PORT_FC_TH_REG_ARR(port_id) \ (NBL_DP_USTORE_BASE + 0x00000134 + (port_id) * sizeof(struct nbl_ustore_port_fc_th)) - #define NBL_USTORE_COS_FC_TH_REG_ARR(cos_id) \ (NBL_DP_USTORE_BASE + 0x00000200 + (cos_id) * sizeof(struct nbl_ustore_cos_fc_th)) - #define NBL_USTORE_PORT_DROP_TH_REG_ARR(port_id) \ (NBL_DP_USTORE_BASE + 0x00000150 + (port_id) * sizeof(struct nbl_ustore_port_drop_th)) +#define NBL_USTORE_BUF_TOTAL_DROP_PKT (NBL_DP_USTORE_BASE + 0x000010A8) +#define NBL_USTORE_BUF_TOTAL_TRUN_PKT (NBL_DP_USTORE_BASE + 0x000010AC) +#define NBL_USTORE_BUF_PORT_DROP_PKT(eth_id) \ + (NBL_DP_USTORE_BASE + 0x00002500 + (eth_id) * sizeof(u32)) +#define NBL_USTORE_BUF_PORT_TRUN_PKT(eth_id) \ + (NBL_DP_USTORE_BASE + 0x00002540 + (eth_id) * sizeof(u32)) #define NBL_USTORE_SIGNLE_ETH_DROP_TH 0xC80 #define NBL_USTORE_DUAL_ETH_DROP_TH 0x640 @@ -1171,6 +1215,8 @@ struct nbl_ustore_cos_fc_th { u32 fc_en:1; }; +#define NBL_MAX_USTORE_COS_FC_TH (4080) + /* USTORE port_drop_th */ struct nbl_ustore_port_drop_th { u32 disc_th:12; @@ -1181,13 +1227,35 @@ struct nbl_ustore_port_drop_th { /* ---------- UL4S ---------- */ #define NBL_UL4S_SCH_PAD_ADDR (NBL_DP_UL4S_BASE + 0x000006c4) -/* UL4S UL4S_sch_pad */ -struct UL4S_sch_pad { +/* UL4S ul4s_sch_pad */ +struct ul4s_sch_pad { u32 en:1; u32 clr:1; u32 rsv:30; }; +/* --------- DSTAT --------- */ +#define NBL_DSTAT_VSI_STAT(vsi_id) \ + (NBL_DP_DSTAT_BASE + 0x00008000 + (vsi_id) * sizeof(struct nbl_dstat_vsi_stat)) + +struct nbl_dstat_vsi_stat { + u32 fwd_byte_cnt_low; + u32 fwd_byte_cnt_high; + u32 fwd_pkt_cnt_low; + u32 fwd_pkt_cnt_high; +}; + +/* --------- USTAT --------- */ +#define NBL_USTAT_VSI_STAT(vsi_id) \ + (NBL_DP_USTAT_BASE + 0x00008000 + (vsi_id) * sizeof(struct nbl_ustat_vsi_stat)) + +struct nbl_ustat_vsi_stat { + u32 fwd_byte_cnt_low; + u32 fwd_byte_cnt_high; + u32 fwd_pkt_cnt_low; + u32 fwd_pkt_cnt_high; +}; + /* ---------- IPRO ---------- */ /* ipro module related macros */ #define NBL_IPRO_MODULE (0xB04000) @@ -1219,7 +1287,7 @@ struct nbl_ipro_dn_src_port_tbl { u32 mirror_id:4; u32 vlan_layer_num_1:2; u32 phy_flow:1; - u32 not_used_0:4; + u32 mtu_sel:4; u32 addr_check_en:1; u32 smac_low:16; u32 smac_high; @@ -1258,6 +1326,11 @@ struct nbl_ipro_upsport_tbl { u32 rsv:1; }; +struct nbl_ipro_mtu_sel { + u32 mtu_1:16; /* [15:0] Default:0x0 RW */ + u32 mtu_0:16; /* [31:16] Default:0x0 RW */ +}; + /* ---------- EPRO ---------- */ #define NBL_EPRO_INT_STATUS (NBL_PPE_EPRO_BASE + 0x00000000) #define NBL_EPRO_INT_MASK (NBL_PPE_EPRO_BASE + 0x00000004) @@ -1522,8 +1595,30 @@ struct nbl_dqm_rxmac_tx_cos_bp_en_cfg { u32 eth3:8; }; -#define NBL_UQM_RX_COS_BP_EN (NBL_DP_UQM_BASE + 0x00000614) -#define NBL_UQM_TX_COS_BP_EN (NBL_DP_UQM_BASE + 0x00000604) +#define NBL_UQM_QUE_TYPE (NBL_DP_UQM_BASE + 0x0000013c) +#define NBL_UQM_RX_COS_BP_EN (NBL_DP_UQM_BASE + 0x00000614) +#define NBL_UQM_TX_COS_BP_EN (NBL_DP_UQM_BASE + 0x00000604) + +#define NBL_UQM_DROP_PKT_CNT (NBL_DP_UQM_BASE + 0x000009C0) +#define NBL_UQM_DROP_PKT_SLICE_CNT (NBL_DP_UQM_BASE + 0x000009C4) +#define NBL_UQM_DROP_PKT_LEN_ADD_CNT (NBL_DP_UQM_BASE + 0x000009C8) +#define NBL_UQM_DROP_HEAD_PNTR_ADD_CNT (NBL_DP_UQM_BASE + 0x000009CC) +#define NBL_UQM_DROP_WEIGHT_ADD_CNT (NBL_DP_UQM_BASE + 0x000009D0) +#define NBL_UQM_PORT_DROP_PKT_CNT (NBL_DP_UQM_BASE + 0x000009D4) +#define NBL_UQM_PORT_DROP_PKT_SLICE_CNT (NBL_DP_UQM_BASE + 0x000009F4) +#define NBL_UQM_PORT_DROP_PKT_LEN_ADD_CNT (NBL_DP_UQM_BASE + 0x00000A14) +#define NBL_UQM_PORT_DROP_HEAD_PNTR_ADD_CNT (NBL_DP_UQM_BASE + 
0x00000A34) +#define NBL_UQM_PORT_DROP_WEIGHT_ADD_CNT (NBL_DP_UQM_BASE + 0x00000A54) +#define NBL_UQM_FWD_DROP_CNT (NBL_DP_UQM_BASE + 0x00000A80) +#define NBL_UQM_DPORT_DROP_CNT (NBL_DP_UQM_BASE + 0x00000B74) + +#define NBL_UQM_PORT_DROP_DEPTH 6 +#define NBL_UQM_DPORT_DROP_DEPTH 16 + +struct nbl_uqm_que_type { + u32 bp_drop:1; + u32 rsv:31; +}; /* UQM rx_cos_bp_en */ struct nbl_uqm_rx_cos_bp_en_cfg { @@ -1560,7 +1655,7 @@ struct nbl_uqm_tx_port_bp_en_cfg { /* dl4s */ #define NBL_DL4S_KEY_SALT(_i) (NBL_DP_DL4S_BASE + 0x00010000 + (_i) * 64) -/* UL4S */ +/* ul4s */ #define NBL_UL4S_SYNC_TRIG (NBL_DP_UL4S_BASE + 0x00000700) #define NBL_UL4S_SYNC_SID (NBL_DP_UL4S_BASE + 0x00000704) #define NBL_UL4S_SYNC_TCP_SN (NBL_DP_UL4S_BASE + 0x00000710) @@ -1887,6 +1982,8 @@ union nbl_ipsec_lifetime_diff { #define NBL_TOP_CTRL_MODULE (0x01300000) #define NBL_TOP_CTRL_INT_STATUS (NBL_TOP_CTRL_MODULE + 0X0000) #define NBL_TOP_CTRL_INT_MASK (NBL_TOP_CTRL_MODULE + 0X0004) +#define NBL_TOP_CTRL_LB_CLK (NBL_TOP_CTRL_MODULE + 0X0100) +#define NBL_TOP_CTRL_LB_RST (NBL_TOP_CTRL_MODULE + 0X0104) #define NBL_TOP_CTRL_TVSENSOR0 (NBL_TOP_CTRL_MODULE + 0X0254) #define NBL_TOP_CTRL_SOFT_DEF0 (NBL_TOP_CTRL_MODULE + 0x0430) #define NBL_TOP_CTRL_SOFT_DEF1 (NBL_TOP_CTRL_MODULE + 0x0434) @@ -1899,10 +1996,14 @@ union nbl_ipsec_lifetime_diff { #define NBL_FW_HEARTBEAT_PONG NBL_TOP_CTRL_SOFT_DEF1 -#define NBL_PP_NUM (3) -#define NBL_PP_TYPE_0 (0) -#define NBL_PP_TYPE_1 (1) -#define NBL_PP_TYPE_2 (2) +#define NBL_TOP_CTRL_RDMA_LB_RST BIT(10) +#define NBL_TOP_CTRL_RDMA_LB_CLK BIT(10) + +/* temperature threshold1 */ +#define NBL_LEONIS_TEMP_MAX (105) +/* temperature threshold2 */ +#define NBL_LEONIS_TEMP_CRIT (115) + #define NBL_ACT_DATA_BITS (16) #define NBL_CMDQ_DIF_MODE_VALUE (2) @@ -1944,7 +2045,7 @@ union nbl_ipsec_lifetime_diff { #define NBL_CMDQ_HI_DWORD(x) ((u32)(((x) >> 32) & 0xFFFFFFFF)) #define NBL_CMDQ_LO_DWORD(x) ((u32)(x) & 0xFFFFFFFF) #define NBL_FEM_INIT_START_KERN (0xFE) -#define NBL_FEM_INIT_START_VALUE (0x7E) +#define NBL_FEM_INIT_START_VALUE (0x3E) #define NBL_PED_VSI_TYPE_ETH_BASE (1027) #define NBL_DPED_VLAN_TYPE_PORT_NUM (1031) #define NBL_CHAN_REG_MAX_LEN (32) @@ -1990,11 +2091,21 @@ union nbl_ipsec_lifetime_diff { #define NBL_DSCH_VN_NET2SHA_MAP_TBL_REG(r) (NBL_DSCH_VN_NET2SHA_MAP_TBL_ADDR + \ (NBL_DSCH_VN_NET2SHA_MAP_TBL_DWLEN * 4) * (r)) +#define NBL_DSCH_RDMA_SHA2NET_MAP_TBL_ADDR (0x49c000) +#define NBL_DSCH_RDMA_SHA2NET_MAP_TBL_DWLEN (1) +#define NBL_DSCH_RDMA_SHA2NET_MAP_TBL_REG(r) (NBL_DSCH_RDMA_SHA2NET_MAP_TBL_ADDR + \ + (NBL_DSCH_RDMA_SHA2NET_MAP_TBL_DWLEN * 4) * (r)) +#define NBL_DSCH_RDMA_NET2SHA_MAP_TBL_ADDR (0x494000) +#define NBL_DSCH_RDMA_NET2SHA_MAP_TBL_DWLEN (1) +#define NBL_DSCH_RDMA_NET2SHA_MAP_TBL_REG(r) (NBL_DSCH_RDMA_NET2SHA_MAP_TBL_ADDR + \ + (NBL_DSCH_RDMA_NET2SHA_MAP_TBL_DWLEN * 4) * (r)) + /* Mailbox bar phy register offset begin */ #define NBL_FW_HEARTBEAT_PING 0x84 #define NBL_FW_BOARD_CONFIG 0x200 #define NBL_FW_BOARD_DW3_OFFSET (NBL_FW_BOARD_CONFIG + 12) #define NBL_FW_BOARD_DW6_OFFSET (NBL_FW_BOARD_CONFIG + 24) +#define NBL_ETH_REP_INFO_BASE (1024) /* Mailbox bar phy register offset end */ @@ -2010,14 +2121,6 @@ enum nbl_ped_vlan_type_e { OUTER_VLAN_TYPE, }; -enum nbl_eth_rep_id { - ETH_NET_REP_ID_0 = 2048, - ETH_NET_REP_ID_1, - ETH_NET_REP_ID_2, - ETH_NET_REP_ID_3, - ETH_NET_REP_ID_MAX -}; - enum nbl_ped_vlan_tpid_e { PED_VLAN_TYPE_8100 = 0, PED_VLAN_TYPE_88A8 = 1, @@ -2111,61 +2214,14 @@ enum rss_field_type { #define NBL_KEY_IP4_L4_RSS_BIT 1 #define NBL_KEY_IP6_L4_RSS_BIT 2 
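The configuration words throughout this patch are driven through one overlay idiom: a union pairing a __packed bitfield view (info) with the raw u32 register word (data), so a single register read fills the word and individual fields are then picked out by name. A minimal sketch of the idiom, reusing the nbl_fw_board_cfg_dw3 union amended below and the nbl_hw_read_mbx_regs() call that appears elsewhere in this diff; the helper name itself is illustrative only:

static u32 nbl_phy_sketch_read_port_num(struct nbl_phy_mgt *phy_mgt)
{
	union nbl_fw_board_cfg_dw3 dw3 = {.info = {0}};

	/* Illustrative helper, not part of the patch: one 32-bit mailbox
	 * read fills the union, and .info aliases the same storage as
	 * .data, so the 7-bit port_num field comes out without manual
	 * shifting or masking.
	 */
	nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW3_OFFSET,
			     (u8 *)&dw3, sizeof(dw3));
	return dw3.info.port_num;
}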
-#define NBL_DPED_L4_CK_CMD_40_ADDR (0x75c338)
-#define NBL_DPED_L4_CK_CMD_40_DEPTH (1)
-#define NBL_DPED_L4_CK_CMD_40_WIDTH (32)
-#define NBL_DPED_L4_CK_CMD_40_DWLEN (1)
-struct dped_l4_ck_cmd_40 {
-	u32 value:8; /* [7:0] Default:0x0 RW */
-	u32 len_in_oft:7; /* [14:8] Default:0x0 RW */
-	u32 len_phid:2; /* [16:15] Default:0x0 RW */
-	u32 len_vld:1; /* [17] Default:0x0 RW */
-	u32 data_vld:1; /* [18] Default:0x0 RW */
-	u32 in_oft:7; /* [25:19] Default:0x8 RW */
-	u32 phid:2; /* [27:26] Default:0x3 RW */
-	u32 flag:1; /* [28] Default:0x0 RW */
-	u32 mode:1; /* [29] Default:0x1 RW */
-	u32 rsv:1; /* [30] Default:0x0 RO */
-	u32 en:1; /* [31] Default:0x0 RW */
-};
-
-#define NBL_DSTORE_D_DPORT_FC_TH_ADDR (0x704600)
-#define NBL_DSTORE_D_DPORT_FC_TH_DEPTH (5)
-#define NBL_DSTORE_D_DPORT_FC_TH_WIDTH (32)
-#define NBL_DSTORE_D_DPORT_FC_TH_DWLEN (1)
-
-struct dstore_d_dport_fc_th {
-	u32 xoff_th:11; /* [10:0] Default:200 RW */
-	u32 rsv1:5; /* [15:11] Default:0x0 RO */
-	u32 xon_th:11; /* [26:16] Default:100 RW */
-	u32 rsv:3; /* [29:27] Default:0x0 RO */
-	u32 fc_set:1; /* [30:30] Default:0x0 RW */
-	u32 fc_en:1; /* [31:31] Default:0x0 RW */
-};
-
-#define NBL_DSTORE_D_DPORT_FC_TH_REG(r) (NBL_DSTORE_D_DPORT_FC_TH_ADDR + \
-	(NBL_DSTORE_D_DPORT_FC_TH_DWLEN * 4) * (r))
-
-#define NBL_DSTORE_PORT_DROP_TH_ADDR (0x704150)
-#define NBL_DSTORE_PORT_DROP_TH_DEPTH (6)
-#define NBL_DSTORE_PORT_DROP_TH_WIDTH (32)
-#define NBL_DSTORE_PORT_DROP_TH_DWLEN (1)
-
-struct dstore_port_drop_th {
-	u32 disc_th:10; /* [9:0] Default:800 RW */
-	u32 rsv:21; /* [30:10] Default:0x0 RO */
-	u32 en:1; /* [31] Default:0x1 RW */
-};
-
-#define NBL_DSTORE_PORT_DROP_TH_REG(r) (NBL_DSTORE_PORT_DROP_TH_ADDR + \
-	(NBL_DSTORE_PORT_DROP_TH_DWLEN * 4) * (r))
-
 union nbl_fw_board_cfg_dw3 {
 	struct board_cfg_dw3 {
-		u32 port_typpe:1;
+		u32 port_type:1;
 		u32 port_num:7;
 		u32 port_speed:2;
-		u32 rsv:22;
+		u32 gpio_type:3;
+		u32 p4_version:1; /* 0: low version; 1: high version */
+		u32 rsv:18;
 	} __packed info;
 	u32 data;
 };
@@ -2179,4 +2235,8 @@ union nbl_fw_board_cfg_dw6 {
 	u32 data;
 };
 
+#define NBL_LEONIS_QUIRKS_OFFSET (0x00000140)
+
+#define NBL_LEONIS_ILLEGAL_REG_VALUE (0xDEADBEEF)
+
 #endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.c
new file mode 100644
index 0000000000000000000000000000000000000000..1184c7977297deb362bcb50af620bb9498c881b8
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.c
@@ -0,0 +1,3864 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author:
+ */
+
+#include "nbl_phy.h"
+#include "nbl_phy_leonis.h"
+#include "nbl_phy_leonis_regs.h"
+
+#define NBL_SEC_BLOCK_SIZE (0x100)
+#define NBL_SEC000_SIZE (1)
+#define NBL_SEC000_ADDR (0x114150)
+#define NBL_SEC001_SIZE (1)
+#define NBL_SEC001_ADDR (0x15c190)
+#define NBL_SEC002_SIZE (1)
+#define NBL_SEC002_ADDR (0x10417c)
+#define NBL_SEC003_SIZE (1)
+#define NBL_SEC003_ADDR (0x714154)
+#define NBL_SEC004_SIZE (1)
+#define NBL_SEC004_ADDR (0x75c190)
+#define NBL_SEC005_SIZE (1)
+#define NBL_SEC005_ADDR (0x70417c)
+#define NBL_SEC006_SIZE (512)
+#define NBL_SEC006_ADDR (0x8f000)
+#define NBL_SEC006_REGI(i) (0x8f000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC007_SIZE (256)
+#define NBL_SEC007_ADDR (0x8f800)
+#define NBL_SEC007_REGI(i) (0x8f800 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC008_SIZE (1024)
+#define NBL_SEC008_ADDR (0x90000)
+#define NBL_SEC008_REGI(i) (0x90000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC009_SIZE (2048)
+#define NBL_SEC009_ADDR (0x94000)
+#define NBL_SEC009_REGI(i) (0x94000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC010_SIZE (256)
+#define NBL_SEC010_ADDR (0x96000)
+#define NBL_SEC010_REGI(i) (0x96000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC011_SIZE (1024)
+#define NBL_SEC011_ADDR (0x91000)
+#define NBL_SEC011_REGI(i) (0x91000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC012_SIZE (128)
+#define NBL_SEC012_ADDR (0x92000)
+#define NBL_SEC012_REGI(i) (0x92000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC013_SIZE (64)
+#define NBL_SEC013_ADDR (0x92200)
+#define NBL_SEC013_REGI(i) (0x92200 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC014_SIZE (64)
+#define NBL_SEC014_ADDR (0x92300)
+#define NBL_SEC014_REGI(i) (0x92300 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC015_SIZE (1)
+#define NBL_SEC015_ADDR (0x8c214)
+#define NBL_SEC016_SIZE (1)
+#define NBL_SEC016_ADDR (0x8c220)
+#define NBL_SEC017_SIZE (1)
+#define NBL_SEC017_ADDR (0x8c224)
+#define NBL_SEC018_SIZE (1)
+#define NBL_SEC018_ADDR (0x8c228)
+#define NBL_SEC019_SIZE (1)
+#define NBL_SEC019_ADDR (0x8c22c)
+#define NBL_SEC020_SIZE (1)
+#define NBL_SEC020_ADDR (0x8c1f0)
+#define NBL_SEC021_SIZE (1)
+#define NBL_SEC021_ADDR (0x8c1f8)
+#define NBL_SEC022_SIZE (256)
+#define NBL_SEC022_ADDR (0x85f000)
+#define NBL_SEC022_REGI(i) (0x85f000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC023_SIZE (128)
+#define NBL_SEC023_ADDR (0x85f800)
+#define NBL_SEC023_REGI(i) (0x85f800 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC024_SIZE (512)
+#define NBL_SEC024_ADDR (0x860000)
+#define NBL_SEC024_REGI(i) (0x860000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC025_SIZE (1024)
+#define NBL_SEC025_ADDR (0x864000)
+#define NBL_SEC025_REGI(i) (0x864000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC026_SIZE (256)
+#define NBL_SEC026_ADDR (0x866000)
+#define NBL_SEC026_REGI(i) (0x866000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC027_SIZE (512)
+#define NBL_SEC027_ADDR (0x861000)
+#define NBL_SEC027_REGI(i) (0x861000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC028_SIZE (64)
+#define NBL_SEC028_ADDR (0x862000)
+#define NBL_SEC028_REGI(i) (0x862000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC029_SIZE (32)
+#define NBL_SEC029_ADDR (0x862200)
+#define NBL_SEC029_REGI(i) (0x862200 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC030_SIZE (32)
+#define NBL_SEC030_ADDR (0x862300)
+#define NBL_SEC030_REGI(i) (0x862300 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC031_SIZE (1)
+#define NBL_SEC031_ADDR (0x85c214)
+#define NBL_SEC032_SIZE (1)
+#define NBL_SEC032_ADDR (0x85c220)
+#define NBL_SEC033_SIZE (1)
+#define NBL_SEC033_ADDR (0x85c224)
+#define NBL_SEC034_SIZE (1)
+#define NBL_SEC034_ADDR (0x85c228)
+#define NBL_SEC035_SIZE (1)
+#define NBL_SEC035_ADDR (0x85c22c)
+#define NBL_SEC036_SIZE (1)
+#define NBL_SEC036_ADDR (0xb04200)
+#define NBL_SEC037_SIZE (1)
+#define NBL_SEC037_ADDR (0xb04230)
+#define NBL_SEC038_SIZE (1)
+#define NBL_SEC038_ADDR (0xb04234)
+#define NBL_SEC039_SIZE (64)
+#define NBL_SEC039_ADDR (0xb05800)
+#define NBL_SEC039_REGI(i) (0xb05800 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC040_SIZE (32)
+#define NBL_SEC040_ADDR (0xb05400)
+#define NBL_SEC040_REGI(i) (0xb05400 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC041_SIZE (16)
+#define NBL_SEC041_ADDR (0xb05500)
+#define NBL_SEC041_REGI(i) (0xb05500 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC042_SIZE (1)
+#define NBL_SEC042_ADDR (0xb14148)
+#define NBL_SEC043_SIZE (1)
+#define NBL_SEC043_ADDR (0xb14104)
+#define NBL_SEC044_SIZE (1)
+#define NBL_SEC044_ADDR (0xb1414c)
+#define NBL_SEC045_SIZE (1)
+#define NBL_SEC045_ADDR (0xb14150)
+#define NBL_SEC046_SIZE (256)
+#define NBL_SEC046_ADDR (0xb15000)
+#define NBL_SEC046_REGI(i) (0xb15000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC047_SIZE (32)
+#define NBL_SEC047_ADDR (0xb15800)
+#define NBL_SEC047_REGI(i) (0xb15800 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC048_SIZE (1)
+#define NBL_SEC048_ADDR (0xb24148)
+#define NBL_SEC049_SIZE (1)
+#define NBL_SEC049_ADDR (0xb24104)
+#define NBL_SEC050_SIZE (1)
+#define NBL_SEC050_ADDR (0xb2414c)
+#define NBL_SEC051_SIZE (1)
+#define NBL_SEC051_ADDR (0xb24150)
+#define NBL_SEC052_SIZE (256)
+#define NBL_SEC052_ADDR (0xb25000)
+#define NBL_SEC052_REGI(i) (0xb25000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC053_SIZE (32)
+#define NBL_SEC053_ADDR (0xb25800)
+#define NBL_SEC053_REGI(i) (0xb25800 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC054_SIZE (1)
+#define NBL_SEC054_ADDR (0xb34148)
+#define NBL_SEC055_SIZE (1)
+#define NBL_SEC055_ADDR (0xb34104)
+#define NBL_SEC056_SIZE (1)
+#define NBL_SEC056_ADDR (0xb3414c)
+#define NBL_SEC057_SIZE (1)
+#define NBL_SEC057_ADDR (0xb34150)
+#define NBL_SEC058_SIZE (256)
+#define NBL_SEC058_ADDR (0xb35000)
+#define NBL_SEC058_REGI(i) (0xb35000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC059_SIZE (32)
+#define NBL_SEC059_ADDR (0xb35800)
+#define NBL_SEC059_REGI(i) (0xb35800 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC060_SIZE (1)
+#define NBL_SEC060_ADDR (0xe74630)
+#define NBL_SEC061_SIZE (1)
+#define NBL_SEC061_ADDR (0xe74634)
+#define NBL_SEC062_SIZE (64)
+#define NBL_SEC062_ADDR (0xe75000)
+#define NBL_SEC062_REGI(i) (0xe75000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC063_SIZE (32)
+#define NBL_SEC063_ADDR (0xe75480)
+#define NBL_SEC063_REGI(i) (0xe75480 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC064_SIZE (16)
+#define NBL_SEC064_ADDR (0xe75980)
+#define NBL_SEC064_REGI(i) (0xe75980 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC065_SIZE (32)
+#define NBL_SEC065_ADDR (0x15f000)
+#define NBL_SEC065_REGI(i) (0x15f000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC066_SIZE (32)
+#define NBL_SEC066_ADDR (0x75f000)
+#define NBL_SEC066_REGI(i) (0x75f000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC067_SIZE (1)
+#define NBL_SEC067_ADDR (0xb64108)
+#define NBL_SEC068_SIZE (1)
+#define NBL_SEC068_ADDR (0xb6410c)
+#define NBL_SEC069_SIZE (1)
+#define NBL_SEC069_ADDR (0xb64140)
+#define NBL_SEC070_SIZE (1)
+#define NBL_SEC070_ADDR (0xb64144)
+#define NBL_SEC071_SIZE (512)
+#define NBL_SEC071_ADDR (0xb65000)
+#define NBL_SEC071_REGI(i) (0xb65000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC072_SIZE (32)
+#define NBL_SEC072_ADDR (0xb65800)
+#define NBL_SEC072_REGI(i) (0xb65800 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC073_SIZE (1)
+#define NBL_SEC073_ADDR (0x8c210)
+#define NBL_SEC074_SIZE (1)
+#define NBL_SEC074_ADDR (0x85c210)
+#define NBL_SEC075_SIZE (4)
+#define NBL_SEC075_ADDR (0x8c1b0)
+#define NBL_SEC075_REGI(i) (0x8c1b0 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC076_SIZE (4)
+#define NBL_SEC076_ADDR (0x8c1c0)
+#define NBL_SEC076_REGI(i) (0x8c1c0 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC077_SIZE (4)
+#define NBL_SEC077_ADDR (0x85c1b0)
+#define NBL_SEC077_REGI(i) (0x85c1b0 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC078_SIZE (1)
+#define NBL_SEC078_ADDR (0x85c1ec)
+#define NBL_SEC079_SIZE (1)
+#define NBL_SEC079_ADDR (0x8c1ec)
+#define NBL_SEC080_SIZE (1)
+#define NBL_SEC080_ADDR (0xb04440)
+#define NBL_SEC081_SIZE (1)
+#define NBL_SEC081_ADDR (0xb04448)
+#define NBL_SEC082_SIZE (1)
+#define NBL_SEC082_ADDR (0xb14450)
+#define NBL_SEC083_SIZE (1)
+#define NBL_SEC083_ADDR (0xb24450)
+#define NBL_SEC084_SIZE (1)
+#define NBL_SEC084_ADDR (0xb34450)
+#define NBL_SEC085_SIZE (1)
+#define NBL_SEC085_ADDR (0xa04188)
+#define NBL_SEC086_SIZE (1)
+#define NBL_SEC086_ADDR (0xe74218)
+#define NBL_SEC087_SIZE (1)
+#define NBL_SEC087_ADDR (0xe7421c)
+#define NBL_SEC088_SIZE (1)
+#define NBL_SEC088_ADDR (0xe74220)
+#define NBL_SEC089_SIZE (1)
+#define NBL_SEC089_ADDR (0xe74224)
+#define NBL_SEC090_SIZE (1)
+#define NBL_SEC090_ADDR (0x75c22c)
+#define NBL_SEC091_SIZE (1)
+#define NBL_SEC091_ADDR (0x75c230)
+#define NBL_SEC092_SIZE (1)
+#define NBL_SEC092_ADDR (0x75c238)
+#define NBL_SEC093_SIZE (1)
+#define NBL_SEC093_ADDR (0x75c244)
+#define NBL_SEC094_SIZE (1)
+#define NBL_SEC094_ADDR (0x75c248)
+#define NBL_SEC095_SIZE (1)
+#define NBL_SEC095_ADDR (0x75c250)
+#define NBL_SEC096_SIZE (1)
+#define NBL_SEC096_ADDR (0x15c230)
+#define NBL_SEC097_SIZE (1)
+#define NBL_SEC097_ADDR (0x15c234)
+#define NBL_SEC098_SIZE (1)
+#define NBL_SEC098_ADDR (0x15c238)
+#define NBL_SEC099_SIZE (1)
+#define NBL_SEC099_ADDR (0x15c23c)
+#define NBL_SEC100_SIZE (1)
+#define NBL_SEC100_ADDR (0x15c244)
+#define NBL_SEC101_SIZE (1)
+#define NBL_SEC101_ADDR (0x15c248)
+#define NBL_SEC102_SIZE (1)
+#define NBL_SEC102_ADDR (0xb6432c)
+#define NBL_SEC103_SIZE (1)
+#define NBL_SEC103_ADDR (0xb64220)
+#define NBL_SEC104_SIZE (1)
+#define NBL_SEC104_ADDR (0xb44804)
+#define NBL_SEC105_SIZE (1)
+#define NBL_SEC105_ADDR (0xb44a00)
+#define NBL_SEC106_SIZE (1)
+#define NBL_SEC106_ADDR (0xe84210)
+#define NBL_SEC107_SIZE (1)
+#define NBL_SEC107_ADDR (0xe84214)
+#define NBL_SEC108_SIZE (1)
+#define NBL_SEC108_ADDR (0xe64228)
+#define NBL_SEC109_SIZE (1)
+#define NBL_SEC109_ADDR (0x65413c)
+#define NBL_SEC110_SIZE (1)
+#define NBL_SEC110_ADDR (0x984144)
+#define NBL_SEC111_SIZE (1)
+#define NBL_SEC111_ADDR (0x114130)
+#define NBL_SEC112_SIZE (1)
+#define NBL_SEC112_ADDR (0x714138)
+#define NBL_SEC113_SIZE (1)
+#define NBL_SEC113_ADDR (0x114134)
+#define NBL_SEC114_SIZE (1)
+#define NBL_SEC114_ADDR (0x71413c)
+#define NBL_SEC115_SIZE (1)
+#define NBL_SEC115_ADDR (0x90437c)
+#define NBL_SEC116_SIZE (32)
+#define NBL_SEC116_ADDR (0xb05000)
+#define NBL_SEC116_REGI(i) (0xb05000 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC117_SIZE (1)
+#define NBL_SEC117_ADDR (0xb043e0)
+#define NBL_SEC118_SIZE (1)
+#define NBL_SEC118_ADDR (0xb043f0)
+#define NBL_SEC119_SIZE (5)
+#define NBL_SEC119_ADDR (0x8c230)
+#define NBL_SEC119_REGI(i) (0x8c230 + NBL_BYTES_IN_REG * (i))
+#define NBL_SEC120_SIZE (1)
+#define NBL_SEC120_ADDR (0x8c1f4)
+#define NBL_SEC121_SIZE (1)
+#define NBL_SEC121_ADDR (0x2046c4)
+#define NBL_SEC122_SIZE (1)
+#define NBL_SEC122_ADDR (0x85c1f4)
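+/*
+ * Layout note with a worked example (an illustrative sketch, not from the
+ * original sources): each NBL_SECnnn block above is described by a base
+ * address (_ADDR), a length in 32-bit registers (_SIZE) and, for
+ * multi-entry tables, an entry-offset macro (_REGI).  Assuming
+ * NBL_BYTES_IN_REG is 4 (one 32-bit register), entry i of a table sits
+ * at _ADDR + 4 * i, e.g. NBL_SEC116_REGI(2) == 0xb05000 + 4 * 2 ==
+ * 0xb05008.  A table of this shape could then be programmed with a loop
+ * such as:
+ *
+ *	for (i = 0; i < NBL_SEC116_SIZE; i++)
+ *		write_reg(priv, NBL_SEC116_REGI(i), data[i]);
+ *
+ * where write_reg() is a hypothetical stand-in for the driver's register
+ * write path.
+ */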
+#define NBL_SEC123_SIZE (1) +#define NBL_SEC123_ADDR (0x75c194) +#define NBL_SEC124_SIZE (256) +#define NBL_SEC124_ADDR (0xa05000) +#define NBL_SEC124_REGI(i) (0xa05000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC125_SIZE (256) +#define NBL_SEC125_ADDR (0xa06000) +#define NBL_SEC125_REGI(i) (0xa06000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC126_SIZE (256) +#define NBL_SEC126_ADDR (0xa07000) +#define NBL_SEC126_REGI(i) (0xa07000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC127_SIZE (1) +#define NBL_SEC127_ADDR (0x75c204) +#define NBL_SEC128_SIZE (1) +#define NBL_SEC128_ADDR (0x15c204) +#define NBL_SEC129_SIZE (1) +#define NBL_SEC129_ADDR (0x75c208) +#define NBL_SEC130_SIZE (1) +#define NBL_SEC130_ADDR (0x15c208) +#define NBL_SEC131_SIZE (1) +#define NBL_SEC131_ADDR (0x75c20c) +#define NBL_SEC132_SIZE (1) +#define NBL_SEC132_ADDR (0x15c20c) +#define NBL_SEC133_SIZE (1) +#define NBL_SEC133_ADDR (0x75c210) +#define NBL_SEC134_SIZE (1) +#define NBL_SEC134_ADDR (0x15c210) +#define NBL_SEC135_SIZE (1) +#define NBL_SEC135_ADDR (0x75c214) +#define NBL_SEC136_SIZE (1) +#define NBL_SEC136_ADDR (0x15c214) +#define NBL_SEC137_SIZE (32) +#define NBL_SEC137_ADDR (0x15d000) +#define NBL_SEC137_REGI(i) (0x15d000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC138_SIZE (32) +#define NBL_SEC138_ADDR (0x75d000) +#define NBL_SEC138_REGI(i) (0x75d000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC139_SIZE (1) +#define NBL_SEC139_ADDR (0x75c310) +#define NBL_SEC140_SIZE (1) +#define NBL_SEC140_ADDR (0x75c314) +#define NBL_SEC141_SIZE (1) +#define NBL_SEC141_ADDR (0x75c340) +#define NBL_SEC142_SIZE (1) +#define NBL_SEC142_ADDR (0x75c344) +#define NBL_SEC143_SIZE (1) +#define NBL_SEC143_ADDR (0x75c348) +#define NBL_SEC144_SIZE (1) +#define NBL_SEC144_ADDR (0x75c34c) +#define NBL_SEC145_SIZE (32) +#define NBL_SEC145_ADDR (0xb15800) +#define NBL_SEC145_REGI(i) (0xb15800 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC146_SIZE (32) +#define NBL_SEC146_ADDR (0xb25800) +#define NBL_SEC146_REGI(i) (0xb25800 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC147_SIZE (32) +#define NBL_SEC147_ADDR (0xb35800) +#define NBL_SEC147_REGI(i) (0xb35800 + NBL_BYTES_IN_REG * (i)) + +static u32 nbl_sec046_1p_data[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00077c2b, 0x005c0000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x70000000, 0x00000020, 0x24140000, 0x00000020, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00002100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 
0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x20140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x20140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x38430000, + 0x70000006, 0x00000020, 0x24140000, 0x00000020, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x98cb1180, 0x6e36d469, + 0x9d8eb91c, 0x87e3ef47, 0xa2931288, 0x08405c5a, + 0x73865086, 0x00000080, 0x30140000, 0x00000080, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x000b3849, 0x38430000, + 0x00000006, 0x0000c100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x08400000, + 0x03865086, 0x4c016100, 0x00000014, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec071_1p_data[] = { + 0x00000000, 0x00000000, 0x00113d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7029b00, 0x00000000, + 0x00000000, 0x43000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x51e00000, 0x00000c9c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00293d00, 0x00000000, + 0x00000000, 0x00000000, 0x67089b00, 0x00000002, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x80000000, 0x00000000, 0xb1e00000, 0x0000189c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x014b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x015b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 
0xe6d29a00, 0x000149c4, + 0x00000000, 0x4b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 0xe6d2c000, 0x000149c4, + 0x00000000, 0x5b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 0x00000000, 0x64d49200, 0x5e556945, + 0xc666d89a, 0x4b0001a9, 0x00004c84, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 0x00000000, 0x6ed4ba00, 0x5ef56bc5, + 0xc666d8c0, 0x5b0001a9, 0x00004dc4, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000002, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00700000, 0x00000000, 0x08028000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec046_2p_data[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00077c2b, 0x005c0000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x70000000, 0x00000020, 0x04140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00002100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x00140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x00140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x38430000, + 0x70000006, 0x00000020, 0x04140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x98cb1180, 0x6e36d469, + 0x9d8eb91c, 0x87e3ef47, 0xa2931288, 0x08405c5a, + 0x73865086, 0x00000080, 0x10140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x000b3849, 0x38430000, + 0x00000006, 0x0000c100, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x08400000, + 0x03865086, 0x4c016100, 0x00000014, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec071_2p_data[] = { + 0x00000000, 0x00000000, 0x00113d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7029b00, 0x00000000, + 0x00000000, 0x43000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x51e00000, 0x00000c9c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00293d00, 0x00000000, + 0x00000000, 0x00000000, 0x67089b00, 0x00000002, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x80000000, 0x00000000, 0xb1e00000, 0x0000189c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x014b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x015b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 0xe6d29a00, 0x000149c4, + 0x00000000, 0x4b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 0xe6d2c000, 0x000149c4, + 0x00000000, 0x5b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 0x00000000, 0x64d49200, 0x5e556945, + 0xc666d89a, 0x4b0001a9, 0x00004c84, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 0x00000000, 0x6ed4ba00, 0x5ef56bc5, + 0xc666d8c0, 0x5b0001a9, 0x00004dc4, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000002, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00700000, 0x00000000, 0x00028000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec006_data[] = { + 0x81008100, 0x00000001, 0x88a88100, 0x00000001, + 0x810088a8, 0x00000001, 0x88a888a8, 0x00000001, + 0x81000000, 0x00000001, 0x88a80000, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004000, 0x00000001, 0x86dd6000, 0x00000001, + 0x81000000, 0x00000001, 0x88a80000, 0x00000001, + 0x08060000, 0x00000001, 0x80350000, 0x00000001, + 0x88080000, 0x00000001, 0x88f70000, 0x00000001, + 0x88cc0000, 0x00000001, 0x88090000, 0x00000001, + 0x89150000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x11006000, 0x00000001, 0x06006000, 0x00000001, + 0x02006000, 0x00000001, 0x3a006000, 0x00000001, + 0x2f006000, 0x00000001, 0x84006000, 0x00000001, + 0x32006000, 0x00000001, 0x2c006000, 0x00000001, + 0x3c006000, 0x00000001, 0x2b006000, 0x00000001, + 0x00006000, 0x00000001, 0x00004000, 0x00000001, + 0x00004000, 0x00000001, 0x20004000, 0x00000001, + 0x40004000, 0x00000001, 0x00000000, 0x00000001, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x2c000000, 0x00000001, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x00000000, 0x00000000, + 0x2c000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x06001072, 0x00000001, 0x06000000, 0x00000001, + 0x110017c1, 0x00000001, 0x110012b7, 0x00000001, + 0x110012b5, 0x00000001, 0x01000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x11000043, 0x00000001, 0x11000044, 0x00000001, + 0x11000222, 0x00000001, 0x11000000, 0x00000001, + 0x2f006558, 0x00000001, 0x32000000, 0x00000001, + 0x84000000, 0x00000001, 0x00000000, 0x00000001, + 0x65582000, 0x00000001, 0x65583000, 0x00000001, + 0x6558a000, 0x00000001, 0x6558b000, 0x00000001, + 0x65580000, 0x00000001, 0x12b50000, 0x00000001, + 0x02000102, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x65580000, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x81008100, 0x00000001, 0x88a88100, 0x00000001, + 0x810088a8, 0x00000001, 0x88a888a8, 0x00000001, + 0x81000000, 
0x00000001, 0x88a80000, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004000, 0x00000001, 0x86dd6000, 0x00000001, + 0x81000000, 0x00000001, 0x88a80000, 0x00000001, + 0x08060000, 0x00000001, 0x80350000, 0x00000001, + 0x88080000, 0x00000001, 0x88f70000, 0x00000001, + 0x88cc0000, 0x00000001, 0x88090000, 0x00000001, + 0x89150000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x11006000, 0x00000001, 0x06006000, 0x00000001, + 0x02006000, 0x00000001, 0x3a006000, 0x00000001, + 0x2f006000, 0x00000001, 0x84006000, 0x00000001, + 0x32006000, 0x00000001, 0x2c006000, 0x00000001, + 0x3c006000, 0x00000001, 0x2b006000, 0x00000001, + 0x00006000, 0x00000001, 0x00004000, 0x00000001, + 0x00004000, 0x00000001, 0x20004000, 0x00000001, + 0x40004000, 0x00000001, 0x00000000, 0x00000001, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x2c000000, 0x00000001, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x00000000, 0x00000000, + 0x2c000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x06001072, 0x00000001, 0x06000000, 0x00000001, + 0x110012b7, 0x00000001, 0x01000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x32000000, 0x00000001, 0x84000000, 0x00000001, + 0x11000043, 0x00000001, 0x11000044, 0x00000001, + 0x11000222, 0x00000001, 0x11000000, 0x00000001, + 0x2f006558, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec007_data[] = { + 0x10001000, 0x00001000, 0x10000000, 0x00000000, + 0x1000ffff, 0x0000ffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000fff, 0x00000fff, 0x1000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ff0fff, 0x00ff0fff, 
0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x10ff0fff, 0xffff0fff, 0x00000fff, + 0x1fff0fff, 0x1fff0fff, 0x1fff0fff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ff0000, 0x00ffffff, 0x00ff0000, 0x00ff0000, + 0x00ff0000, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ff0000, 0x00ff0000, 0x00ff0001, 0x00ffffff, + 0x00ff0000, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00000fff, 0x00000fff, 0x00000fff, 0x00000fff, + 0x00000fff, 0x0000ffff, 0xc0ff0000, 0xc0ffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x0000ffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x10001000, 0x00001000, 0x10000000, 0x00000000, + 0x1000ffff, 0x0000ffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000fff, 0x00000fff, 0x1000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x10ff0fff, 0xffff0fff, 0x00000fff, + 0x1fff0fff, 0x1fff0fff, 0x1fff0fff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ff0000, 0x00ffffff, 0x00ff0000, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ff0000, 0x00ff0000, 0x00ff0001, 0x00ffffff, + 0x00ff0000, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, +}; + +static u32 nbl_sec008_data[] = { + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00800090, 0x12009092, 0x00000100, 0x00000000, + 0x00800090, 0x12009092, 0x00000100, 0x00000000, + 0x00800000, 0x0e008c8e, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08909581, 0x00008680, 0x00000200, 0x00000000, + 0x10900082, 0x28008680, 0x00000200, 0x00000000, + 0x809b0093, 0x00000000, 0x00000100, 0x00000000, + 0x809b0093, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b0000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x009b0000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000200, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000200, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000200, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000300, 0x00000000, + 0x00000000, 0x00a089c2, 0x000005f0, 0x00000000, + 0x000b0085, 0x00a00000, 0x000002f0, 0x00000000, + 0x000b0085, 0x00a00000, 0x000002f0, 0x00000000, + 0x00000000, 0x00a089c2, 0x000005f0, 0x00000000, + 0x000b0000, 0x00000000, 0x00000200, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000400, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000400, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x0000050f, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x0000050f, 0x00000000, + 0x02a00084, 0x08008890, 0x00000600, 0x00000000, + 0x02ab848a, 0x08000000, 0x00000500, 0x00000000, + 0x02a00084, 0x10008200, 0x00000600, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000500, 
0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab0084, 0x08000000, 0x00000500, 0x00000000, + 0x00a00000, 0x04008280, 0x00000600, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x04ab8e84, 0x0c000000, 0x00000500, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x00000000, 0x0400ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0800ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0800ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0c00ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0000ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0000ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x10008200, 0x00000700, 0x00000000, + 0x00000000, 0x08008200, 0x00000700, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x0000ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0000ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00808786, 0x16009496, 0x00000900, 0x00000000, + 0x00808786, 0x16009496, 0x00000900, 0x00000000, + 0x00808786, 0x16009496, 0x00000900, 0x00000000, + 0x00808786, 0x16009496, 0x00000900, 0x00000000, + 0x00800086, 0x12009092, 0x00000900, 0x00000000, + 0x00800086, 0x12009092, 0x00000900, 0x00000000, + 0x00800000, 0x0e008c8e, 0x00000900, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08908192, 0x00008680, 0x00000a00, 0x00000000, + 0x10908292, 0x28008680, 0x00000a00, 0x00000000, + 0x809b9392, 0x00000000, 0x00000900, 0x00000000, + 0x809b9392, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b0092, 0x00000000, 0x00000900, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x009b0092, 0x00000000, 0x00000900, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000a00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000a00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000a00, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000b00, 0x00000000, + 0x00000000, 0x00a089c2, 0x00000df0, 0x00000000, + 0x000b0085, 0x00a00000, 0x00000af0, 0x00000000, + 0x000b0085, 0x00a00000, 0x00000af0, 0x00000000, + 0x00000000, 0x00a089c2, 0x00000df0, 0x00000000, + 0x000b0000, 0x00000000, 0x00000a00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000b00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000b00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000b00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000b00, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000c00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000c00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000c00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000c00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000c00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000c00, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x00000d0f, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x00000d0f, 0x00000000, + 0x02ab8a84, 0x08000000, 0x00000d00, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000d00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000d00, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000d00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000d00, 0x00000000, + 0x04ab8e84, 0x0c000000, 0x00000d00, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000d00, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000d00, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000d00, 0x00000000, + 0x02ab0084, 0x08000000, 0x00000d00, 0x00000000, + 0x00ab0000, 0x04000000, 0x00000d00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000d00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec009_data[] = { + 0x00000000, 0x00000060, 0x00000000, 0x00000090, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000050, 0x00000000, 0x000000a0, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000000a0, 0x00000000, 0x00000050, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000800, 0x00000000, 0x00000700, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000900, 0x00000000, 0x00000600, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00008000, 0x00000000, 0x00007000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00009000, 0x00000000, 0x00006000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x0000a000, 0x00000000, 0x00005000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000c0000, 0x00000000, 0x00030000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000d0000, 0x00000000, 0x00020000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000e0000, 0x00000000, 0x00010000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000040, 0x00000000, 0x000000b0, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000070, 0x00000000, 0x00000080, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000090, 0x00000000, 0x00000060, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000080, 0x00000000, 0x00000070, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000700, 0x00000000, 0x00000800, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00007000, 0x00000000, 0x00008000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00080000, 0x00000000, 0x00070000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000c00, 0x00000000, 0x00000300, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000d00, 0x00000000, 0x00000200, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00600000, 0x00000000, 0x00900000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00d00000, 0x00000000, 0x00200000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00500000, 0x00000000, 0x00a00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00700000, 0x00000000, 0x00800000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00e00000, 0x00000000, 0x00100000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00f00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00f00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00100000, 0x00000000, 0x00e00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00300000, 0x00000000, 0x00c00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00800000, 0x00000000, 0x00700000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00900000, 0x00000000, 0x00600000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00a00000, 0x00000000, 0x00500000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00b00000, 0x00000000, 0x00400000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000060, 0x00400000, 0x00000090, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000050, 0x00400000, 0x000000a0, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000000a0, 0x00400000, 0x00000050, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000800, 0x00400000, 0x00000700, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000900, 0x00400000, 0x00000600, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00008000, 0x00400000, 0x00007000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00009000, 0x00400000, 0x00006000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x0000a000, 0x00400000, 0x00005000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000c0000, 0x00400000, 0x00030000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000d0000, 0x00400000, 0x00020000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000e0000, 0x00400000, 0x00010000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000070, 0x00400000, 0x00000080, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000700, 0x00400000, 0x00000800, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00007000, 0x00400000, 0x00008000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00080000, 0x00400000, 0x00070000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000c00, 0x00400000, 0x00000300, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000d00, 0x00400000, 0x00000200, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00400000, 0x000000b0, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000090, 0x00400000, 0x00000060, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000080, 0x00400000, 0x00000070, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000060, 0x06000000, 0x00000090, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000060, 0x07000000, 0x00000090, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000050, 0x06000000, 0x000000a0, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000050, 0x07000000, 0x000000a0, 0x08000000, + 
0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000000a0, 0x06000000, 0x00000050, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000000a0, 0x07000000, 0x00000050, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000800, 0x06000000, 0x00000700, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000900, 0x06000000, 0x00000600, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00008000, 0x06000000, 0x00007000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00009000, 0x06000000, 0x00006000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x0000a000, 0x06000000, 0x00005000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000c0000, 0x06000000, 0x00030000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000d0000, 0x06000000, 0x00020000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000e0000, 0x06000000, 0x00010000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000800, 0x07000000, 0x00000700, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000900, 0x07000000, 0x00000600, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00008000, 0x07000000, 0x00007000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00009000, 0x07000000, 0x00006000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x0000a000, 0x07000000, 0x00005000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000c0000, 0x07000000, 0x00030000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000d0000, 0x07000000, 0x00020000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000e0000, 0x07000000, 0x00010000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000070, 0x06000000, 0x00000080, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000070, 0x07000000, 0x00000080, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000700, 0x06000000, 0x00000800, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00007000, 0x06000000, 0x00008000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00080000, 0x06000000, 0x00070000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000c00, 0x06000000, 0x00000300, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000d00, 0x06000000, 0x00000200, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000700, 0x07000000, 0x00000800, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00007000, 0x07000000, 0x00008000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00080000, 0x07000000, 0x00070000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000c00, 0x07000000, 0x00000300, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000d00, 0x07000000, 0x00000200, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x06000000, 0x000000b0, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x07000000, 0x000000b0, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000090, 0x06000000, 0x00000060, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000090, 0x07000000, 0x00000060, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000080, 0x06000000, 0x00000070, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 
0x00000080, 0x07000000, 0x00000070, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000060, 0x00c00000, 0x00000090, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000050, 0x00c00000, 0x000000a0, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000000a0, 0x00c00000, 0x00000050, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000800, 0x00c00000, 0x00000700, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000900, 0x00c00000, 0x00000600, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00008000, 0x00c00000, 0x00007000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00009000, 0x00c00000, 0x00006000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x0000a000, 0x00c00000, 0x00005000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000c0000, 0x00c00000, 0x00030000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000d0000, 0x00c00000, 0x00020000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000e0000, 0x00c00000, 0x00010000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000070, 0x00c00000, 0x00000080, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000700, 0x00c00000, 0x00000800, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00007000, 0x00c00000, 0x00008000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00080000, 0x00c00000, 0x00070000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000c00, 0x00c00000, 0x00000300, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000d00, 0x00c00000, 0x00000200, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00c00000, 0x000000b0, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000090, 0x00c00000, 0x00000060, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000080, 0x00c00000, 0x00000070, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x00400000, 0x00b00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00600000, 0x00400000, 0x00900000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00300000, 0x00400000, 0x00c00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x00400000, 0x00a00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00700000, 0x00400000, 0x00800000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00200000, 0x00400000, 0x00d00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00800000, 0x00400000, 0x00700000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00900000, 0x00400000, 0x00600000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00a00000, 0x00400000, 0x00500000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00b00000, 0x00400000, 0x00400000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00400000, 0x00f00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00400000, 0x00f00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00100000, 0x00400000, 0x00e00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x06000000, 0x00b00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x07000000, 0x00b00000, 0x08000000, + 
0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00600000, 0x06000000, 0x00900000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00600000, 0x07000000, 0x00900000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00300000, 0x06000000, 0x00c00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00300000, 0x07000000, 0x00c00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x06000000, 0x00a00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x07000000, 0x00a00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00700000, 0x06000000, 0x00800000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00700000, 0x07000000, 0x00800000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00200000, 0x06000000, 0x00d00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00200000, 0x07000000, 0x00d00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00800000, 0x06000000, 0x00700000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00900000, 0x06000000, 0x00600000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00a00000, 0x06000000, 0x00500000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00b00000, 0x06000000, 0x00400000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00800000, 0x07000000, 0x00700000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00900000, 0x07000000, 0x00600000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00a00000, 0x07000000, 0x00500000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00b00000, 0x07000000, 0x00400000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x06000000, 0x00f00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x07000000, 0x00f00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x06000000, 0x00f00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00100000, 0x06000000, 0x00e00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x07000000, 0x00f00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00100000, 0x07000000, 0x00e00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x00c00000, 0x00b00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00600000, 0x00c00000, 0x00900000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00300000, 0x00c00000, 0x00c00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x00c00000, 0x00a00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00700000, 0x00c00000, 0x00800000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00200000, 0x00c00000, 0x00d00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00800000, 0x00c00000, 0x00700000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00900000, 0x00c00000, 0x00600000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00a00000, 0x00c00000, 0x00500000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00b00000, 0x00c00000, 0x00400000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00c00000, 0x00f00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00c00000, 0x00f00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00100000, 0x00c00000, 0x00e00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000f0000, 0x00400000, 0x00000000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00f00000, 0x00400000, 0x00000000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000f0000, 0x06000000, 0x00000000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00f00000, 0x06000000, 0x00000000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000f0000, 0x07000000, 0x00000000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00f00000, 0x07000000, 0x00000000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000f0000, 0x00c00000, 0x00000000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00f00000, 0x00c00000, 0x00000000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000f0000, 0x00000000, 0x00000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00f00000, 0x00000000, 0x00000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec010_data[] = { + 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, + 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, + 0x0000000a, 0x0000000a, 0x0000000a, 0x00000000, + 0x0000000b, 0x00000008, 0x00000009, 0x0000000f, + 0x0000000f, 0x0000000f, 0x0000000f, 0x0000000f, + 0x0000000c, 0x0000000d, 0x00000001, 0x00000001, + 0x0000000e, 0x00000005, 0x00000002, 0x00000002, + 0x00000004, 0x00000003, 0x00000003, 0x00000003, + 0x00000003, 0x00000040, 0x00000040, 0x00000040, + 0x00000040, 0x00000040, 0x00000040, 0x00000040, + 0x00000040, 0x00000040, 0x00000040, 0x00000040, + 0x00000045, 0x00000044, 0x00000044, 0x00000044, + 0x00000044, 0x00000044, 0x00000041, 0x00000042, + 0x00000043, 0x00000046, 0x00000046, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x0000004b, + 0x0000004b, 0x0000004a, 0x0000004a, 0x0000004a, + 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a, + 0x0000004a, 0x0000004a, 0x0000004a, 0x00000047, + 0x00000047, 0x00000048, 0x00000048, 0x00000049, + 0x00000049, 0x0000004c, 0x0000004c, 0x0000004c, + 0x0000004c, 0x0000004c, 0x0000004c, 0x0000004c, + 0x0000004c, 0x0000004c, 0x0000004c, 0x0000004c, + 0x00000051, 
0x00000050, 0x00000050, 0x00000050, + 0x00000050, 0x00000050, 0x0000004d, 0x0000004e, + 0x0000004f, 0x00000052, 0x00000053, 0x00000054, + 0x00000054, 0x00000055, 0x00000056, 0x00000057, + 0x00000057, 0x00000057, 0x00000057, 0x00000058, + 0x00000059, 0x00000059, 0x0000005a, 0x0000005a, + 0x0000005b, 0x0000005b, 0x0000005c, 0x0000005c, + 0x0000005c, 0x0000005c, 0x0000005d, 0x0000005d, + 0x0000005e, 0x0000005e, 0x0000005f, 0x0000005f, + 0x0000005f, 0x0000005f, 0x0000005f, 0x0000005f, + 0x0000005f, 0x0000005f, 0x00000060, 0x00000060, + 0x00000061, 0x00000061, 0x00000061, 0x00000061, + 0x00000062, 0x00000063, 0x00000064, 0x00000064, + 0x00000065, 0x00000066, 0x00000067, 0x00000067, + 0x00000067, 0x00000067, 0x00000068, 0x00000069, + 0x00000069, 0x00000040, 0x00000040, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x0000004c, + 0x0000004c, 0x0000000a, 0x0000000a, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec011_data[] = { + 0x0008002c, 0x00080234, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080230, + 0x00080332, 0x0008063c, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0008002c, 0x00080234, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080230, + 0x00080332, 0x00080738, 0x0008083c, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0008002c, 0x00080234, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080230, + 0x00080332, 0x00080738, 0x0008093a, 0x00080a3c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080634, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080834, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080932, 0x00080a34, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00090200, 0x00090304, 
0x00090408, 0x0009050c, + 0x00090610, 0x00090714, 0x00090818, 0x0009121c, + 0x0009131e, 0x00000000, 0x00000000, 0x00000000, + 0x00090644, 0x00000000, 0x000d8045, 0x000d4145, + 0x0009030c, 0x0009041c, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00090145, 0x00090944, 0x00000000, 0x00000000, + 0x0009061c, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x0009033a, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00090200, 0x00090304, 0x00090408, 0x0009050c, + 0x00090610, 0x00090714, 0x00090818, 0x0009121c, + 0x0009131e, 0x00000000, 0x00000000, 0x00000000, + 0x0009063d, 0x00090740, 0x000d803f, 0x000d413f, + 0x0009030c, 0x0009041c, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0009013f, 0x00090840, 0x000dc93d, 0x000d093d, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0324, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a003e, + 0x000a0140, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0324, 0x000a0520, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a003e, + 0x000a0140, 0x000a0842, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0124, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0224, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a003c, 0x000a0037, 0x000ec139, 0x000e0139, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x000a0742, 0x00000000, 0x00000000, + 0x000a0d41, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x000a0d3e, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0037, 0x000a0139, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080634, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080834, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080932, 0x00080a34, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0009061c, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x0009033a, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00090200, 0x00090304, 0x00090408, 0x0009050c, + 0x00090610, 0x00090714, 0x00090818, 0x0009121c, + 0x0009131e, 0x00000000, 0x00000000, 0x00000000, + 0x0009063d, 0x00090740, 0x000d803f, 0x000d413f, + 0x0009030c, 0x0009041c, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0009013f, 0x00090840, 0x000dc93d, 0x000d093d, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a003c, 0x000a0037, 0x000ec139, 0x000e0139, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x000a0742, 0x00000000, 0x00000000, + 0x000a0d41, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x000a0d3e, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0037, 0x000a0139, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec012_data[] = { + 0x00000006, 0x00000001, 0x00000004, 0x00000001, + 0x00000006, 0x00000001, 0x00000000, 0x00000001, + 0x00000004, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000010, 0x00000001, 0x00000000, 0x00000001, + 0x00000040, 0x00000001, 0x00000010, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x06200000, 0x00000001, 0x00c00000, 0x00000001, + 0x02c00000, 0x00000001, 0x00200000, 0x00000001, + 0x00400000, 0x00000001, 0x00700000, 0x00000001, + 0x00300000, 0x00000001, 0x00000000, 0x00000001, + 0x00a00000, 0x00000001, 0x00b00000, 0x00000001, + 0x00e00000, 0x00000001, 0x00500000, 0x00000001, + 0x00800000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000004, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00000001, 0x00000010, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x00000001, 0x00700000, 0x00000001, + 0x00a00000, 0x00000001, 0x00b00000, 0x00000001, + 0x00200000, 0x00000001, 0x00000000, 0x00000001, + 0x00300000, 0x00000001, 0x00800000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec013_data[] = { + 0xf7fffff0, 0xf7fffff1, 0xfffffff0, 0xf7fffff3, + 0xfffffff1, 0xfffffff3, 0xffffffff, 0xffffffff, + 0xf7ffff0f, 0xf7ffff0f, 0xffffff0f, 0xffffff0f, + 0xffffff0f, 0xffffffff, 0xffffffff, 0xffffffff, + 0x100fffff, 0xf10fffff, 0xf10fffff, 0xf70fffff, + 0xf70fffff, 0xff0fffff, 0xff0fffff, 0xff1fffff, + 0xff0fffff, 0xff0fffff, 0xff0fffff, 0xff0fffff, + 0xff1fffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xfffffff1, 0xfffffff3, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffff0f, 0xffffff0f, 0xffffff0f, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xff0fffff, 0xff0fffff, 0xff0fffff, 0xff0fffff, + 0xff0fffff, 0xff1fffff, 0xff0fffff, 0xff1fffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, +}; + +static u32 nbl_sec014_data[] = { + 0x00000000, 0x00000001, 0x00000003, 0x00000002, + 0x00000004, 0x00000005, 0x00000000, 0x00000000, + 0x00000000, 0x00000001, 0x00000002, 0x00000003, + 0x00000004, 0x00000000, 0x00000000, 0x00000000, + 0x00000001, 0x00000002, 0x00000003, 0x00000000, + 0x00000000, 0x00000004, 0x00000005, 0x00000006, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000001, 0x00000002, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000001, 0x00000002, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000001, 0x00000001, 0x00000001, + 0x00000002, 0x00000003, 0x00000004, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec022_data[] = { + 0x81008100, 0x00000001, 0x88a88100, 0x00000001, + 0x810088a8, 0x00000001, 0x88a888a8, 0x00000001, + 0x81000000, 0x00000001, 0x88a80000, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004000, 0x00000001, 0x86dd6000, 0x00000001, + 0x81000000, 0x00000001, 0x88a80000, 0x00000001, + 0x08060000, 0x00000001, 0x80350000, 0x00000001, + 0x88080000, 0x00000001, 0x88f70000, 0x00000001, + 0x88cc0000, 0x00000001, 0x88090000, 0x00000001, + 0x89150000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x11006000, 0x00000001, 0x06006000, 0x00000001, + 0x02006000, 0x00000001, 0x3a006000, 0x00000001, + 0x2f006000, 0x00000001, 0x84006000, 0x00000001, + 0x32006000, 0x00000001, 0x2c006000, 0x00000001, + 0x3c006000, 0x00000001, 0x2b006000, 0x00000001, + 0x00006000, 0x00000001, 0x00004000, 0x00000001, + 0x00004000, 0x00000001, 0x20004000, 0x00000001, + 0x40004000, 0x00000001, 0x00000000, 0x00000001, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x2c000000, 0x00000001, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x00000000, 0x00000000, + 0x2c000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x06001072, 0x00000001, 0x06000000, 0x00000001, + 0x110012b7, 0x00000001, 0x01000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x32000000, 0x00000001, 0x84000000, 0x00000001, + 0x11000043, 0x00000001, 0x11000044, 0x00000001, + 0x11000222, 0x00000001, 0x11000000, 0x00000001, + 0x2f006558, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static 
u32 nbl_sec023_data[] = { + 0x10001000, 0x00001000, 0x10000000, 0x00000000, + 0x1000ffff, 0x0000ffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000fff, 0x00000fff, 0x1000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x10ff0fff, 0xffff0fff, 0x00000fff, + 0x1fff0fff, 0x1fff0fff, 0x1fff0fff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ff0000, 0x00ffffff, 0x00ff0000, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ff0000, 0x00ff0000, 0x00ff0001, 0x00ffffff, + 0x00ff0000, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, +}; + +static u32 nbl_sec024_data[] = { + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00800090, 0x12009092, 0x00000100, 0x00000000, + 0x00800090, 0x12009092, 0x00000100, 0x00000000, + 0x00800000, 0x0e008c8e, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08900081, 0x00008680, 0x00000200, 0x00000000, + 0x10900082, 0x28008680, 0x00000200, 0x00000000, + 0x809b0093, 0x00000000, 0x00000100, 0x00000000, + 0x809b0093, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b0000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x009b0000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 
0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000200, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000200, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000200, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000300, 0x00000000, + 0x00000000, 0x00a089c2, 0x000005f0, 0x00000000, + 0x000b0085, 0x00a00000, 0x000002f0, 0x00000000, + 0x000b0085, 0x00a00000, 0x000002f0, 0x00000000, + 0x00000000, 0x00a089c2, 0x000005f0, 0x00000000, + 0x000b0000, 0x00000000, 0x00000200, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000400, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000400, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x0000050f, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x0000050f, 0x00000000, + 0x02ab848a, 0x08000000, 0x00000500, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000500, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000500, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x04ab8e84, 0x0c000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab0084, 0x08000000, 0x00000500, 0x00000000, + 0x00ab0000, 0x04000000, 0x00000500, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec025_data[] = { + 0x00000060, 0x00000090, 0x00000001, 0x00000000, + 0x00000050, 0x000000a0, 0x00000001, 0x00000000, + 0x000000a0, 0x00000050, 0x00000001, 0x00000000, + 0x00000800, 0x00000700, 0x00000001, 0x00000000, + 0x00000900, 0x00000600, 0x00000001, 0x00000000, + 0x00008000, 0x00007000, 0x00000001, 0x00000000, + 0x00009000, 0x00006000, 0x00000001, 0x00000000, + 0x0000a000, 0x00005000, 0x00000001, 0x00000000, + 0x000c0000, 0x00030000, 0x00000001, 0x00000000, + 0x000d0000, 0x00020000, 0x00000001, 0x00000000, + 0x000e0000, 0x00010000, 0x00000001, 0x00000000, + 0x00000040, 0x000000b0, 0x00000001, 0x00000000, + 0x00000070, 0x00000080, 0x00000001, 0x00000000, + 0x00000090, 0x00000060, 0x00000001, 0x00000000, + 0x00000080, 0x00000070, 0x00000001, 0x00000000, + 0x00000700, 0x00000800, 0x00000001, 0x00000000, + 0x00007000, 0x00008000, 0x00000001, 0x00000000, + 0x00080000, 0x00070000, 0x00000001, 0x00000000, + 0x00000c00, 0x00000300, 0x00000001, 0x00000000, + 0x00000d00, 0x00000200, 0x00000001, 0x00000000, + 0x00400000, 0x00b00000, 0x00000001, 0x00000000, + 0x00600000, 0x00900000, 0x00000001, 0x00000000, + 0x00300000, 0x00c00000, 0x00000001, 0x00000000, + 0x00500000, 0x00a00000, 0x00000001, 0x00000000, + 0x00700000, 0x00800000, 0x00000001, 0x00000000, + 0x00000000, 0x00f00000, 0x00000001, 0x00000000, + 0x00000000, 0x00f00000, 0x00000001, 0x00000000, + 0x00100000, 0x00e00000, 0x00000001, 0x00000000, + 0x00200000, 0x00d00000, 0x00000001, 0x00000000, + 0x00800000, 0x00700000, 0x00000001, 0x00000000, + 0x00900000, 0x00600000, 0x00000001, 0x00000000, + 0x00a00000, 0x00500000, 0x00000001, 0x00000000, + 0x00b00000, 0x00400000, 0x00000001, 0x00000000, + 0x000f0000, 0x00000000, 0x00000001, 0x00000000, + 0x00f00000, 0x00000000, 0x00000001, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec026_data[] = { + 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, + 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, + 0x0000000a, 0x0000000a, 0x0000000a, 0x00000000, + 0x0000000b, 0x00000008, 0x00000009, 0x0000000f, + 0x0000000f, 0x0000000f, 0x0000000f, 0x0000000f, + 0x0000000c, 0x0000000d, 0x00000001, 0x00000001, + 0x0000000e, 0x00000005, 0x00000002, 0x00000002, + 0x00000004, 
0x00000003, 0x00000003, 0x00000003, + 0x00000003, 0x0000000a, 0x0000000a, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec027_data[] = { + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080634, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080834, 0x0008082e, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080932, 0x00080a34, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0009061c, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x0009033a, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00090200, 0x00090304, 0x00090408, 0x0009050c, + 0x00090610, 0x00090714, 0x00090818, 0x0009121c, + 0x0009131e, 0x00000000, 0x00000000, 0x00000000, + 0x0009063d, 0x00090740, 0x000d803f, 0x000d413f, + 0x0009030c, 0x0009041c, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0009013f, 0x00090840, 0x000dc93d, 0x000d093d, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a003c, 0x000a0037, 0x000ec139, 0x000e0139, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x000a0742, 0x00000000, 0x00000000, + 0x000a0d41, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x000a0d3e, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0037, 0x000a0139, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec028_data[] = { + 0x00000006, 0x00000001, 0x00000004, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00000001, 0x00000010, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x00000001, 0x00700000, 0x00000001, + 0x00a00000, 0x00000001, 0x00b00000, 0x00000001, + 0x00200000, 0x00000001, 0x00000000, 0x00000001, + 0x00300000, 0x00000001, 0x00800000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec029_data[] = { + 0xfffffff0, 0xfffffff1, 0xfffffff3, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffff0f, 0xffffff0f, 0xffffff0f, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xff0fffff, 0xff0fffff, 0xff0fffff, 0xff0fffff, + 0xff0fffff, 0xff1fffff, 0xff0fffff, 0xff1fffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, +}; + +static u32 nbl_sec030_data[] = { + 0x00000000, 0x00000001, 0x00000002, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000001, 0x00000002, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000001, 0x00000001, 0x00000001, + 0x00000002, 0x00000003, 0x00000004, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec039_data[] = { + 0xfef80000, 0x00000002, 0x000002e0, 0x00000000, + 0xfef8013e, 0x00000002, 0x000002e0, 0x00000000, + 0x6660013e, 0x726e6802, 0x02224e42, 0x00000000, + 0x6660013e, 0x726e6802, 0x02224e42, 0x00000000, + 0x66600000, 0x726e6802, 0x02224e42, 0x00000000, + 0x66600000, 0x726e6802, 0x02224e42, 0x00000000, + 0x66600000, 0x00026802, 0x02224e40, 0x00000000, + 0x66627800, 0x00026802, 0x02224e40, 0x00000000, + 0x66600000, 0x00026a76, 0x02224e40, 0x00000000, + 0x66600000, 0x00026802, 0x00024e40, 0x00000000, + 0x66600000, 0x00026802, 0x00024e40, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec040_data[] = { + 0x0040fb3f, 0x00000001, 0x0440fb3f, 0x00000001, + 0x0502fa00, 0x00000001, 0x0602f900, 0x00000001, + 0x0903e600, 0x00000001, 0x0a03e500, 0x00000001, + 0x1101e600, 0x00000001, 0x1201e500, 0x00000001, + 0x0000ff00, 0x00000001, 0x0008ff07, 0x00000001, + 0x00ffff00, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec046_4p_data[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00077c2b, 0x005c0000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x70000000, 0x00000020, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00002100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x38430000, + 0x70000006, 0x00000020, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x98cb1180, 0x6e36d469, + 0x9d8eb91c, 0x87e3ef47, 0xa2931288, 0x08405c5a, + 0x73865086, 0x00000080, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x000b3849, 0x38430000, + 0x00000006, 0x0000c100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x08400000, + 0x03865086, 0x4c016100, 0x00000014, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec047_data[] = { + 0x2040dc3f, 0x00000001, 0x2000dcff, 0x00000001, + 0x2200dcff, 0x00000001, 0x0008dc01, 0x00000001, + 0x0001de00, 0x00000001, 0x2900c4ff, 0x00000001, + 0x3100c4ff, 0x00000001, 0x2b00c4ff, 0x00000001, + 0x3300c4ff, 0x00000001, 0x2700d8ff, 0x00000001, + 0x2300d8ff, 0x00000001, 0x2502d800, 0x00000001, + 0x2102d800, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec052_data[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x30000000, 0x000b844c, 0xc8580000, + 0x00000006, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0xb0d3668b, 0xb0555e12, + 0x03b055c6, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0xa64b3449, 0x405a3cc1, + 0x00000006, 0x3d2d3300, 0x00000010, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x26473429, 0x00482cc1, + 0x00000000, 0x00ccd300, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec053_data[] = { + 0x0840f03f, 0x00000001, 0x0040f03f, 0x00000001, + 0x0140fa3f, 0x00000001, 0x0100fa0f, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec058_data[] = { + 0x00000000, 0x00000000, 0x59f89400, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00470000, + 0x00000000, 0x3c000000, 0xa2e40006, 0x00000017, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x19fa1400, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x28440000, + 0x038e5186, 0x3c000000, 0xa8e40012, 0x00000047, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x0001f3d0, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x38c30000, + 0x0000000a, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x0001f3d0, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x38c30000, + 0x0000000a, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x000113d0, 0x00000000, + 0x00000000, 0xb0000000, 0x00073829, 0x00430000, + 0x00000000, 0x3c000000, 0x0000000a, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x000293d0, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x08400000, + 0x03865086, 0x3c000000, 0x00000016, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 
0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec059_data[] = { + 0x0200e4ff, 0x00000001, 0x0400e2ff, 0x00000001, + 0x1300ecff, 0x00000001, 0x1500eaff, 0x00000001, + 0x0300e4ff, 0x00000001, 0x0500e2ff, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec062_data[] = { + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec063_data[] = { + 0x0500e2ff, 0x00000001, 0x0900e2ff, 0x00000001, + 0x1900e2ff, 0x00000001, 0x1100e2ff, 0x00000001, + 0x0100e2ff, 0x00000001, 0x0600e1ff, 0x00000001, + 0x0a00e1ff, 0x00000001, 0x1a00e1ff, 0x00000001, + 0x1200e1ff, 0x00000001, 0x0200e1ff, 0x00000001, + 0x0000fcff, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec065_data[] = { + 0x006e120c, 0x006e1210, 0x006e4208, 0x006e4218, + 0x00200b02, 0x00200b00, 0x000e1900, 0x000e1906, + 0x00580208, 0x00580204, 0x004c0208, 0x004c0207, + 0x0002110c, 0x0002110c, 0x0012010c, 0x00100110, + 
0x0010010c, 0x000a010c, 0x0008010c, 0x00060000, + 0x00160000, 0x00140000, 0x001e0000, 0x001e0000, + 0x001e0000, 0x001e0000, 0x001e0000, 0x001e0000, + 0x001e0000, 0x001e0000, 0x001e0000, 0x001e0000, +}; + +static u32 nbl_sec066_data[] = { + 0x006e120c, 0x006e1210, 0x006e4208, 0x006e4218, + 0x00200b02, 0x00200b00, 0x000e1900, 0x000e1906, + 0x00580208, 0x00580204, 0x004c0208, 0x004c0207, + 0x0002110c, 0x0002110c, 0x0012010c, 0x00100110, + 0x0010010c, 0x000a010c, 0x0008010c, 0x00060000, + 0x00160000, 0x00140000, 0x001e0000, 0x001e0000, + 0x001e0000, 0x001e0000, 0x001e0000, 0x001e0000, + 0x001e0000, 0x001e0000, 0x001e0000, 0x001e0000, +}; + +static u32 nbl_sec071_4p_data[] = { + 0x00000000, 0x00000000, 0x00113d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7029b00, 0x00000000, + 0x00000000, 0x43000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x51e00000, 0x00000c9c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00293d00, 0x00000000, + 0x00000000, 0x00000000, 0x67089b00, 0x00000002, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x80000000, 0x00000000, 0xb1e00000, 0x0000189c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x014b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x015b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 0xe6d29a00, 0x000149c4, + 0x00000000, 0x4b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 0xe6d2c000, 0x000149c4, + 0x00000000, 0x5b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 0x00000000, 0x64d49200, 0x5e556945, + 0xc666d89a, 0x4b0001a9, 0x00004c84, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 
0x00000000, 0x6ed4ba00, 0x5ef56bc5, + 0xc666d8c0, 0x5b0001a9, 0x00004dc4, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000002, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00700000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 
nbl_sec072_data[] = { + 0x84006aff, 0x00000001, 0x880066ff, 0x00000001, + 0x140040ff, 0x00000001, 0x70000cff, 0x00000001, + 0x180040ff, 0x00000001, 0x30000cff, 0x00000001, + 0x10004cff, 0x00000001, 0x30004cff, 0x00000001, + 0x0100ecff, 0x00000001, 0x0300ecff, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec116_data[] = { + 0x00000000, 0x00000000, 0x3fff8000, 0x00000007, + 0x3fff8000, 0x00000007, 0x3fff8000, 0x00000007, + 0x3fff8000, 0x00000003, 0x3fff8000, 0x00000003, + 0x3fff8000, 0x00000007, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec124_data[] = { + 0xfffffffc, 0xffffffff, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000500, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x00300010, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000500, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x00300010, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000500, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x00300fff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x00301fff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x0030ffff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x0030ffff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x0030ffff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x0030ffff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000500, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000fffe, 0x00000000, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000480, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0x00ffffff, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000480, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffe, 0x0000000f, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec125_data[] = { + 0xfffffffc, 0x01ffffff, 0x00300000, 0x70000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000480, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffe, 0x00000001, 0x00300000, 0x70000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000540, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffe, 0x011003ff, 0x00300000, 0x70000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000005c0, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0x103fffff, 0x00300001, 0x70000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000480, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 
0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec126_data[] = { + 0xfffffffc, 0xffffffff, 0x00300001, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000500, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffe, 0x000001ff, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000005c0, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00002013, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000400, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00002013, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000400, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0x01ffffff, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000480, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffe, 0x00000001, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000540, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec137_data[] = { + 0x0000017a, 0x000000f2, 0x00000076, 
0x0000017a, + 0x0000017a, 0x00000080, 0x00000024, 0x0000017a, + 0x0000017a, 0x00000191, 0x00000035, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, + 0x0000017a, 0x000000d2, 0x00000066, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, + 0x0000017a, 0x000000f2, 0x00000076, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, +}; + +static u32 nbl_sec138_data[] = { + 0x0000017a, 0x000000f2, 0x00000076, 0x0000017a, + 0x0000017a, 0x00000080, 0x00000024, 0x0000017a, + 0x0000017a, 0x00000191, 0x00000035, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, + 0x0000017a, 0x000000d2, 0x00000066, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, + 0x0000017a, 0x000000f2, 0x00000076, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, +}; + +/* + * Program the static SEC register tables. The SEC046 and SEC071 tables + * are selected according to the ethernet port mode (1, 2 or 4 ports). + */ +void nbl_write_all_regs(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + u32 *nbl_sec046_data; + u32 *nbl_sec071_data; + u8 eth_mode = NBL_COMMON_TO_ETH_MODE(common); + u32 i = 0; + + switch (eth_mode) { + case 1: + nbl_sec046_data = nbl_sec046_1p_data; + nbl_sec071_data = nbl_sec071_1p_data; + break; + case 2: + nbl_sec046_data = nbl_sec046_2p_data; + nbl_sec071_data = nbl_sec071_2p_data; + break; + case 4: + nbl_sec046_data = nbl_sec046_4p_data; + nbl_sec071_data = nbl_sec071_4p_data; + break; + default: + nbl_sec046_data = nbl_sec046_2p_data; + nbl_sec071_data = nbl_sec071_2p_data; + break; + } + + for (i = 0; i < NBL_SEC006_SIZE; i++) { + /* Issue a dummy read once every NBL_SEC_BLOCK_SIZE writes */ + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC006_REGI(i), nbl_sec006_data[i]); + } + + for (i = 0; i < NBL_SEC007_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC007_REGI(i), nbl_sec007_data[i]); + + for (i = 0; i < NBL_SEC008_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC008_REGI(i), nbl_sec008_data[i]); + } + + for (i = 0; i < NBL_SEC009_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC009_REGI(i), nbl_sec009_data[i]); + } + + for (i = 0; i < NBL_SEC010_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC010_REGI(i), nbl_sec010_data[i]); + + for (i = 0; i < NBL_SEC011_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC011_REGI(i), nbl_sec011_data[i]); + } + + for (i = 0; i < NBL_SEC012_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC012_REGI(i), nbl_sec012_data[i]); + + for (i = 0; i < NBL_SEC013_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC013_REGI(i), nbl_sec013_data[i]); + + for (i = 0; i < NBL_SEC014_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC014_REGI(i), nbl_sec014_data[i]); + + for (i = 0; i < NBL_SEC022_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC022_REGI(i), nbl_sec022_data[i]); + + for (i = 0; i < NBL_SEC023_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC023_REGI(i), nbl_sec023_data[i]); + + for (i = 0; i < NBL_SEC024_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC024_REGI(i), nbl_sec024_data[i]); + } + + for (i = 0; i < NBL_SEC025_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC025_REGI(i), nbl_sec025_data[i]); + } + + for (i = 0; i < NBL_SEC026_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC026_REGI(i), nbl_sec026_data[i]); + + for (i = 0; i < 
NBL_SEC027_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC027_REGI(i), nbl_sec027_data[i]); + } + + for (i = 0; i < NBL_SEC028_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC028_REGI(i), nbl_sec028_data[i]); + + for (i = 0; i < NBL_SEC029_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC029_REGI(i), nbl_sec029_data[i]); + + for (i = 0; i < NBL_SEC030_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC030_REGI(i), nbl_sec030_data[i]); + + for (i = 0; i < NBL_SEC039_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC039_REGI(i), nbl_sec039_data[i]); + + for (i = 0; i < NBL_SEC040_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC040_REGI(i), nbl_sec040_data[i]); + + for (i = 0; i < NBL_SEC046_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC046_REGI(i), nbl_sec046_data[i]); + + for (i = 0; i < NBL_SEC047_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC047_REGI(i), nbl_sec047_data[i]); + + for (i = 0; i < NBL_SEC052_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC052_REGI(i), nbl_sec052_data[i]); + + for (i = 0; i < NBL_SEC053_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC053_REGI(i), nbl_sec053_data[i]); + + for (i = 0; i < NBL_SEC058_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC058_REGI(i), nbl_sec058_data[i]); + + for (i = 0; i < NBL_SEC059_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC059_REGI(i), nbl_sec059_data[i]); + + for (i = 0; i < NBL_SEC062_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC062_REGI(i), nbl_sec062_data[i]); + + for (i = 0; i < NBL_SEC063_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC063_REGI(i), nbl_sec063_data[i]); + + for (i = 0; i < NBL_SEC065_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC065_REGI(i), nbl_sec065_data[i]); + + for (i = 0; i < NBL_SEC066_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC066_REGI(i), nbl_sec066_data[i]); + + for (i = 0; i < NBL_SEC071_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC071_REGI(i), nbl_sec071_data[i]); + } + + for (i = 0; i < NBL_SEC072_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC072_REGI(i), nbl_sec072_data[i]); + + for (i = 0; i < NBL_SEC116_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC116_REGI(i), nbl_sec116_data[i]); + + for (i = 0; i < NBL_SEC124_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC124_REGI(i), nbl_sec124_data[i]); + + for (i = 0; i < NBL_SEC125_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC125_REGI(i), nbl_sec125_data[i]); + + for (i = 0; i < NBL_SEC126_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC126_REGI(i), nbl_sec126_data[i]); + + for (i = 0; i < NBL_SEC137_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC137_REGI(i), nbl_sec137_data[i]); + + for (i = 0; i < NBL_SEC138_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC138_REGI(i), nbl_sec138_data[i]); + + nbl_hw_wr32(phy_mgt, NBL_SEC000_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC001_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC002_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC003_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC004_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC005_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC015_ADDR, 0x000f0908); + nbl_hw_wr32(phy_mgt, NBL_SEC016_ADDR, 0x10110607); + nbl_hw_wr32(phy_mgt, NBL_SEC017_ADDR, 0x383a3032); + nbl_hw_wr32(phy_mgt, NBL_SEC018_ADDR, 0x0201453f); + nbl_hw_wr32(phy_mgt, NBL_SEC019_ADDR, 0x00000a41); + nbl_hw_wr32(phy_mgt, NBL_SEC020_ADDR, 0x000000c8); + nbl_hw_wr32(phy_mgt, NBL_SEC021_ADDR, 0x00000400); + nbl_hw_wr32(phy_mgt, NBL_SEC031_ADDR, 0x000f0908); + nbl_hw_wr32(phy_mgt, NBL_SEC032_ADDR, 0x00001011); + nbl_hw_wr32(phy_mgt, NBL_SEC033_ADDR, 
0x00003032); + nbl_hw_wr32(phy_mgt, NBL_SEC034_ADDR, 0x0201003f); + nbl_hw_wr32(phy_mgt, NBL_SEC035_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC036_ADDR, 0x00001701); + nbl_hw_wr32(phy_mgt, NBL_SEC037_ADDR, 0x009238a1); + nbl_hw_wr32(phy_mgt, NBL_SEC038_ADDR, 0x0000002e); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(0), 0x00000200); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(1), 0x00000300); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(2), 0x00000105); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(3), 0x00000106); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(4), 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(5), 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(6), 0x00000041); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(7), 0x00000082); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(8), 0x00000020); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(9), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(10), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(11), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(12), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(13), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(14), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(15), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC042_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC043_ADDR, 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC044_ADDR, 0x28212000); + nbl_hw_wr32(phy_mgt, NBL_SEC045_ADDR, 0x00002b29); + nbl_hw_wr32(phy_mgt, NBL_SEC048_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC049_ADDR, 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC050_ADDR, 0x352b2000); + nbl_hw_wr32(phy_mgt, NBL_SEC051_ADDR, 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC054_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC055_ADDR, 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC056_ADDR, 0x2b222100); + nbl_hw_wr32(phy_mgt, NBL_SEC057_ADDR, 0x00000038); + nbl_hw_wr32(phy_mgt, NBL_SEC060_ADDR, 0x24232221); + nbl_hw_wr32(phy_mgt, NBL_SEC061_ADDR, 0x0000002e); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(0), 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(1), 0x00000005); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(2), 0x00000011); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(3), 0x00000005); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(4), 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(5), 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(6), 0x00000006); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(7), 0x00000012); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(8), 0x00000006); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(9), 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(10), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(11), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(12), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(13), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(14), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(15), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC067_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC068_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC069_ADDR, 0x22212000); + nbl_hw_wr32(phy_mgt, NBL_SEC070_ADDR, 0x3835322b); + nbl_hw_wr32(phy_mgt, NBL_SEC073_ADDR, 0x0316a5ff); + nbl_hw_wr32(phy_mgt, NBL_SEC074_ADDR, 0x0316a5ff); + nbl_hw_wr32(phy_mgt, NBL_SEC075_REGI(0), 0x08802080); + nbl_hw_wr32(phy_mgt, NBL_SEC075_REGI(1), 0x12a05080); + nbl_hw_wr32(phy_mgt, NBL_SEC075_REGI(2), 0xffffffff); + nbl_hw_wr32(phy_mgt, NBL_SEC075_REGI(3), 0xffffffff); + nbl_hw_wr32(phy_mgt, NBL_SEC076_REGI(0), 0x08802080); + nbl_hw_wr32(phy_mgt, NBL_SEC076_REGI(1), 0x12a05080); + nbl_hw_wr32(phy_mgt, NBL_SEC076_REGI(2), 0xffffffff); + 
nbl_hw_wr32(phy_mgt, NBL_SEC076_REGI(3), 0xffffffff); + nbl_hw_wr32(phy_mgt, NBL_SEC077_REGI(0), 0x08802080); + nbl_hw_wr32(phy_mgt, NBL_SEC077_REGI(1), 0x12a05080); + nbl_hw_wr32(phy_mgt, NBL_SEC077_REGI(2), 0xffffffff); + nbl_hw_wr32(phy_mgt, NBL_SEC077_REGI(3), 0xffffffff); + nbl_hw_wr32(phy_mgt, NBL_SEC078_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC079_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC080_ADDR, 0x0014a248); + nbl_hw_wr32(phy_mgt, NBL_SEC081_ADDR, 0x00000d33); + nbl_hw_wr32(phy_mgt, NBL_SEC082_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC083_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC084_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC085_ADDR, 0x000144d2); + nbl_hw_wr32(phy_mgt, NBL_SEC086_ADDR, 0x31322e2f); + nbl_hw_wr32(phy_mgt, NBL_SEC087_ADDR, 0x0a092d2c); + nbl_hw_wr32(phy_mgt, NBL_SEC088_ADDR, 0x33050804); + nbl_hw_wr32(phy_mgt, NBL_SEC089_ADDR, 0x14131535); + nbl_hw_wr32(phy_mgt, NBL_SEC090_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC091_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC092_ADDR, 0x00000008); + nbl_hw_wr32(phy_mgt, NBL_SEC093_ADDR, 0x0000000e); + nbl_hw_wr32(phy_mgt, NBL_SEC094_ADDR, 0x0000000f); + nbl_hw_wr32(phy_mgt, NBL_SEC095_ADDR, 0x00000015); + nbl_hw_wr32(phy_mgt, NBL_SEC096_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC097_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC098_ADDR, 0x00000008); + nbl_hw_wr32(phy_mgt, NBL_SEC099_ADDR, 0x00000011); + nbl_hw_wr32(phy_mgt, NBL_SEC100_ADDR, 0x00000013); + nbl_hw_wr32(phy_mgt, NBL_SEC101_ADDR, 0x00000014); + nbl_hw_wr32(phy_mgt, NBL_SEC102_ADDR, 0x00000010); + nbl_hw_wr32(phy_mgt, NBL_SEC103_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC104_ADDR, 0x0000004d); + nbl_hw_wr32(phy_mgt, NBL_SEC105_ADDR, 0x08020a09); + nbl_hw_wr32(phy_mgt, NBL_SEC106_ADDR, 0x00000005); + nbl_hw_wr32(phy_mgt, NBL_SEC107_ADDR, 0x00000006); + nbl_hw_wr32(phy_mgt, NBL_SEC108_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC109_ADDR, 0x00110a09); + nbl_hw_wr32(phy_mgt, NBL_SEC110_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC111_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC112_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC113_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC114_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC115_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC117_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC118_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC119_REGI(0), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC119_REGI(1), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC119_REGI(2), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC119_REGI(3), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC119_REGI(4), 0x00000100); + nbl_hw_wr32(phy_mgt, NBL_SEC120_ADDR, 0x0000003c); + nbl_hw_wr32(phy_mgt, NBL_SEC121_ADDR, 0x00000003); + nbl_hw_wr32(phy_mgt, NBL_SEC122_ADDR, 0x000000bc); + nbl_hw_wr32(phy_mgt, NBL_SEC123_ADDR, 0x0000023b); + nbl_hw_wr32(phy_mgt, NBL_SEC127_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC128_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC129_ADDR, 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC130_ADDR, 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC131_ADDR, 0x00000003); + nbl_hw_wr32(phy_mgt, NBL_SEC132_ADDR, 0x00000003); + nbl_hw_wr32(phy_mgt, NBL_SEC133_ADDR, 0x00000004); + nbl_hw_wr32(phy_mgt, NBL_SEC134_ADDR, 0x00000004); + nbl_hw_wr32(phy_mgt, NBL_SEC135_ADDR, 0x0000000e); + nbl_hw_wr32(phy_mgt, NBL_SEC136_ADDR, 0x0000000e); +} + diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.h 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..6b416b9b1ab58ddecb62aa56efd918fadcaf4471 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_PHY_LEONIS_REGS_H_ +#define _NBL_PHY_LEONIS_REGS_H_ + +void nbl_write_all_regs(void *priv); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c index 92ad9254e5680a9ffb442326ce73de0c0602c7d9..4e934b5ce8861deffc087bb5c802fecc30f054b9 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c @@ -5,6 +5,9 @@ */ #include "nbl_queue_leonis.h" +#include "nbl_resource_leonis.h" + +static int nbl_res_queue_reset_uvn_pkt_drop_stats(void *priv, u16 func_id, u16 global_queue_id); static struct nbl_queue_vsi_info * nbl_res_queue_get_vsi_info(struct nbl_resource_mgt *res_mgt, u16 vsi_id) @@ -26,16 +29,23 @@ nbl_res_queue_get_vsi_info(struct nbl_resource_mgt *res_mgt, u16 vsi_id) static int nbl_res_queue_get_net_id(u16 func_id, u16 vsi_type) { + int net_id; + switch (vsi_type) { case NBL_VSI_DATA: - return func_id; + case NBL_VSI_XDP: + net_id = func_id; + break; case NBL_VSI_USER: - return func_id + NBL_SPECIFIC_VSI_NET_ID_OFFSET; case NBL_VSI_CTRL: - return func_id + NBL_SPECIFIC_VSI_NET_ID_OFFSET; + net_id = func_id + NBL_SPECIFIC_VSI_NET_ID_OFFSET; + break; default: - return func_id; + net_id = func_id; + break; } + + return net_id; } static int nbl_res_queue_setup_queue_info(struct nbl_resource_mgt *res_mgt, u16 func_id, @@ -44,7 +54,9 @@ static int nbl_res_queue_setup_queue_info(struct nbl_resource_mgt *res_mgt, u16 struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_event_queue_update_data event_data; u16 *txrx_queues, *queues_context; + u32 *uvn_stat_pkt_drop; u16 queue_index; int i, ret = 0; @@ -63,9 +75,16 @@ static int nbl_res_queue_setup_queue_info(struct nbl_resource_mgt *res_mgt, u16 goto alloc_queue_contex_fail; } + uvn_stat_pkt_drop = kcalloc(num_queues, sizeof(*uvn_stat_pkt_drop), GFP_ATOMIC); + if (!uvn_stat_pkt_drop) { + ret = -ENOMEM; + goto alloc_uvn_stat_pkt_drop_fail; + } + queue_info->num_txrx_queues = num_queues; queue_info->txrx_queues = txrx_queues; queue_info->queues_context = queues_context; + queue_info->uvn_stat_pkt_drop = uvn_stat_pkt_drop; for (i = 0; i < num_queues; i++) { queue_index = find_first_zero_bit(queue_mgt->txrx_queue_bitmap, NBL_MAX_TXRX_QUEUE); @@ -77,15 +96,24 @@ static int nbl_res_queue_setup_queue_info(struct nbl_resource_mgt *res_mgt, u16 set_bit(queue_index, queue_mgt->txrx_queue_bitmap); } + event_data.func_id = func_id; + event_data.ring_num = num_queues; + event_data.map = txrx_queues; + nbl_event_notify(NBL_EVENT_QUEUE_ALLOC, &event_data, NBL_COMMON_TO_VSI_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + return 0; get_txrx_queue_fail: + kfree(uvn_stat_pkt_drop); while (--i + 1) { queue_index = txrx_queues[i]; clear_bit(queue_index, queue_mgt->txrx_queue_bitmap); } queue_info->num_txrx_queues = 0;
queue_info->txrx_queues = NULL; +alloc_uvn_stat_pkt_drop_fail: + kfree(queues_context); alloc_queue_contex_fail: kfree(txrx_queues); alloc_txrx_queues_fail: @@ -103,8 +131,10 @@ static void nbl_res_queue_remove_queue_info(struct nbl_resource_mgt *res_mgt, u1 kfree(queue_info->txrx_queues); kfree(queue_info->queues_context); + kfree(queue_info->uvn_stat_pkt_drop); queue_info->txrx_queues = NULL; queue_info->queues_context = NULL; + queue_info->uvn_stat_pkt_drop = NULL; queue_info->num_txrx_queues = 0; } @@ -269,25 +299,45 @@ void nbl_res_queue_remove_qid_map_table_leonis(struct nbl_resource_mgt *res_mgt, nbl_res_queue_set_qid_map_table(res_mgt, tail); } -static int nbl_res_queue_get_rss_ret_base(struct nbl_resource_mgt *res_mgt, u16 count, u16 *result) +static int +nbl_res_queue_get_rss_ret_base(struct nbl_resource_mgt *res_mgt, u16 count, u16 rss_entry_size, + struct nbl_queue_vsi_info *vsi_info) { struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + u32 rss_ret_base_start; + u32 rss_ret_base_end; + u16 func_id; + u16 rss_entry_count; u16 index, i, j, k; int success = 1; int ret = -EFAULT; - for (i = 0; i < NBL_EPRO_RSS_RET_TBL_DEPTH;) { + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_info->vsi_id); + if (func_id < NBL_MAX_ETHERNET && + (vsi_info->vsi_index == NBL_VSI_DATA || vsi_info->vsi_index == NBL_VSI_USER)) { + rss_ret_base_start = 0; + rss_ret_base_end = NBL_EPRO_PF_RSS_RET_TBL_DEPTH; + vsi_info->rss_entry_size = NBL_EPRO_PF_RSS_ENTRY_SIZE; + rss_entry_count = NBL_EPRO_PF_RSS_RET_TBL_COUNT; + } else { + rss_ret_base_start = NBL_EPRO_PF_RSS_RET_TBL_DEPTH; + rss_ret_base_end = NBL_EPRO_RSS_RET_TBL_DEPTH; + vsi_info->rss_entry_size = rss_entry_size; + rss_entry_count = count; + } + + for (i = rss_ret_base_start; i < rss_ret_base_end;) { index = find_next_zero_bit(queue_mgt->rss_ret_bitmap, - NBL_EPRO_RSS_RET_TBL_DEPTH, i); - if (index == NBL_EPRO_RSS_RET_TBL_DEPTH) { + rss_ret_base_end, i); + if (index == rss_ret_base_end) { nbl_err(common, NBL_DEBUG_QUEUE, "There is no available rss ret left"); break; } success = 1; - for (j = index + 1; j < (index + count); j++) { - if (j >= NBL_EPRO_RSS_RET_TBL_DEPTH) { + for (j = index + 1; j < (index + rss_entry_count); j++) { + if (j >= rss_ret_base_end) { success = 0; break; } @@ -298,9 +348,9 @@ static int nbl_res_queue_get_rss_ret_base(struct nbl_resource_mgt *res_mgt, u16 } } if (success) { - for (k = index; k < (index + count); k++) + for (k = index; k < (index + rss_entry_count); k++) set_bit(k, queue_mgt->rss_ret_bitmap); - *result = index; + vsi_info->rss_ret_base = index; ret = 0; break; } @@ -326,10 +376,10 @@ static int nbl_res_queue_setup_q2vsi(void *priv, u16 vsi_id) if (!vsi_info) return -ENOENT; - /*config ipro queue tbl*/ + /* config ipro queue tbl */ for (i = vsi_info->queue_offset; - i < vsi_info->queue_offset + vsi_info->queue_num && i < queue_info->num_txrx_queues; - i++) { + i < vsi_info->queue_offset + vsi_info->queue_num && + i < queue_info->num_txrx_queues; i++) { ret = phy_ops->cfg_ipro_queue_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), queue_info->txrx_queues[i], vsi_id, 1); if (ret) { @@ -380,14 +430,14 @@ static int nbl_res_queue_setup_rss(void *priv, u16 vsi_id) rss_entry_size = (vsi_info->queue_num + NBL_EPRO_RSS_ENTRY_SIZE_UNIT - 1) / NBL_EPRO_RSS_ENTRY_SIZE_UNIT; + rss_entry_size = ilog2(roundup_pow_of_two(rss_entry_size)); count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << rss_entry_size; - ret = nbl_res_queue_get_rss_ret_base(res_mgt, count, 
&vsi_info->rss_ret_base); + ret = nbl_res_queue_get_rss_ret_base(res_mgt, count, rss_entry_size, vsi_info); if (ret) return -ENOSPC; - vsi_info->rss_entry_size = rss_entry_size; vsi_info->rss_vld = true; return 0; @@ -444,16 +494,46 @@ static void nbl_res_queue_setup_queue_cfg(struct nbl_queue_mgt *queue_mgt, cfg_param->half_offload_en = queue_param->half_offload_en; } +static void nbl_res_queue_update_netid_refnum(struct nbl_queue_mgt *queue_mgt, u16 net_id, bool add) +{ + if (net_id >= NBL_MAX_NET_ID) + return; + + if (add) { + queue_mgt->net_id_ref_vsinum[net_id]++; + } else { + /* The probe path calls clear_queue first, so only decrement a + * non-zero count; this lets dsch be disabled more than once. + */ + if (queue_mgt->net_id_ref_vsinum[net_id]) + queue_mgt->net_id_ref_vsinum[net_id]--; + } +} + +static u16 nbl_res_queue_get_netid_refnum(struct nbl_queue_mgt *queue_mgt, u16 net_id) +{ + if (net_id >= NBL_MAX_NET_ID) + return 0; + + return queue_mgt->net_id_ref_vsinum[net_id]; +} + static void nbl_res_queue_setup_hw_dq(struct nbl_resource_mgt *res_mgt, - struct nbl_queue_cfg_param *queue_cfg, u16 func_id) + struct nbl_queue_cfg_param *queue_cfg, + u16 func_id, u16 vsi_id) { struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_queue_vsi_info *vsi_info; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); struct nbl_vnet_queue_info_param param = {0}; u16 global_queue_id = queue_cfg->global_queue_id; u8 bus, dev, func; + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return; + nbl_res_func_id_to_bdf(res_mgt, func_id, &bus, &dev, &func); queue_info->split = queue_cfg->split; queue_info->queue_size = queue_cfg->size; @@ -478,11 +558,15 @@ static void nbl_res_queue_setup_hw_dq(struct nbl_resource_mgt *res_mgt, queue_cfg->last_avail_idx); phy_ops->cfg_tx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), queue_cfg, global_queue_id); + if (nbl_res_queue_get_netid_refnum(queue_mgt, vsi_info->net_id)) + phy_ops->cfg_q2tc_netid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue_id, vsi_info->net_id, 1); } else { phy_ops->set_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &param, NBL_PAIR_ID_GET_RX(global_queue_id)); phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + nbl_res_queue_reset_uvn_pkt_drop_stats(res_mgt, func_id, global_queue_id); if (!queue_cfg->extend_header) phy_ops->restore_uvn_context(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id, queue_cfg->split, @@ -492,6 +576,46 @@ static void nbl_res_queue_setup_hw_dq(struct nbl_resource_mgt *res_mgt, } } +static void nbl_res_queue_remove_hw_dq(struct nbl_resource_mgt *res_mgt, + struct nbl_queue_cfg_param *queue_cfg, u16 func_id) +{ + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 global_queue_id = queue_cfg->global_queue_id; + int ret = 0; + + if (queue_cfg->tx) { + ret = phy_ops->lso_dsch_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + if (ret) { + nbl_err(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_QUEUE, "lso_dsch_drain failed"); + return; + } + phy_ops->clear_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_PAIR_ID_GET_TX(global_queue_id)); + + phy_ops->disable_dvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + + queue_info->queues_context[NBL_PAIR_ID_GET_TX(global_queue_id)] = + phy_ops->save_dvn_ctx(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue_id, queue_info->split); + 
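/* Reset DVN only after save_dvn_ctx() has captured the ring context; + * the saved value is what the get_queue_ctx op (nbl_res_queue_get_ctx) + * hands back later, presumably to a userspace polling-mode driver. + */ + 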
phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + } else { + phy_ops->clear_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_PAIR_ID_GET_RX(global_queue_id)); + phy_ops->disable_uvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + phy_ops->rsc_cache_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + + queue_info->queues_context[NBL_PAIR_ID_GET_RX(global_queue_id)] = + phy_ops->save_uvn_ctx(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue_id, queue_info->split, + queue_info->queue_size); + + phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + nbl_res_queue_reset_uvn_pkt_drop_stats(res_mgt, func_id, global_queue_id); + } +} + static void nbl_res_queue_remove_all_hw_dq(struct nbl_resource_mgt *res_mgt, u16 func_id, struct nbl_queue_vsi_info *vsi_info) { @@ -530,6 +654,7 @@ static void nbl_res_queue_remove_all_hw_dq(struct nbl_resource_mgt *res_mgt, u16 for (i = start; i < end; i++) { global_queue = queue_info->txrx_queues[i]; phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + nbl_res_queue_reset_uvn_pkt_drop_stats(res_mgt, func_id, global_queue); phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); } @@ -582,13 +707,17 @@ static int nbl_res_queue_init_epro_vpt_table(struct nbl_resource_mgt *res_mgt, u struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; int pfid, vfid; u16 vsi_id, vf_vsi_id; + u16 i; - vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_DATA_TYPE); nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); if (sriov_info->bdf != 0) { /* init pf vsi */ - phy_ops->init_epro_vpt_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id); + for (i = NBL_VSI_SERV_PF_DATA_TYPE; i <= NBL_VSI_SERV_PF_XDP_TYPE; i++) { + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, i); + phy_ops->init_epro_vpt_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id); + } for (vfid = 0; vfid < sriov_info->num_vfs; vfid++) { vf_vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, pfid, vfid, NBL_VSI_DATA); @@ -602,8 +731,8 @@ static int nbl_res_queue_init_epro_vpt_table(struct nbl_resource_mgt *res_mgt, u return 0; } -static int nbl_res_vsi_init_ipro_dn_sport_tbl(struct nbl_resource_mgt *res_mgt, - u16 func_id, u16 bmode, bool binit) +static int nbl_res_queue_init_ipro_dn_sport_tbl(struct nbl_resource_mgt *res_mgt, + u16 func_id, u16 bmode, bool binit) { struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); @@ -612,7 +741,7 @@ static int nbl_res_vsi_init_ipro_dn_sport_tbl(struct nbl_resource_mgt *res_mgt, u16 eth_id, vsi_id, vf_vsi_id; int i; - vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_DATA_TYPE); nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); if (sriov_info->bdf != 0) { @@ -635,11 +764,11 @@ static int nbl_res_vsi_init_ipro_dn_sport_tbl(struct nbl_resource_mgt *res_mgt, return 0; } -static int nbl_res_vsi_set_bridge_mode(void *priv, u16 func_id, u16 bmode) +static int nbl_res_queue_set_bridge_mode(void *priv, u16 func_id, u16 bmode) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - return nbl_res_vsi_init_ipro_dn_sport_tbl(res_mgt, func_id, bmode, false); + return nbl_res_queue_init_ipro_dn_sport_tbl(res_mgt, func_id, bmode, false); } static int nbl_res_queue_init_rss(struct nbl_resource_mgt *res_mgt, @@ -678,9 +807,15 @@ static void 
nbl_res_queue_free_txrx_queues(void *priv, u16 vsi_id) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_event_queue_update_data event_data; nbl_res_queue_remove_qid_map_table_leonis(res_mgt, func_id); nbl_res_queue_remove_queue_info(res_mgt, func_id); + event_data.func_id = func_id; + event_data.ring_num = 0; + event_data.map = NULL; + nbl_event_notify(NBL_EVENT_QUEUE_ALLOC, &event_data, NBL_COMMON_TO_VSI_ID(res_mgt->common), + NBL_COMMON_TO_BOARD_ID(res_mgt->common)); } static int nbl_res_queue_setup_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx) @@ -691,8 +826,19 @@ static int nbl_res_queue_setup_queue(void *priv, struct nbl_txrx_queue_param *pa nbl_res_queue_setup_queue_cfg(NBL_RES_MGT_TO_QUEUE_MGT(res_mgt), &cfg_param, param, is_tx, func_id); + nbl_res_queue_setup_hw_dq(res_mgt, &cfg_param, func_id, param->vsi_id); + return 0; +} + +static int nbl_res_queue_remove_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_cfg_param cfg_param = {0}; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, param->vsi_id); - nbl_res_queue_setup_hw_dq(res_mgt, &cfg_param, func_id); + nbl_res_queue_setup_queue_cfg(NBL_RES_MGT_TO_QUEUE_MGT(res_mgt), + &cfg_param, param, is_tx, func_id); + nbl_res_queue_remove_hw_dq(res_mgt, &cfg_param, func_id); return 0; } @@ -723,12 +869,12 @@ static int nbl_res_queue_register_vsi2q(void *priv, u16 vsi_index, u16 vsi_id, vsi_info = &queue_info->vsi_info[vsi_index]; memset(vsi_info, 0, sizeof(*vsi_info)); - vsi_info->vld = 1; vsi_info->vsi_index = vsi_index; vsi_info->vsi_id = vsi_id; vsi_info->queue_offset = queue_offset; vsi_info->queue_num = queue_num; + vsi_info->net_id = nbl_res_queue_get_net_id(func_id, vsi_info->vsi_index); return 0; } @@ -742,34 +888,44 @@ static int nbl_res_queue_cfg_dsch(void *priv, u16 vsi_id, bool vld) struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); struct nbl_queue_vsi_info *vsi_info; u16 group_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi_id); /* group_id is same with eth_id */ + u16 start = 0, end = 0; int i, ret = 0; vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); if (!vsi_info) return -ENOENT; - vsi_info->net_id = nbl_res_queue_get_net_id(func_id, vsi_info->vsi_index); + start = vsi_info->queue_offset; + end = vsi_info->queue_num + vsi_info->queue_offset; - if (!vld) + /* When setting up, g2p -> n2g -> q2tc; when down, q2tc -> n2g -> g2p */ + if (!vld) { phy_ops->deactive_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id); - - for (i = vsi_info->queue_offset; i < vsi_info->queue_num + vsi_info->queue_offset; i++) { - phy_ops->cfg_q2tc_netid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), - queue_info->txrx_queues[i], vsi_info->net_id, vld); + for (i = start; i < end; i++) + phy_ops->cfg_q2tc_netid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[i], vsi_info->net_id, vld); + nbl_res_queue_update_netid_refnum(queue_mgt, vsi_info->net_id, false); } - ret = phy_ops->cfg_dsch_net_to_group(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + if (!nbl_res_queue_get_netid_refnum(queue_mgt, vsi_info->net_id)) { + ret = phy_ops->cfg_dsch_net_to_group(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, group_id, vld); - if (ret) - return ret; + if (ret) + return ret; + } - if (vld) + if (vld) { + for (i = start; i < end; i++) + phy_ops->cfg_q2tc_netid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[i], 
vsi_info->net_id, vld); phy_ops->active_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id); + nbl_res_queue_update_netid_refnum(queue_mgt, vsi_info->net_id, true); + } return 0; } -static int nbl_res_queue_setup_cqs(void *priv, u16 vsi_id, u16 real_qps) +static int nbl_res_queue_setup_cqs(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); @@ -788,11 +944,11 @@ static int nbl_res_queue_setup_cqs(void *priv, u16 vsi_id, u16 real_qps) if (real_qps == vsi_info->curr_qps) return 0; - if (real_qps) + if (real_qps && rss_indir_set) phy_ops->cfg_epro_rss_ret(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->rss_ret_base, vsi_info->rss_entry_size, real_qps, - queue_info->txrx_queues + vsi_info->queue_offset); + queue_info->txrx_queues + vsi_info->queue_offset, NULL); if (!vsi_info->curr_qps) phy_ops->set_epro_rss_pt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, @@ -857,7 +1013,7 @@ static int nbl_res_queue_init(void *priv) for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) { nbl_res_queue_init_epro_vpt_table(res_mgt, i); - nbl_res_vsi_init_ipro_dn_sport_tbl(res_mgt, i, BRIDGE_MODE_VEB, true); + nbl_res_queue_init_ipro_dn_sport_tbl(res_mgt, i, BRIDGE_MODE_VEB, true); } phy_ops->init_pfc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), NBL_MAX_ETHERNET); @@ -893,6 +1049,168 @@ static int nbl_res_queue_get_queue_err_stats(void *priv, u16 func_id, u8 queue_i return 0; } +static int nbl_res_queue_cfg_qdisc_mqprio(void *priv, struct nbl_tc_qidsc_param *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info; + struct nbl_queue_vsi_info *vsi_info = NULL; + u64 total_tx_rate = 0, max_rate = 0, max_tc_rate = 0; + u16 func_id, curr_qps = 0, queue_id = 0; + u8 *weight; + bool is_active = false; + int i, j, gravity; + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, param->vsi_id); + queue_info = &queue_mgt->queue_info[func_id]; + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, param->vsi_id); + + switch (res_info->board_info.eth_speed) { + case NBL_FW_PORT_SPEED_100G: + max_rate = NBL_RATE_MBPS_100G; + break; + case NBL_FW_PORT_SPEED_25G: + max_rate = NBL_RATE_MBPS_25G; + break; + case NBL_FW_PORT_SPEED_10G: + max_rate = NBL_RATE_MBPS_10G; + break; + default: + return -EOPNOTSUPP; + } + + for (i = 0; i < param->num_tc; i++) + total_tx_rate += param->info[i].max_tx_rate; + + if (total_tx_rate > max_rate) { + nbl_err(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_QUEUE, + "Invalid total_tx_rate: %llu mbps, should within %llu mbps", + total_tx_rate, max_rate); + return -EINVAL; + } + + for (i = 0; i < vsi_info->curr_qps; i++) { + queue_id = queue_info->txrx_queues[i + vsi_info->queue_offset]; + phy_ops->lso_dsch_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), queue_id); + is_active |= phy_ops->check_q2tc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), queue_id); + } + + /* Config tc */ + for (i = 0; i < param->num_tc; i++) + for (j = 0; j < param->info[i].count; j++) { + phy_ops->cfg_q2tc_tcid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[curr_qps + + vsi_info->queue_offset], i); + curr_qps++; + } + + for (i = curr_qps; i < param->origin_qps; i++) + phy_ops->cfg_q2tc_tcid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[i + 
vsi_info->queue_offset], 0); + + /* Config weight */ + weight = kcalloc(param->num_tc, sizeof(*weight), GFP_KERNEL); + if (!weight) + return -ENOMEM; + + for (i = 0; i < param->num_tc; i++) + if (param->info[i].max_tx_rate > max_tc_rate) + max_tc_rate = param->info[i].max_tx_rate; + + gravity = max_tc_rate / NBL_SHAPING_WGT_MAX + 1; + + for (i = 0; i < param->num_tc; i++) + weight[i] = param->info[i].max_tx_rate / gravity; + + phy_ops->set_tc_wgt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, + weight, param->num_tc); + + /* Config shaping */ + phy_ops->set_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, total_tx_rate, 0, + param->enable && total_tx_rate, is_active); + + kfree(weight); + return 0; +} + +static int nbl_res_queue_set_tc_wgt(void *priv, u16 vsi_id, u8 *weight, u8 num_tc) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_vsi_info *vsi_info = NULL; + u8 *weight_to_set; + int i; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + weight_to_set = kcalloc(num_tc, sizeof(*weight_to_set), GFP_KERNEL); + if (!weight_to_set) + return -ENOMEM; + + for (i = 0; i < num_tc; i++) + weight_to_set[i] = weight[i] * NBL_SHAPING_WGT_MAX / NBL_TC_MAX_BW; + phy_ops->set_tc_wgt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, + weight_to_set, num_tc); + + kfree(weight_to_set); + return 0; +} + +static void nbl_res_restore_tc_mgt(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_vsi_info *vsi_info = NULL; + u8 *weight; + int i; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return; + weight = kcalloc(NBL_MAX_TC_NUM, sizeof(*weight), GFP_KERNEL); + if (!weight) + return; + + for (i = 0; i < NBL_MAX_TC_NUM; i++) + weight[i] = 1; + phy_ops->set_tc_wgt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, + weight, NBL_MAX_TC_NUM); + + kfree(weight); +} + +static u16 nbl_res_queue_get_local_queue_id(void *priv, u16 vsi_id, u16 global_queue_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + int i; + + queue_info = &queue_mgt->queue_info[func_id]; + + if (queue_info->txrx_queues) + for (i = 0; i < queue_info->num_txrx_queues; i++) + if (global_queue_id == queue_info->txrx_queues[i]) + return i; + + return U16_MAX; +} + +static u16 nbl_res_queue_get_vsi_global_qid(void *priv, u16 vsi_id, u16 local_qid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + + if (!queue_info->num_txrx_queues) + return 0xffff; + + return queue_info->txrx_queues[local_qid]; +} + static void nbl_res_queue_get_rxfh_indir_size(void *priv, u16 vsi_id, u32 *rxfh_indir_size) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -909,25 +1227,65 @@ static void nbl_res_queue_get_rxfh_indir(void *priv, u16 vsi_id, u32 *indir) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_queue_vsi_info *vsi_info = NULL; - int i, j; - u32 rxfh_indir_size; - u16 queue_num; + struct nbl_phy_ops 
*phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 i, indir_size; vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); if (!vsi_info) return; - queue_num = vsi_info->curr_qps_static ? vsi_info->curr_qps_static : vsi_info->queue_num; - rxfh_indir_size = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << vsi_info->rss_entry_size; + phy_ops->read_rss_indir(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, indir, + vsi_info->rss_ret_base, vsi_info->rss_entry_size); - for (i = 0, j = 0; i < rxfh_indir_size; i++) { - indir[i] = j; - j++; - if (j == queue_num) - j = 0; + indir_size = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << vsi_info->rss_entry_size; + for (i = 0; i < indir_size; i++) { + indir[i] = nbl_res_queue_get_local_queue_id(res_mgt, vsi_id, indir[i]); + indir[i] -= vsi_info->queue_offset; } } +static int nbl_res_queue_set_rxfh_indir(void *priv, u16 vsi_id, const u32 *indir, u32 indir_size) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_vsi_info *vsi_info = NULL; + struct nbl_queue_info *queue_info = NULL; + u32 *rss_ret; + u16 func_id = 0; + int i = 0; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return -ENOENT; + + if (indir) { + rss_ret = kcalloc(indir_size, sizeof(indir[0]), GFP_KERNEL); + if (!rss_ret) + return -ENOMEM; + + func_id = NBL_COMMON_TO_MGT_PF(common); + queue_info = &queue_mgt->queue_info[func_id]; + /* local queue to global queue */ + for (i = 0; i < indir_size; i++) + rss_ret[i] = nbl_res_queue_get_vsi_global_qid(res_mgt, vsi_id, + vsi_info->queue_offset + + indir[i]); + phy_ops->cfg_epro_rss_ret(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vsi_info->rss_ret_base, + vsi_info->rss_entry_size, 0, + NULL, rss_ret); + kfree(rss_ret); + } + + if (!vsi_info->curr_qps) + phy_ops->set_epro_rss_pt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + vsi_info->rss_ret_base, vsi_info->rss_entry_size); + + return 0; +} + static void nbl_res_queue_get_rxfh_rss_key_size(void *priv, u32 *rxfh_rss_key_size) { *rxfh_rss_key_size = NBL_EPRO_RSS_SK_SIZE; @@ -957,12 +1315,22 @@ static void nbl_res_queue_get_rss_key(void *priv, u8 *rss_key) nbl_res_rss_key_reverse_order(rss_key + i * NBL_EPRO_RSS_PER_KEY_SIZE); } -static void nbl_res_queue_get_rss_alg_sel(void *priv, u8 *alg_sel, u8 eth_id) +static void nbl_res_queue_get_rss_alg_sel(void *priv, u16 vsi_id, u8 *alg_sel) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - phy_ops->get_rss_alg_sel(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, alg_sel); + phy_ops->get_rss_alg_sel(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, alg_sel); +} + +static int nbl_res_queue_set_rss_alg_sel(void *priv, u16 vsi_id, u8 alg_sel) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->set_rss_alg_sel(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, alg_sel); } static void nbl_res_queue_clear_queues(void *priv, u16 vsi_id) @@ -971,30 +1339,59 @@ { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_tc_qidsc_param param; 
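+ /* Tear down in rough reverse order of setup: RSS and q2vsi first, + * then shaping (cleared below via a disabled mqprio config), CQs, + * dsch, and finally the queues themselves. + */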
nbl_res_queue_remove_rss(priv, vsi_id); nbl_res_queue_remove_q2vsi(priv, vsi_id); + nbl_res_restore_tc_mgt(priv, vsi_id); if (!queue_info->num_txrx_queues) return; + memset(&param, 0, sizeof(param)); + /* clear shaping */ + param.vsi_id = vsi_id; + param.enable = false; + nbl_res_queue_cfg_qdisc_mqprio(priv, &param); nbl_res_queue_remove_cqs(res_mgt, vsi_id); nbl_res_queue_cfg_dsch(res_mgt, vsi_id, false); nbl_res_queue_remove_all_queues(res_mgt, vsi_id); nbl_res_queue_free_txrx_queues(res_mgt, vsi_id); } -/* for pmd driver */ -static u16 nbl_res_queue_get_vsi_global_qid(void *priv, u16 vsi_id, u16 local_qid) +static int nbl_res_queue_cfg_log(void *priv, u16 vsi_id, u16 qps, bool vld) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 global_queue, i; + + if (!queue_info->num_txrx_queues) + return 0; + + for (i = 0; i < qps; i++) { + global_queue = queue_info->txrx_queues[i]; + phy_ops->cfg_vnet_qinfo_log(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_PAIR_ID_GET_RX(global_queue), vld); + phy_ops->cfg_vnet_qinfo_log(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_PAIR_ID_GET_TX(global_queue), vld); + } + + return 0; +} + +static u16 nbl_res_queue_get_ctx(void *priv, u16 vsi_id, u16 qid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; if (!queue_info->num_txrx_queues) return 0xffff; - return queue_info->txrx_queues[local_qid]; + return queue_info->queues_context[qid]; } static u16 nbl_get_adapt_desc_gother_level(u16 last_level, u64 rates) @@ -1055,6 +1452,20 @@ static void nbl_res_queue_adapt_desc_gother(void *priv) } } +static void nbl_res_queue_set_desc_high_throughput(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_adapt_desc_gother *adapt_desc_gother = &queue_mgt->adapt_desc_gother; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (adapt_desc_gother->level != NBL_ADAPT_DESC_GOTHER_LEVEL1) { + phy_ops->set_uvn_desc_wr_timeout(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_ADAPT_DESC_GOTHER_LEVEL1_TIMEOUT); + adapt_desc_gother->level = NBL_ADAPT_DESC_GOTHER_LEVEL1; + } +} + static void nbl_res_flr_clear_queues(void *priv, u16 vf_id) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -1071,9 +1482,13 @@ static int nbl_res_queue_restore_tx_queue(struct nbl_resource_mgt *res_mgt, u16 struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); struct nbl_queue_info *queue_info; + struct nbl_queue_vsi_info *vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); struct nbl_queue_cfg_param queue_cfg = {0}; u16 global_queue, func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + if (!vsi_info) + return -ENOSPC; + queue_info = &queue_mgt->queue_info[func_id]; global_queue = queue_info->txrx_queues[local_queue_id]; @@ -1089,6 +1504,8 @@ static int nbl_res_queue_restore_tx_queue(struct nbl_resource_mgt *res_mgt, u16 phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); 
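+ /* Presumably the DVN reset also drops this queue's q2tc mapping; + * the cfg_q2tc_netid call added below re-applies it together with + * the ring config. + */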
phy_ops->cfg_tx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue); + phy_ops->cfg_q2tc_netid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue, vsi_info->net_id, 1); return 0; } @@ -1115,6 +1532,7 @@ static int nbl_res_queue_restore_rx_queue(struct nbl_resource_mgt *res_mgt, u16 phy_ops->rsc_cache_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + nbl_res_queue_reset_uvn_pkt_drop_stats(res_mgt, func_id, global_queue); phy_ops->cfg_rx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue); @@ -1138,22 +1556,225 @@ static int nbl_res_queue_restore_hw_queue(void *priv, u16 vsi_id, u16 local_queu return -EINVAL; } -static u16 nbl_res_queue_get_local_queue_id(void *priv, u16 vsi_id, u16 global_queue_id) +static int +nbl_res_queue_stop_abnormal_hw_queue(void *priv, u16 vsi_id, u16 local_queue_id, int type) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); struct nbl_queue_info *queue_info; - u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); - int i; + u16 global_queue, func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); queue_info = &queue_mgt->queue_info[func_id]; + global_queue = queue_info->txrx_queues[local_queue_id]; + switch (type) { + case NBL_TX: + phy_ops->lso_dsch_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + phy_ops->disable_dvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + + phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + return 0; + case NBL_RX: + phy_ops->disable_uvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + phy_ops->rsc_cache_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + + phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + nbl_res_queue_reset_uvn_pkt_drop_stats(res_mgt, func_id, global_queue); + return 0; + default: + break; + } + + return -EINVAL; +} + +static int nbl_res_queue_set_tx_rate(void *priv, u16 func_id, int tx_rate, int burst) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 vsi_id, queue_id; + bool is_active = false; + int max_rate = 0, i; + + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + + if (!vsi_info) + return 0; + + switch (res_info->board_info.eth_speed) { + case NBL_FW_PORT_SPEED_100G: + max_rate = NBL_RATE_MBPS_100G; + break; + case NBL_FW_PORT_SPEED_25G: + max_rate = NBL_RATE_MBPS_25G; + break; + case NBL_FW_PORT_SPEED_10G: + max_rate = NBL_RATE_MBPS_10G; + break; + default: + return -EOPNOTSUPP; + } + + if (tx_rate > max_rate) + return -EINVAL; if (queue_info->txrx_queues) - for (i = 0; i < queue_info->num_txrx_queues; i++) - if (global_queue_id == queue_info->txrx_queues[i]) - return i; + for (i = 0; i < vsi_info->curr_qps; i++) { + queue_id = queue_info->txrx_queues[i + vsi_info->queue_offset]; + is_active |= phy_ops->check_q2tc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_id); + } - return U16_MAX; + /* Config shaping */ + return 
phy_ops->set_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, tx_rate, + burst, !!(tx_rate), is_active); +} + +static int nbl_res_queue_set_rx_rate(void *priv, u16 func_id, int rx_rate, int burst) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 vsi_id; + int max_rate = 0; + + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + + if (!vsi_info) + return 0; + + switch (res_info->board_info.eth_speed) { + case NBL_FW_PORT_SPEED_100G: + max_rate = NBL_RATE_MBPS_100G; + break; + case NBL_FW_PORT_SPEED_25G: + max_rate = NBL_RATE_MBPS_25G; + break; + case NBL_FW_PORT_SPEED_10G: + max_rate = NBL_RATE_MBPS_10G; + break; + default: + return -EOPNOTSUPP; + } + + if (rx_rate > max_rate) + return -EINVAL; + + /* Config ucar */ + return phy_ops->set_ucar(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, rx_rate, + burst, !!(rx_rate)); +} + +static void nbl_res_queue_get_active_func_bitmaps(void *priv, unsigned long *bitmap, int max_func) +{ + int i; + int func_id_end; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + func_id_end = max_func > NBL_MAX_FUNC ? NBL_MAX_FUNC : max_func; + for (i = 0; i < func_id_end; i++) { + if (!nbl_res_check_func_active_by_queue(res_mgt, i)) + continue; + + set_bit(i, bitmap); + } +} + +static int nbl_res_queue_configure_mirror_table(void *priv, bool mirror_en, u16 func_id, u8 mt_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info; + struct nbl_event_mirror_outputport_data blacklist_data = {0}; + u16 vsi_id; + u16 queue_id = 0; + + queue_info = &queue_mgt->queue_info[func_id]; + if (!queue_info->num_txrx_queues) { + dev_err(dev, "func:%d num_txrx_queues is 0!\n", func_id); + return -EINVAL; + } + + blacklist_data.opcode = mirror_en; + blacklist_data.func_id = func_id; + nbl_event_notify(NBL_EVENT_MIRROR_OUTPUTPORT, &blacklist_data, NBL_COMMON_TO_VSI_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + + queue_id = queue_info->txrx_queues[0]; + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); + + return phy_ops->configure_mirror_table(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + mirror_en, vsi_id, queue_id, mt_id); +} + +static void nbl_res_queue_set_dvn_desc_req(void *priv, u32 desc_req) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->set_dvn_desc_req(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), desc_req); +} + +static u32 nbl_res_queue_get_dvn_desc_req(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_dvn_desc_req(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static int nbl_res_queue_reset_uvn_pkt_drop_stats(void *priv, u16 func_id, u16 global_queue_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info 
*queue_info = &queue_mgt->queue_info[func_id]; + u16 vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_DATA_TYPE); + u16 local_queue_id; + + local_queue_id = nbl_res_queue_get_local_queue_id(res_mgt, vsi_id, global_queue_id); + queue_info->uvn_stat_pkt_drop[local_queue_id] = 0; + return 0; +} + +static int nbl_res_queue_get_uvn_pkt_drop_stats(void *priv, u16 vsi_id, + u16 num_queues, u32 *uvn_stat_pkt_drop) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = NULL; + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 func_id = 0; + u32 pkt_drop_num = 0; + int i = 0; + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + queue_info = &queue_mgt->queue_info[func_id]; + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return -ENOENT; + + /* Report each queue's drops as a delta since the last read, then + * refresh the stored snapshot. + */ + for (i = vsi_info->queue_offset; + i < vsi_info->queue_offset + num_queues && + i < queue_info->num_txrx_queues; i++) { + phy_ops->get_uvn_pkt_drop_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[i], &pkt_drop_num); + *uvn_stat_pkt_drop = pkt_drop_num - queue_info->uvn_stat_pkt_drop[i]; + uvn_stat_pkt_drop++; + queue_info->uvn_stat_pkt_drop[i] = pkt_drop_num; + } + + return 0; } /* NBL_QUEUE_SET_OPS(ops_name, func) @@ -1170,24 +1791,40 @@ do { \ NBL_QUEUE_SET_OPS(setup_rss, nbl_res_queue_setup_rss); \ NBL_QUEUE_SET_OPS(remove_rss, nbl_res_queue_remove_rss); \ NBL_QUEUE_SET_OPS(setup_queue, nbl_res_queue_setup_queue); \ + NBL_QUEUE_SET_OPS(remove_queue, nbl_res_queue_remove_queue); \ NBL_QUEUE_SET_OPS(remove_all_queues, nbl_res_queue_remove_all_queues); \ NBL_QUEUE_SET_OPS(cfg_dsch, nbl_res_queue_cfg_dsch); \ NBL_QUEUE_SET_OPS(setup_cqs, nbl_res_queue_setup_cqs); \ NBL_QUEUE_SET_OPS(remove_cqs, nbl_res_queue_remove_cqs); \ NBL_QUEUE_SET_OPS(queue_init, nbl_res_queue_init); \ NBL_QUEUE_SET_OPS(get_queue_err_stats, nbl_res_queue_get_queue_err_stats); \ + NBL_QUEUE_SET_OPS(cfg_qdisc_mqprio, nbl_res_queue_cfg_qdisc_mqprio); \ NBL_QUEUE_SET_OPS(get_rxfh_indir_size, nbl_res_queue_get_rxfh_indir_size); \ NBL_QUEUE_SET_OPS(get_rxfh_indir, nbl_res_queue_get_rxfh_indir); \ + NBL_QUEUE_SET_OPS(set_rxfh_indir, nbl_res_queue_set_rxfh_indir); \ NBL_QUEUE_SET_OPS(get_rxfh_rss_key_size, nbl_res_queue_get_rxfh_rss_key_size); \ NBL_QUEUE_SET_OPS(get_rxfh_rss_key, nbl_res_queue_get_rss_key); \ NBL_QUEUE_SET_OPS(get_rss_alg_sel, nbl_res_queue_get_rss_alg_sel); \ + NBL_QUEUE_SET_OPS(set_rss_alg_sel, nbl_res_queue_set_rss_alg_sel); \ NBL_QUEUE_SET_OPS(clear_queues, nbl_res_queue_clear_queues); \ NBL_QUEUE_SET_OPS(get_vsi_global_queue_id, nbl_res_queue_get_vsi_global_qid); \ + NBL_QUEUE_SET_OPS(cfg_queue_log, nbl_res_queue_cfg_log); \ + NBL_QUEUE_SET_OPS(get_queue_ctx, nbl_res_queue_get_ctx); \ NBL_QUEUE_SET_OPS(adapt_desc_gother, nbl_res_queue_adapt_desc_gother); \ + NBL_QUEUE_SET_OPS(set_desc_high_throughput, nbl_res_queue_set_desc_high_throughput); \ NBL_QUEUE_SET_OPS(flr_clear_queues, nbl_res_flr_clear_queues); \ NBL_QUEUE_SET_OPS(restore_hw_queue, nbl_res_queue_restore_hw_queue); \ NBL_QUEUE_SET_OPS(get_local_queue_id, nbl_res_queue_get_local_queue_id); \ - NBL_QUEUE_SET_OPS(set_bridge_mode, nbl_res_vsi_set_bridge_mode); \ + NBL_QUEUE_SET_OPS(set_bridge_mode, nbl_res_queue_set_bridge_mode); \ + NBL_QUEUE_SET_OPS(set_tx_rate, nbl_res_queue_set_tx_rate); \ + NBL_QUEUE_SET_OPS(set_rx_rate, nbl_res_queue_set_rx_rate); \ + 
NBL_QUEUE_SET_OPS(stop_abnormal_hw_queue, nbl_res_queue_stop_abnormal_hw_queue); \ + NBL_QUEUE_SET_OPS(get_active_func_bitmaps, nbl_res_queue_get_active_func_bitmaps); \ + NBL_QUEUE_SET_OPS(set_tc_wgt, nbl_res_queue_set_tc_wgt); \ + NBL_QUEUE_SET_OPS(configure_mirror_table, nbl_res_queue_configure_mirror_table); \ + NBL_QUEUE_SET_OPS(get_dvn_desc_req, nbl_res_queue_get_dvn_desc_req); \ + NBL_QUEUE_SET_OPS(set_dvn_desc_req, nbl_res_queue_set_dvn_desc_req); \ + NBL_QUEUE_SET_OPS(get_uvn_pkt_drop_stats, nbl_res_queue_get_uvn_pkt_drop_stats); \ } while (0) int nbl_queue_setup_ops_leonis(struct nbl_resource_ops *res_ops) diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h index 72fad47a3d87a1524465929751c341ee47d40c08..05d6ba171c895f27689a34e5a20dc31a9c686874 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h @@ -20,4 +20,6 @@ #define NBL_ADAPT_DESC_GOTHER_LEVEL0_TIMEOUT (0x12c) #define NBL_ADAPT_DESC_GOTHER_LEVEL1_TIMEOUT (0x960) +#define NBL_SHAPING_WGT_MAX (255) + #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c index 1550e38885918e0ea7f0d0285518cdd2aa0153f5..65bf31ba763abda3f305e56159038fa808ea9a5a 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c @@ -50,18 +50,26 @@ static u32 nbl_res_get_pfvf_queue_num(struct nbl_resource_mgt *res_mgt, int pfid return queue_num; } +static void nbl_res_get_rep_queue_info(void *priv, u16 *queue_num, u16 *queue_size) +{ + *queue_size = NBL_DEFAULT_DESC_NUM; + *queue_num = NBL_DEFAULT_REP_HW_QUEUE_NUM; +} + static void nbl_res_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); struct nbl_net_ring_num_info *num_info = &res_info->net_ring_num_info; u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + u16 default_queue; if (num_info->net_max_qp_num[func_id] != 0) - *queue_num = num_info->net_max_qp_num[func_id]; + default_queue = num_info->net_max_qp_num[func_id]; else - *queue_num = num_info->pf_def_max_net_qp_num; + default_queue = num_info->pf_def_max_net_qp_num; + *queue_num = min_t(u16, default_queue, NBL_VSI_PF_LEGACY_QUEUE_NUM_MAX - default_queue); *queue_size = NBL_DEFAULT_DESC_NUM; if (*queue_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) { @@ -71,8 +79,8 @@ static void nbl_res_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_s } } -static int nbl_res_get_queue_num(struct nbl_resource_mgt *res_mgt, - u16 func_id, u16 *tx_queue_num, u16 *rx_queue_num) +static int __maybe_unused nbl_res_get_queue_num(struct nbl_resource_mgt *res_mgt, + u16 func_id, u16 *tx_queue_num, u16 *rx_queue_num) { int pfid, vfid; @@ -88,44 +96,40 @@ static int nbl_res_save_vf_bar_info(struct nbl_resource_mgt *res_mgt, u16 func_id, struct nbl_register_net_param *register_param) { struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; u64 pf_bar_start; - u16 pf_bdf; 
u64 vf_bar_start; + u16 pf_bdf; u64 vf_bar_size; u16 total_vfs; u16 offset; u16 stride; - pf_bar_start = register_param->pf_bar_start; - if (pf_bar_start) { + if (func_id < NBL_RES_MGT_TO_PF_NUM(res_mgt)) { + pf_bar_start = phy_ops->get_pf_bar_addr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id); sriov_info->pf_bar_start = pf_bar_start; dev_info(dev, "sriov_info, pf_bar_start:%llx\n", sriov_info->pf_bar_start); } - pf_bdf = register_param->pf_bdf; - vf_bar_start = register_param->vf_bar_start; + pf_bdf = (u16)sriov_info->bdf; vf_bar_size = register_param->vf_bar_size; total_vfs = register_param->total_vfs; offset = register_param->offset; stride = register_param->stride; if (total_vfs) { - if (pf_bdf != sriov_info->bdf) { - dev_err(dev, "PF bdf donot equal, af record = %u, real pf bdf: %u\n", - sriov_info->bdf, pf_bdf); - return -EIO; - } sriov_info->offset = offset; sriov_info->stride = stride; + vf_bar_start = phy_ops->get_vf_bar_addr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id); sriov_info->vf_bar_start = vf_bar_start; sriov_info->vf_bar_len = vf_bar_size / total_vfs; - dev_info(dev, "sriov_info, bdf:%x:%x.%x, num_vfs:%d\n", - PCI_BUS_NUM(pf_bdf), PCI_SLOT(pf_bdf & 0xff), - PCI_FUNC(pf_bdf & 0xff), sriov_info->num_vfs); - dev_info(dev, "start_vf_func_id:%d, offset:%d, stride:%d\n", - sriov_info->start_vf_func_id, offset, stride); + dev_info(dev, "sriov_info, bdf:%x:%x.%x, num_vfs:%d, start_vf_func_id:%d, " + "offset:%d, stride:%d, vf_bar_start: %llx", + PCI_BUS_NUM(pf_bdf), PCI_SLOT(pf_bdf & 0xff), PCI_FUNC(pf_bdf & 0xff), + sriov_info->num_vfs, sriov_info->start_vf_func_id, offset, stride, + sriov_info->vf_bar_start); } return 0; @@ -135,9 +139,7 @@ static int nbl_res_prepare_vf_chan(struct nbl_resource_mgt *res_mgt, u16 func_id, struct nbl_register_net_param *register_param) { struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; - u16 pf_bdf; u16 total_vfs; u16 offset; u16 stride; @@ -150,18 +152,11 @@ static int nbl_res_prepare_vf_chan(struct nbl_resource_mgt *res_mgt, u8 function; u16 vf_func_id; - pf_bdf = register_param->pf_bdf; total_vfs = register_param->total_vfs; offset = register_param->offset; stride = register_param->stride; if (total_vfs) { - if (pf_bdf != sriov_info->bdf) { - dev_err(dev, "PF bdf donot equal, af record = %u, real pf bdf: %u\n", - sriov_info->bdf, pf_bdf); - return -EIO; - } - /* Configure mailbox qinfo_map_table for the pf's all vf, * so vf's mailbox is ready, vf can use mailbox. 
*/ @@ -217,39 +212,82 @@ static int nbl_res_update_active_vf_num(struct nbl_resource_mgt *res_mgt, u16 fu return 0; } +static u32 nbl_res_get_quirks(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_quirks(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + static int nbl_res_register_net(void *priv, u16 func_id, struct nbl_register_net_param *register_param, struct nbl_register_net_result *register_result) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_vdpa_status **vf_status = NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); netdev_features_t csumo_features = 0; netdev_features_t tso_features = 0; + netdev_features_t pf_features = 0; + netdev_features_t vlano_features = 0; u16 tx_queue_num, rx_queue_num; u8 mac[ETH_ALEN] = {0}; + u32 quirks; + u16 vsi_id; int ret = 0; - csumo_features = NBL_FEATURE(NETIF_F_RXCSUM) | - NBL_FEATURE(NETIF_F_IP_CSUM) | - NBL_FEATURE(NETIF_F_IPV6_CSUM); - tso_features = NBL_FEATURE(NETIF_F_TSO) | - NBL_FEATURE(NETIF_F_TSO6) | - NBL_FEATURE(NETIF_F_GSO_UDP_L4); + if (func_id < NBL_MAX_PF) { + nbl_res_get_eth_mac(res_mgt, mac, nbl_res_pf_to_eth_id(res_mgt, func_id)); + pf_features = NBL_FEATURE(NETIF_F_NTUPLE); + register_result->trusted = 1; + } else { + ether_addr_copy(mac, vsi_info->mac_info[func_id].mac); + register_result->trusted = vsi_info->mac_info[func_id].trusted; + } + ether_addr_copy(register_result->mac, mac); + + quirks = nbl_res_get_quirks(res_mgt); + if (performance_mode & BIT(NBL_QUIRKS_NO_TOE) || + !(quirks & BIT(NBL_QUIRKS_NO_TOE))) { + csumo_features = NBL_FEATURE(NETIF_F_RXCSUM) | + NBL_FEATURE(NETIF_F_IP_CSUM) | + NBL_FEATURE(NETIF_F_IPV6_CSUM); + tso_features = NBL_FEATURE(NETIF_F_TSO) | + NBL_FEATURE(NETIF_F_TSO6) | + NBL_FEATURE(NETIF_F_GSO_UDP_L4); + } + - register_result->hw_features |= csumo_features | + if (func_id < NBL_MAX_PF) /* not supported on VFs */ + vlano_features = NBL_FEATURE(NETIF_F_HW_VLAN_CTAG_TX) | + NBL_FEATURE(NETIF_F_HW_VLAN_CTAG_RX) | + NBL_FEATURE(NETIF_F_HW_VLAN_STAG_TX) | + NBL_FEATURE(NETIF_F_HW_VLAN_STAG_RX); + + register_result->hw_features |= pf_features | + csumo_features | tso_features | + vlano_features | NBL_FEATURE(NETIF_F_SG) | - NBL_FEATURE(NETIF_F_HW_TC); + NBL_FEATURE(NETIF_F_HW_TC) | + NBL_FEATURE(NETIF_F_RXHASH); + register_result->features |= register_result->hw_features | NBL_FEATURE(NETIF_F_HW_TC) | NBL_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER) | NBL_FEATURE(NETIF_F_HW_VLAN_STAG_FILTER); + register_result->vlan_features = register_result->features; + register_result->max_mtu = NBL_MAX_JUMBO_FRAME_SIZE - NBL_PKT_HDR_PAD; - if (func_id < NBL_MAX_PF) - nbl_res_get_eth_mac(res_mgt, mac, nbl_res_pf_to_eth_id(res_mgt, func_id)); - memcpy(register_result->mac, mac, ETH_ALEN); + register_result->vlan_proto = vsi_info->mac_info[func_id].vlan_proto; + register_result->vlan_tci = vsi_info->mac_info[func_id].vlan_tci; + register_result->rate = vsi_info->mac_info[func_id].rate; nbl_res_get_queue_num(res_mgt, func_id, &tx_queue_num, &rx_queue_num); register_result->tx_queue_num = tx_queue_num; @@ -263,6 +301,31 @@ static int nbl_res_register_net(void *priv, u16 func_id, goto update_active_vf_fail; } + if 
(register_param->is_vdpa) { + set_bit(func_id, resource_info->vdpa.vdpa_func_bitmap); + + if (!vf_status[func_id]) { + vf_status[func_id] = devm_kzalloc(dev, sizeof(struct nbl_vdpa_status), + GFP_KERNEL); + if (!vf_status[func_id]) { + ret = -ENOMEM; + goto alloc_nbl_vf_stats_fail; + } + } + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + phy_ops->get_dstat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + &vf_status[func_id]->init_stats.tx_packets, + &vf_status[func_id]->init_stats.tx_bytes); + phy_ops->get_ustat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + &vf_status[func_id]->init_stats.rx_packets, + &vf_status[func_id]->init_stats.rx_bytes); + memcpy(&vf_status[func_id]->prev_stats, &vf_status[func_id]->init_stats, + sizeof(vf_status[func_id]->prev_stats)); + vf_status[func_id]->timestamp = jiffies; + } else { + clear_bit(func_id, resource_info->vdpa.vdpa_func_bitmap); + } + if (func_id >= NBL_RES_MGT_TO_PF_NUM(res_mgt)) return 0; @@ -280,199 +343,1587 @@ static int nbl_res_register_net(void *priv, u16 func_id, prepare_vf_chan_fail: save_vf_bar_info_fail: +alloc_nbl_vf_stats_fail: update_active_vf_fail: - return -EIO; + return ret; +} + +static int nbl_res_unregister_net(void *priv, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_update_active_vf_num(res_mgt, func_id, 0); +} + +static u16 nbl_res_get_vsi_id(void *priv, u16 func_id, u16 type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_func_id_to_vsi_id(res_mgt, func_id, type); +} + +static void nbl_res_get_eth_id(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u16 pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + + *eth_mode = eth_info->eth_num; + if (pf_id < eth_info->eth_num) { + *eth_id = eth_info->eth_id[pf_id]; + *logic_eth_id = pf_id; + /* if pf_id >= eth_num, use eth_id 0 */ + } else { + *eth_id = eth_info->eth_id[0]; + *logic_eth_id = 0; + } +} + +static DEFINE_IDA(nbl_adev_ida); + +static void nbl_res_setup_rdma_id(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id; + + for_each_set_bit(func_id, resource_info->rdma_info.func_cap, NBL_MAX_FUNC) + resource_info->rdma_info.rdma_id[func_id] = ida_alloc(&nbl_adev_ida, GFP_KERNEL); +} + +static void nbl_res_remove_rdma_id(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id; + + for_each_set_bit(func_id, resource_info->rdma_info.func_cap, NBL_MAX_FUNC) + ida_free(&nbl_adev_ida, resource_info->rdma_info.rdma_id[func_id]); +} + +static void nbl_res_register_rdma(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + /* Even if we don't have capability, we would still return mem_type */ + param->has_rdma = false; + param->mem_type = resource_info->rdma_info.mem_type; + + if (test_bit(func_id, resource_info->rdma_info.func_cap)) { + param->has_rdma = true; + param->intr_num = NBL_RES_RDMA_INTR_NUM; + + param->id = 
resource_info->rdma_info.rdma_id[func_id]; + } +} + +static void nbl_res_unregister_rdma(void *priv, u16 vsi_id) +{ +} + +static void nbl_res_register_rdma_bond(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id = 0; + int i; + + register_param->has_rdma = false; + register_param->mem_type = resource_info->rdma_info.mem_type; + + /* Rdma bond can be created only if all members have rdma cap */ + for (i = 0; i < list_param->lag_num; i++) { + func_id = nbl_res_vsi_id_to_func_id(res_mgt, list_param->member_list[i].vsi_id); + + if (!test_bit(func_id, resource_info->rdma_info.func_cap)) + return; + } + + register_param->has_rdma = true; + register_param->intr_num = NBL_RES_RDMA_INTR_NUM; +} + +static void nbl_res_unregister_rdma_bond(void *priv, u16 lag_id) +{ +} + +static u8 __iomem *nbl_res_get_hw_addr(void *priv, size_t *size) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_hw_addr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), size); +} + +static u64 nbl_res_get_real_hw_addr(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + return nbl_res_get_func_bar_base_addr(res_mgt, func_id); +} + +static u16 nbl_res_get_function_id(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); +} + +static void nbl_res_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + nbl_res_func_id_to_bdf(res_mgt, func_id, bus, dev, function); +} + +static u32 nbl_res_check_active_vf(void *priv, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_sriov_info *sriov_info = res_mgt->resource_info->sriov_info; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int pfid = 0; + int vfid = 0; + int ret; + + ret = nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "convert func id to pfvfid failed\n"); + return ret; + } + + return sriov_info[pfid].active_vf_num; +} + +static void nbl_res_set_dport_fc_th_vld(void *priv, u8 eth_id, bool vld) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->set_dport_fc_th_vld(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, vld); +} + +static void nbl_res_set_shaping_dport_vld(void *priv, u8 eth_id, bool vld) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->set_shaping_dport_vld(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, vld); +} + +static int nbl_res_set_phy_flow(struct nbl_resource_mgt *res_mgt, u8 eth_id, bool status) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + u8 pf_id = nbl_res_eth_id_to_pf_id(res_mgt, eth_id); + int i, ret = 0; + + for (i = 0; i < NBL_VSI_SERV_MAX_TYPE; i++) { + ret = 
phy_ops->cfg_phy_flow(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vsi_info->serv_info[pf_id][i].base_id, + vsi_info->serv_info[pf_id][i].num, eth_id, status); + if (ret) + return ret; + } + + nbl_res_set_dport_fc_th_vld(res_mgt, eth_id, !status); + nbl_res_set_shaping_dport_vld(res_mgt, eth_id, !status); + phy_ops->cfg_eth_port_priority_replace(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, status); + + return 0; +} + +static void nbl_res_get_base_mac_addr(void *priv, u8 *mac) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + nbl_res_get_eth_mac(res_mgt, mac, nbl_res_pf_to_eth_id(res_mgt, 0)); +} + +static int nbl_res_update_offload_status(struct nbl_resource_mgt_leonis *res_mgt_leonis) +{ + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt); + struct nbl_event_acl_state_update_data event_data = {0}; + struct nbl_sriov_info *sriov_info; + bool status; + int i, j, start, end, vsi_match, eth_id, eth_tmp, lag_id, ret = 0; + + for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) { + status = false; + eth_id = nbl_res_pf_to_eth_id(res_mgt, i); + + start = nbl_res_pfvfid_to_vsi_id(res_mgt, i, U32_MAX, NBL_VSI_DATA); + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + i; + end = nbl_res_pfvfid_to_vsi_id(res_mgt, i, sriov_info->num_vfs, NBL_VSI_DATA); + vsi_match = find_next_bit(rep_status->rep_vsi_bitmap, + NBL_OFFLOAD_STATUS_MAX_VSI, start); + if (vsi_match <= end || test_bit(eth_id, rep_status->rep_eth_bitmap)) + status = true; + + if (rep_status->status[eth_id] != status) { + ret = nbl_res_set_phy_flow(res_mgt, eth_id, status); + if (ret) + return ret; + rep_status->status[eth_id] = status; + } + } + + /* Update bond offload status. + * For a bond, only one PF is bound to ovs-dpdk, but all PFs should + * switch to offload. 
+ */ + for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) { + status = false; + eth_id = nbl_res_pf_to_eth_id(res_mgt, i); + lag_id = nbl_res_eth_id_to_lag_id(res_mgt, eth_id); + + if (lag_id >= 0 && lag_id < NBL_LAG_MAX_NUM) { + for (j = 0; j < eth_bond_info->entry[lag_id].lag_num && + NBL_ETH_BOND_VALID_PORT(j); j++) { + /* In a bond, if any port is offloaded, all ports are offloaded */ + eth_tmp = eth_bond_info->entry[lag_id].eth_id[j]; + if (rep_status->status[eth_tmp]) { + status = true; + break; + } + } + + if (rep_status->status[eth_id] != status) { + ret = nbl_res_set_phy_flow(res_mgt, eth_id, status); + if (ret) + return ret; + rep_status->status[eth_id] = status; + } + } + } + + event_data.is_offload = false; + + for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) { + eth_id = nbl_res_pf_to_eth_id(res_mgt, i); + if (rep_status->status[eth_id]) + event_data.is_offload = true; + } + + nbl_event_notify(NBL_EVENT_ACL_STATE_UPDATE, &event_data, NBL_COMMON_TO_VSI_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + + return 0; +} + +static int nbl_res_set_pmd_debug(void *priv, bool pmd_debug) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + + rep_status->pmd_debug = pmd_debug; + return 0; +} + +static void nbl_res_set_offload_status(void *priv, u16 func_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_upcall_port_info *upcall_port_info = + &res_mgt_leonis->pmd_status.upcall_port_info; + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + + if (!upcall_port_info->upcall_port_active || + upcall_port_info->func_id != func_id) + return; + + rep_status->timestamp = jiffies; +} + +static void nbl_res_vdpa_itr_update(struct nbl_resource_mgt *res_mgt, + u16 func_id, bool active) +{ + struct nbl_vdpa_info *vdpa_info = &res_mgt->resource_info->vdpa; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_vdpa_status *vdpa_vf_stats = vdpa_info->vf_stats[func_id]; + struct nbl_vf_stats cur_stats = {0}, *prev_stats; + u64 tx_rates = 0, rx_rates = 0, pkt_rates = 0, time_diff; + u16 itr_level = 0; + u16 vsi_id; + + if (!vdpa_vf_stats) + return; + + if (active) { + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + phy_ops->get_dstat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + &cur_stats.tx_packets, &cur_stats.tx_bytes); + phy_ops->get_ustat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + &cur_stats.rx_packets, &cur_stats.rx_bytes); + + time_diff = jiffies - vdpa_vf_stats->timestamp; + if (time_diff > 0) { + prev_stats = &vdpa_vf_stats->prev_stats; + tx_rates = (cur_stats.tx_packets - prev_stats->tx_packets) / time_diff * HZ; + rx_rates = (cur_stats.rx_packets - prev_stats->rx_packets) / time_diff * HZ; + pkt_rates = max_t(u64, tx_rates, rx_rates); + + itr_level = nbl_res_intr_get_suppress_level(res_mgt, pkt_rates, + vdpa_vf_stats->itr_level); + } else { + itr_level = vdpa_vf_stats->itr_level; + } + + memcpy(&vdpa_vf_stats->prev_stats, &cur_stats, sizeof(cur_stats)); + vdpa_vf_stats->timestamp = jiffies; + } + + if (itr_level != vdpa_vf_stats->itr_level) { + nbl_res_intr_set_intr_suppress_level(res_mgt, func_id, 0, U16_MAX, itr_level); + vdpa_vf_stats->itr_level = itr_level; + } +} + +static int nbl_res_check_offload_status(void *priv, bool *is_down) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct 
nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_resource_info *res_info = res_mgt->resource_info; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_upcall_port_info *upcall_port_info = + &res_mgt_leonis->pmd_status.upcall_port_info; + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int i; + u16 func_id; + u32 start, batch_cnt; + + if (!upcall_port_info->upcall_port_active) + return 0; + + /* Skip the liveness check while pmd_debug is on */ + if (rep_status->pmd_debug) { + nbl_info(common, NBL_DEBUG_FLOW, "pmd is in debug mode now"); + rep_status->timestamp = jiffies; + return 0; + } + + start = res_info->vdpa.start; + batch_cnt = NBL_VDPA_ITR_BATCH_CNT; + if (rep_status->timestamp && time_after(jiffies, rep_status->timestamp + 30 * HZ)) { + for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_VSI; i++) + clear_bit(i, rep_status->rep_vsi_bitmap); + + for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_ETH; i++) + clear_bit(i, rep_status->rep_eth_bitmap); + + upcall_port_info->upcall_port_active = false; + nbl_err(common, NBL_DEBUG_FLOW, "offload found inactive!"); + phy_ops->clear_profile_table_action(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + phy_ops->ipro_chksum_err_ctrl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), 0); + nbl_res_update_offload_status(res_mgt_leonis); + *is_down = true; + + start = 0; + batch_cnt = NBL_MAX_FUNC; + } + + i = 0; + for (; start < NBL_MAX_FUNC;) { + func_id = find_next_bit(res_info->vdpa.vdpa_func_bitmap, NBL_MAX_FUNC, start); + if (func_id >= NBL_MAX_FUNC) { + start = 0; + break; + } + i++; + start = func_id + 1; + + nbl_res_vdpa_itr_update(res_mgt, func_id, + upcall_port_info->upcall_port_active); + if (i >= batch_cnt) + break; + } + + res_info->vdpa.start = start; + + return 0; +} + +static void nbl_res_get_rep_feature(void *priv, struct nbl_register_net_result *register_result) +{ + netdev_features_t csumo_features; + + csumo_features = NBL_FEATURE(NETIF_F_RXCSUM) | + NBL_FEATURE(NETIF_F_IP_CSUM) | + NBL_FEATURE(NETIF_F_IPV6_CSUM) | + NBL_FEATURE(NETIF_F_SCTP_CRC); + register_result->hw_features = csumo_features | NBL_FEATURE(NETIF_F_HW_TC); + register_result->features |= csumo_features | NBL_FEATURE(NETIF_F_HW_TC); + register_result->max_mtu = NBL_MAX_JUMBO_FRAME_SIZE - NBL_PKT_HDR_PAD; +} + +static void nbl_res_set_eswitch_mode(void *priv, u16 switch_mode) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_tx_ring *tx_ring; + struct nbl_res_rx_ring *rx_ring; + int i; + + resource_info->eswitch_info->mode = switch_mode; + + /* set ring info switch_mode */ + for (i = 0; i < txrx_mgt->rx_ring_num; i++) { + rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, i); + tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, i); + + tx_ring->mode = switch_mode; + rx_ring->mode = switch_mode; + } +} + +static u16 nbl_res_get_eswitch_mode(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + + if (resource_info->eswitch_info) + return resource_info->eswitch_info->mode; + else + return NBL_ESWITCH_NONE; +} + +static int nbl_res_alloc_rep_data(void *priv, int num_vfs, u16 vf_base_vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt 
*)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + + eswitch_info->rep_data = devm_kcalloc(dev, num_vfs, + sizeof(struct nbl_rep_data), GFP_KERNEL); + if (!eswitch_info->rep_data) + return -ENOMEM; + eswitch_info->num_vfs = num_vfs; + eswitch_info->vf_base_vsi_id = vf_base_vsi_id; + return 0; +} + +static void nbl_res_free_rep_data(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info **eswitch_info = &NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + + if ((*eswitch_info)->rep_data) { + devm_kfree(dev, (*eswitch_info)->rep_data); + (*eswitch_info)->rep_data = NULL; + } + (*eswitch_info)->num_vfs = 0; +} + +static void nbl_res_set_rep_netdev_info(void *priv, void *rep_data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_rep_data *rep = (struct nbl_rep_data *)rep_data; + u16 rep_data_index; + + rep_data_index = nbl_res_get_rep_idx(eswitch_info, rep->rep_vsi_id); + if (rep_data_index >= eswitch_info->num_vfs) + return; + eswitch_info->rep_data[rep_data_index].rep_vsi_id = rep->rep_vsi_id; + eswitch_info->rep_data[rep_data_index].netdev = rep->netdev; + nbl_info(common, NBL_DEBUG_RESOURCE, "nbl set rep netdev rep_vsi_id %d netdev %p\n", + rep->rep_vsi_id, rep->netdev); +} + +static void nbl_res_unset_rep_netdev_info(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + + memset(eswitch_info->rep_data, 0, + eswitch_info->num_vfs * sizeof(struct nbl_rep_data)); +} + +static struct net_device *nbl_res_get_rep_netdev_info(void *priv, u16 rep_data_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + + if (rep_data_index >= eswitch_info->num_vfs) + return NULL; + return eswitch_info->rep_data[rep_data_index].netdev; +} + +static int nbl_res_disable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_set_phy_flow(res_mgt, eth_id, true); +} + +static int nbl_res_enable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_set_phy_flow(res_mgt, eth_id, false); +} + +static void nbl_res_init_acl(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (!res_mgt->resource_info->init_acl_refcnt) + phy_ops->init_acl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + res_mgt->resource_info->init_acl_refcnt++; +} + +static void nbl_res_uninit_acl(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + res_mgt->resource_info->init_acl_refcnt--; + + if (!res_mgt->resource_info->init_acl_refcnt) + phy_ops->uninit_acl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static int nbl_res_set_upcall_rule(void *priv, u8 eth_id, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return 
phy_ops->set_upcall_rule(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, vsi_id); +} + +static int nbl_res_unset_upcall_rule(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->unset_upcall_rule(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id); +} + +static void nbl_res_get_rep_stats(void *priv, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats, bool is_tx) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + struct nbl_rep_data *rep_data; + unsigned int start; + u16 rep_data_index = 0; + + if (!eswitch_info || eswitch_info->mode != NBL_ESWITCH_OFFLOADS || + ((nbl_res_get_rep_idx(eswitch_info, rep_vsi_id)) == U32_MAX)) + return; + + rep_data_index = nbl_res_get_rep_idx(eswitch_info, rep_vsi_id); + if (rep_data_index >= eswitch_info->num_vfs) + return; + rep_data = &eswitch_info->rep_data[rep_data_index]; + if (rep_data->rep_vsi_id != rep_vsi_id) + return; + + if (is_tx) { + do { + start = u64_stats_fetch_begin(&rep_data->rep_syncp); + rep_stats->packets = rep_data->tx_packets; + rep_stats->bytes = rep_data->tx_bytes; + } while (u64_stats_fetch_retry(&rep_data->rep_syncp, start)); + } else { + do { + start = u64_stats_fetch_begin(&rep_data->rep_syncp); + rep_stats->packets = rep_data->rx_packets; + rep_stats->bytes = rep_data->rx_bytes; + } while (u64_stats_fetch_retry(&rep_data->rep_syncp, start)); + } +} + +static u16 nbl_res_get_rep_index(void *priv, u16 rep_vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + + return nbl_res_get_rep_idx(eswitch_info, rep_vsi_id); +} + +static void nbl_res_register_net_rep(void *priv, u16 pf_id, u16 vf_id, + struct nbl_register_net_rep_result *result) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + int pf_id_tmp, vf_id_tmp; + + pf_id_tmp = pf_id; + if (vf_id == U16_MAX) + vf_id_tmp = U32_MAX; + else + vf_id_tmp = vf_id; + + result->vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, pf_id_tmp, vf_id_tmp, NBL_VSI_DATA); + result->func_id = nbl_res_pfvfid_to_func_id(res_mgt, pf_id_tmp, vf_id_tmp); + + if (result->vsi_id >= NBL_OFFLOAD_STATUS_MAX_VSI) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "register_net_rep pf %d vf %d vsi_id %d err\n", + pf_id, vf_id, result->vsi_id); + return; + } + + set_bit(result->vsi_id, rep_status->rep_vsi_bitmap); + nbl_res_update_offload_status(res_mgt_leonis); +} + +static void nbl_res_unregister_net_rep(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (vsi_id >= NBL_OFFLOAD_STATUS_MAX_VSI) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "unregister_net_rep vsi_id %d err\n", vsi_id); + return; + } + + /* set rss to l4 */ + phy_ops->set_epro_rss_default(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id); + 
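(The reader loop in nbl_res_get_rep_stats() above is the kernel's u64_stats_fetch_begin()/u64_stats_fetch_retry() idiom: keep re-reading the 64-bit packet/byte pair until the sequence count is stable, so a reader racing the TX/RX hot path never sees a torn {packets, bytes} snapshot. Below is a minimal userspace analogue with C11 atomics; every name in it is invented for illustration, and it is deliberately simplified — the real kernel API degrades to a no-op on 64-bit SMP and handles the full memory ordering of the data words.)

#include <stdatomic.h>
#include <stdint.h>

struct rep_counters {
	atomic_uint seq;	/* even: stable, odd: writer in progress */
	uint64_t packets;
	uint64_t bytes;
};

static void rep_counters_add(struct rep_counters *c, uint64_t pkts, uint64_t bytes)
{
	atomic_fetch_add_explicit(&c->seq, 1, memory_order_acq_rel);	/* -> odd */
	c->packets += pkts;
	c->bytes += bytes;
	atomic_fetch_add_explicit(&c->seq, 1, memory_order_acq_rel);	/* -> even */
}

static void rep_counters_read(struct rep_counters *c, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&c->seq, memory_order_acquire);
		*pkts = c->packets;
		*bytes = c->bytes;
		/* retry if a writer was active, or one completed in between */
	} while ((start & 1) ||
		 start != atomic_load_explicit(&c->seq, memory_order_acquire));
}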
clear_bit(vsi_id, rep_status->rep_vsi_bitmap); + nbl_res_update_offload_status(res_mgt_leonis); +} + +static void nbl_res_register_eth_rep(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + + if (eth_id >= NBL_OFFLOAD_STATUS_MAX_ETH) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "register_eth_rep eth_id %d err\n", eth_id); + return; + } + set_bit(eth_id, rep_status->rep_eth_bitmap); + nbl_res_update_offload_status(res_mgt_leonis); +} + +static void nbl_res_unregister_eth_rep(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + + if (eth_id >= NBL_OFFLOAD_STATUS_MAX_ETH) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "unregister_eth_rep eth_id %d err\n", eth_id); + return; + } + + clear_bit(eth_id, rep_status->rep_eth_bitmap); + nbl_res_update_offload_status(res_mgt_leonis); +} + +static int nbl_res_register_upcall_port(void *priv, u16 func_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_upcall_port_info *upcall_port_info = + &res_mgt_leonis->pmd_status.upcall_port_info; + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + u16 vsi_id = nbl_res_func_id_to_vsi_id(&res_mgt_leonis->res_mgt, func_id, + NBL_VSI_SERV_PF_DATA_TYPE); + int i; + + rep_status->timestamp = jiffies; + + if (!upcall_port_info->upcall_port_active) { + upcall_port_info->func_id = func_id; + upcall_port_info->upcall_port_active = true; + + for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_VSI; i++) + clear_bit(i, rep_status->rep_vsi_bitmap); + + for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_ETH; i++) + clear_bit(i, rep_status->rep_eth_bitmap); + + set_bit(vsi_id, rep_status->rep_vsi_bitmap); + + nbl_res_update_offload_status(res_mgt_leonis); + return 0; + } + + if (func_id != upcall_port_info->func_id) { + nbl_err(NBL_RES_MGT_TO_COMMON(&res_mgt_leonis->res_mgt), NBL_DEBUG_RESOURCE, + "cannot register a second PF as upcall port, register_upcall_port failed\n"); + return -EINVAL; + } + + return 0; +} + +static void nbl_res_unregister_upcall_port(void *priv, u16 func_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_upcall_port_info *upcall_port_info = + &res_mgt_leonis->pmd_status.upcall_port_info; + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + int i; + + if (!upcall_port_info->upcall_port_active || + upcall_port_info->func_id != func_id) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "func_id %d unregister upcall failed\n", func_id); + return; + } + + for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_VSI; i++) + clear_bit(i, rep_status->rep_vsi_bitmap); + + for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_ETH; i++) + clear_bit(i, rep_status->rep_eth_bitmap); + + nbl_res_update_offload_status(res_mgt_leonis); + upcall_port_info->upcall_port_active = false; +} + +static void 
nbl_res_init_offload_fwd(void *priv, u16 func_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->init_offload_fwd(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id); +} + +static void nbl_res_init_cmdq(void *priv, void *data, u16 func_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_chan_cmdq_init_info *cmdq_param = + (struct nbl_chan_cmdq_init_info *)data; + u8 bus; + u8 dev; + u8 func; + + nbl_res_func_id_to_bdf(res_mgt, func_id, &bus, &dev, &func); + cmdq_param->bdf_num = (u16)PCI_DEVID(bus, PCI_DEVFN(dev, func)); + + phy_ops->init_cmdq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data, func_id); +} + +static void nbl_res_destroy_cmdq(void *priv) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->destroy_cmdq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static void nbl_res_reset_cmdq(void *priv) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->reset_cmdq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static void nbl_res_init_rep(void *priv, u16 vsi_id, u8 inner_type, + u8 outer_type, u8 rep_type) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->init_rep(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + inner_type, outer_type, rep_type); +} + +static void nbl_res_init_flow(void *priv, void *param) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->init_flow(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), param); +} + +static void nbl_res_deinit_flow(void *priv) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->deinit_flow(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static void nbl_res_offload_flow_rule(void *priv, void *data) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->offload_flow_rule(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data); +} + +static void nbl_res_get_flow_acl_switch(void *priv, u8 *acl_enable) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->get_flow_acl_switch(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + acl_enable); +} + +static void nbl_res_get_line_rate_info(void *priv, 
void *data, void *result) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->get_line_rate_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data, result); +} + +/* The return value needs converting to millidegree Celsius (1/1000 degree) */ +static u32 nbl_res_get_chip_temperature(void *priv, enum nbl_hwmon_type type, u32 sensor_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_chip_temperature(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), type, sensor_id); +} + +static int nbl_res_init_vdpaq(void *priv, u16 func_id, u64 pa, u32 size) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u8 bus, dev, func; + u16 bdf; + + nbl_res_func_id_to_bdf(res_mgt, func_id, &bus, &dev, &func); + bdf = PCI_DEVID(bus, PCI_DEVFN(dev, func)); + + return phy_ops->init_vdpaq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, bdf, pa, size); +} + +static void nbl_res_destroy_vdpaq(void *priv) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->destroy_vdpaq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static int nbl_res_get_upcall_port(void *priv, u16 *bdf) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_upcall_port_info *upcall_port_info = + &res_mgt_leonis->pmd_status.upcall_port_info; + u8 bus, dev, func; + + if (!upcall_port_info->upcall_port_active) + return U32_MAX; + + nbl_res_func_id_to_bdf(res_mgt, upcall_port_info->func_id, &bus, &dev, &func); + *bdf = (u16)PCI_DEVID(bus, PCI_DEVFN(dev, func)); + return 0; +} + +static void nbl_res_get_reg_dump(void *priv, u32 *data, u32 len) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->get_reg_dump(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data, len); +} + +static int nbl_res_get_reg_dump_len(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_reg_dump_len(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static int nbl_res_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnormal_info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->process_abnormal_event(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), abnormal_info); +} + +static int nbl_res_cfg_lag_hash_algorithm(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->cfg_lag_hash_algorithm(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + eth_id, lag_id, 
hash_type); +} + +static int nbl_res_cfg_lag_member_fwd(void *priv, u16 eth_id, u16 lag_id, u8 fwd) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->cfg_lag_member_fwd(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, lag_id, fwd); +} + +static int nbl_res_cfg_lag_member_list(void *priv, struct nbl_lag_member_list_param *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->cfg_lag_member_list(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), param); +} + +static int nbl_res_cfg_lag_member_up_attr(void *priv, u16 eth_id, u16 lag_id, bool enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->cfg_lag_member_up_attr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + eth_id, lag_id, enable); +} + +static int nbl_res_cfg_bond_shaping(void *priv, u8 eth_id, bool enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->cfg_bond_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, + res_mgt->resource_info->board_info.eth_speed, enable); +} + +static void nbl_res_cfg_bgid_back_pressure(void *priv, u8 main_eth_id, u8 other_eth_id, bool enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->cfg_bgid_back_pressure(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), main_eth_id, other_eth_id, + enable, res_mgt->resource_info->board_info.eth_speed); +} + +static int nbl_res_switchdev_init_cmdq(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_mgt *chan_mgt = NBL_RES_MGT_TO_CHAN_PRIV(res_mgt); + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); + + return chan_ops->init_cmdq(dev, chan_mgt); +} + +static int nbl_res_switchdev_deinit_cmdq(void *priv, u8 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_mgt *chan_mgt = NBL_RES_MGT_TO_CHAN_PRIV(res_mgt); + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); + + return chan_ops->deinit_cmdq(dev, chan_mgt, index); +} + +static int nbl_res_set_tc_flow_info(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int i = 0; + + if (common->tc_inst_id >= NBL_TC_FLOW_INST_COUNT) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow set inst_id=%d is invalid.\n", + common->tc_inst_id); + return -EINVAL; + } + + if (!tc_flow_mgt->pf_set_tc_count) { + nbl_tc_set_flow_info(tc_flow_mgt, common->tc_inst_id); + nbl_info(common, NBL_DEBUG_FLOW, "tc flow set inst_id=%d success.\n", + common->tc_inst_id); + + nbl_info(common, NBL_DEBUG_FLOW, "tc flow set kgen cvlan zero, set ped vsi type zero\n"); + phy_ops->set_tc_kgen_cvlan_zero(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + for (i = 0; i < NBL_TPID_PORT_NUM; i++) + phy_ops->set_ped_tab_vsi_type(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), i, 0); + } + + 
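(set_tc_flow_info()/unset_tc_flow_info() share one piece of global flow state between PFs with a bare use count: the first caller programs the hardware, callers in between only bump pf_set_tc_count, and the last unset flushes the flows and restores the registers — the same shape as the init_acl_refcnt handling earlier in this file. A generic sketch of the pattern follows; the helper names are invented, and like the driver code it assumes the caller serializes access.)

static int shared_get(unsigned int *use_cnt, int (*init)(void))
{
	int ret;

	if (*use_cnt == 0) {
		ret = init();		/* only the first user initializes */
		if (ret)
			return ret;
	}
	(*use_cnt)++;
	return 0;
}

static void shared_put(unsigned int *use_cnt, void (*uninit)(void))
{
	if (--(*use_cnt) == 0)
		uninit();		/* only the last user tears down */
}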
tc_flow_mgt->pf_set_tc_count++; + phy_ops->ipro_chksum_err_ctrl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), 1); + nbl_info(common, NBL_DEBUG_FLOW, "tc flow set pf_set_tc_count++=%d\n", + tc_flow_mgt->pf_set_tc_count); + + return 0; +} + +static int nbl_res_unset_tc_flow_info(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int ret = 0; + int i = 0; + + if (common->tc_inst_id >= NBL_TC_FLOW_INST_COUNT) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow unset inst_id=%d is invalid.\n", + common->tc_inst_id); + return -EINVAL; + } + + tc_flow_mgt->pf_set_tc_count--; + nbl_info(common, NBL_DEBUG_FLOW, "tc flow set pf_set_tc_count--=%d\n", + tc_flow_mgt->pf_set_tc_count); + + if (!tc_flow_mgt->pf_set_tc_count) { + ret = nbl_tc_flow_flush_flow(res_mgt); + if (ret) + return -EINVAL; + + nbl_info(common, NBL_DEBUG_FLOW, "tc flow unset kgen cvlan, set ped vsi type zero\n"); + phy_ops->unset_tc_kgen_cvlan(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + for (i = 0; i < NBL_TPID_PORT_NUM; i++) { + if (tc_flow_mgt->port_tpid_type[i] != 0) { + phy_ops->set_ped_tab_vsi_type(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + i, 0); + tc_flow_mgt->port_tpid_type[i] = 0; + } + } + + nbl_tc_unset_flow_info(common->tc_inst_id); + nbl_info(common, NBL_DEBUG_FLOW, "tc flow unset inst_id=%d success.\n", + common->tc_inst_id); + + phy_ops->ipro_chksum_err_ctrl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), 0); + } + + return 0; +} + +static int nbl_res_get_tc_flow_info(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (common->tc_inst_id >= NBL_TC_FLOW_INST_COUNT) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow get inst_id=%d is invalid.\n", + common->tc_inst_id); + return -EINVAL; + } + + if (NBL_COMMON_TO_PCI_FUNC_ID(common)) + NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt) = nbl_tc_get_flow_info(common->tc_inst_id); + nbl_info(common, NBL_DEBUG_FLOW, "tc flow get inst_id=%d success.\n", + common->tc_inst_id); + + return 0; +} + +static int nbl_res_get_driver_info(void *priv, struct nbl_driver_info *driver_info) +{ + strscpy(driver_info->driver_version, NBL_LEONIS_DRIVER_VERSION, + sizeof(driver_info->driver_version)); + return 1; +} + +static int nbl_res_get_p4_info(void *priv, char *verify_code) +{ + /* We actually only care about the snic-v3r1 part, won't check m181xx */ + strscpy(verify_code, "snic_v3r1_m181xx", NBL_P4_NAME_LEN); + + return NBL_P4_DEFAULT; +} + +static int nbl_res_get_p4_used(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + + return resource_info->p4_used; +} + +static int nbl_res_set_p4_used(void *priv, int p4_type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + + resource_info->p4_used = p4_type; + + return 0; +} + +static u32 nbl_res_get_p4_version(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return res_mgt->resource_info->board_info.p4_version; +} + +static int nbl_res_load_p4(void *priv, struct nbl_load_p4_param *p4_param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = 
NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (p4_param->start || p4_param->end) + return 0; + + phy_ops->load_p4(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), p4_param->addr, + p4_param->size, p4_param->data); + + return 0; +} + +static void nbl_res_get_board_info(void *priv, struct nbl_board_port_info *board_info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + memcpy(board_info, &res_mgt->resource_info->board_info, sizeof(*board_info)); +} + +static u16 nbl_res_get_vf_base_vsi_id(void *priv, u16 pf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_pfvfid_to_vsi_id(res_mgt, pf_id, 0, NBL_VSI_DATA); +} + +static void nbl_res_flr_clear_net(void *priv, u16 vf_id) +{ + u16 func_id = vf_id + NBL_MAX_PF; + u16 vsi_id; + + vsi_id = nbl_res_func_id_to_vsi_id(priv, func_id, NBL_VSI_SERV_VF_DATA_TYPE); + nbl_res_unregister_rdma(priv, vsi_id); + + if (nbl_res_vf_is_active(priv, func_id)) + nbl_res_unregister_net(priv, func_id); +} + +static void nbl_res_flr_clear_rdma(void *priv, u16 vf_id) +{ + u16 func_id = vf_id + NBL_MAX_PF; + u16 vsi_id; + + vsi_id = nbl_res_func_id_to_vsi_id(priv, func_id, NBL_VSI_SERV_VF_DATA_TYPE); + nbl_res_unregister_rdma(priv, vsi_id); } -static int nbl_res_unregister_net(void *priv, u16 func_id) +static u16 nbl_res_covert_vfid_to_vsi_id(void *priv, u16 vf_id) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = vf_id + NBL_MAX_PF; - return nbl_res_update_active_vf_num(res_mgt, func_id, 0); + return nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); } -static u16 nbl_res_get_vsi_id(void *priv, u16 func_id, u16 type) +static bool nbl_res_check_vf_is_active(void *priv, u16 func_id) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - return nbl_res_func_id_to_vsi_id(res_mgt, func_id, type); + return nbl_res_vf_is_active(res_mgt, func_id); } -static void nbl_res_get_eth_id(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id) +static int nbl_res_check_vf_is_vdpa(void *priv, u16 func_id, u8 *is_vdpa) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); - u16 pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); - *eth_mode = eth_info->eth_num; - if (pf_id < eth_info->eth_num) - *eth_id = eth_info->eth_id[pf_id]; - /* if pf_id > eth_num, use eth_id 0 */ - else - *eth_id = eth_info->eth_id[0]; + *is_vdpa = test_bit(func_id, resource_info->vdpa.vdpa_func_bitmap); + return 0; } -static u8 __iomem *nbl_res_get_hw_addr(void *priv, size_t *size) +static int nbl_res_get_vdpa_vf_stats(void *priv, u16 func_id, struct nbl_vf_stats *vf_stats) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_vdpa_status *vdpa_vf_stats = NULL; + struct nbl_vf_stats vdpa_vf_stats_current = {0}, *init_stats; + u16 vsi_id; + + if (NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt) && + NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt)[func_id]) { + vdpa_vf_stats = NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt)[func_id]; + init_stats = &vdpa_vf_stats->init_stats; + } else { + dev_err(dev, "function %d vdpa_vf_stats is NULL\n", func_id); + return -EFAULT; + } - return phy_ops->get_hw_addr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), size); + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + 
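(nbl_res_get_vdpa_vf_stats() reports counters relative to the init_stats snapshot taken when the vDPA function registered, so a re-registered function restarts from zero even though the hardware counters keep running. The arithmetic in isolation — the struct and names below are invented for the sketch:)

#include <stdint.h>

struct cnt { uint64_t tx_packets, tx_bytes, rx_packets, rx_bytes; };

static void stats_since_register(const struct cnt *hw_now,
				 const struct cnt *at_register,
				 struct cnt *out)
{
	/* unsigned subtraction stays correct across one counter wrap */
	out->tx_packets = hw_now->tx_packets - at_register->tx_packets;
	out->tx_bytes = hw_now->tx_bytes - at_register->tx_bytes;
	out->rx_packets = hw_now->rx_packets - at_register->rx_packets;
	out->rx_bytes = hw_now->rx_bytes - at_register->rx_bytes;
}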
phy_ops->get_dstat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + &vdpa_vf_stats_current.tx_packets, + &vdpa_vf_stats_current.tx_bytes); + phy_ops->get_ustat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + &vdpa_vf_stats_current.rx_packets, + &vdpa_vf_stats_current.rx_bytes); + + vf_stats->tx_packets = vdpa_vf_stats_current.tx_packets - init_stats->tx_packets; + vf_stats->tx_bytes = vdpa_vf_stats_current.tx_bytes - init_stats->tx_bytes; + vf_stats->rx_packets = vdpa_vf_stats_current.rx_packets - init_stats->rx_packets; + vf_stats->rx_bytes = vdpa_vf_stats_current.rx_bytes - init_stats->rx_bytes; + + return 0; } -static u64 nbl_res_get_real_hw_addr(void *priv, u16 vsi_id) +static int nbl_res_get_ustore_pkt_drop_stats(void *priv) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_ustore_stats *ustore_stats = NBL_RES_MGT_TO_USTORE_STATS(res_mgt); + struct nbl_ustore_stats ustore_stats_temp = {0}; + u8 eth_id = 0; + int i = 0; + + for (i = 0; i < eth_info->eth_num; i++) { + eth_id = eth_info->eth_id[i]; + phy_ops->get_ustore_pkt_drop_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + eth_id, &ustore_stats_temp); + ustore_stats[eth_id].rx_drop_packets += ustore_stats_temp.rx_drop_packets; + ustore_stats[eth_id].rx_trun_packets += ustore_stats_temp.rx_trun_packets; + } - return nbl_res_get_func_bar_base_addr(res_mgt, func_id); + return 0; } -static u16 nbl_res_get_function_id(void *priv, u16 vsi_id) +static int nbl_res_get_ustore_total_pkt_drop_stats(void *priv, u8 eth_id, + struct nbl_ustore_stats *nbl_ustore_stats) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_ustore_stats *ustore_stats = NBL_RES_MGT_TO_USTORE_STATS(res_mgt); - return nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + nbl_ustore_stats->rx_drop_packets = ustore_stats[eth_id].rx_drop_packets; + nbl_ustore_stats->rx_trun_packets = ustore_stats[eth_id].rx_trun_packets; + return 0; } -static void nbl_res_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +static int nbl_res_get_board_id(void *priv) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); - nbl_res_func_id_to_bdf(res_mgt, func_id, bus, dev, function); + return NBL_COMMON_TO_BOARD_ID(common); } -static u32 nbl_res_check_active_vf(void *priv, u16 func_id) +static int nbl_res_cfg_eth_bond_info(void *priv, struct nbl_lag_member_list_param *param) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_sriov_info *sriov_info = res_mgt->resource_info->sriov_info; + struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt); struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); - int pfid = 0; - int vfid = 0; - int ret; + struct nbl_eth_bond_entry origin_entry; + struct nbl_eth_bond_entry *entry = NULL; + struct nbl_event_link_status_update_data *event_data = NULL; + u8 eth_btm[NBL_MAX_ETHERNET] = {0}; + int num = 0, i = 0, j = 0; - ret = nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); - if (ret) { - nbl_err(common, NBL_DEBUG_RESOURCE, "convert func id to pfvfid failed\n"); - return ret; + if (param->lag_id >= NBL_LAG_MAX_NUM) + return -EINVAL; + + entry = ð_bond_info->entry[param->lag_id]; + 
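(nbl_res_cfg_eth_bond_info() below snapshots the old member list before rewriting the entry and then notifies the union of old and new ports, so VFs behind a port that just left the bond also receive a link-state update. The set arithmetic in isolation — sizes and names here are illustrative only:)

#include <stdint.h>

#define MAX_ETH_PORTS 8

/* every port that was in the old list or is in the new one needs an event */
static int ports_to_notify(const uint8_t *old_ids, int old_n,
			   const uint8_t *new_ids, int new_n,
			   uint8_t out_ids[MAX_ETH_PORTS])
{
	uint8_t member[MAX_ETH_PORTS] = {0};
	int i, n = 0;

	for (i = 0; i < old_n; i++)
		member[old_ids[i]] = 1;
	for (i = 0; i < new_n; i++)
		member[new_ids[i]] = 1;
	for (i = 0; i < MAX_ETH_PORTS; i++)
		if (member[i])
			out_ids[n++] = (uint8_t)i;
	return n;
}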
memcpy(&origin_entry, entry, sizeof(origin_entry)); + + /* We always clear it first, in case lag member changed. */ + memset(entry, 0, sizeof(*entry)); + + if (param->lag_num > 1) { + for (i = 0; i < param->lag_num && NBL_ETH_BOND_VALID_PORT(i); i++) { + entry->eth_id[i] = param->member_list[i].eth_id; + eth_btm[param->member_list[i].eth_id] = 1; + } + + entry->lag_id = param->lag_id; + entry->lag_num = param->lag_num; } - return sriov_info[pfid].active_vf_num; + /* If lag member changed, notify both original and new related vfs to update link_state */ + for (i = 0; i < origin_entry.lag_num && NBL_ETH_BOND_VALID_PORT(i); i++) + eth_btm[origin_entry.eth_id[i]] = 1; + + for (i = 0; i < NBL_MAX_ETHERNET; i++) + if (eth_btm[i]) + num++; + + nbl_res_update_offload_status((struct nbl_resource_mgt_leonis *)res_mgt); + + event_data = kzalloc(sizeof(*event_data), GFP_KERNEL); + if (!event_data) + return -ENOMEM; + + for (i = 0; i < NBL_MAX_ETHERNET; i++) + if (eth_btm[i]) + event_data->eth_id[j++] = i; + + event_data->num = num; + + nbl_event_notify(NBL_EVENT_LINK_STATE_UPDATE, event_data, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + kfree(event_data); + return 0; } -static void nbl_res_get_base_mac_addr(void *priv, u8 *mac) +static int nbl_res_get_eth_bond_info(void *priv, struct nbl_bond_param *param) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt); + struct nbl_eth_bond_entry *entry = NULL; + int num = 0, i = 0, j = 0, pf_id = 0; - nbl_res_get_eth_mac(res_mgt, mac, nbl_res_pf_to_eth_id(res_mgt, 0)); + for (i = 0; i < NBL_LAG_MAX_NUM; i++) { + entry = ð_bond_info->entry[i]; + + if (entry->lag_num < NBL_LAG_VALID_PORTS || entry->lag_num > NBL_LAG_MAX_PORTS) + continue; + + for (j = 0; j < entry->lag_num; j++) { + pf_id = nbl_res_eth_id_to_pf_id(res_mgt, entry->eth_id[j]); + + param->info[num].port[j].eth_id = entry->eth_id[j]; + param->info[num].port[j].vsi_id = + nbl_res_pfvfid_to_vsi_id(res_mgt, pf_id, -1, NBL_VSI_DATA); + param->info[num].port[j].is_active = + phy_ops->get_lag_fwd(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + entry->eth_id[j]); + } + + param->info[num].mem_num = entry->lag_num; + param->info[num].lag_id = entry->lag_id; + + num++; + } + + param->lag_num = num; + + return 0; } -static u32 nbl_res_get_chip_temperature(void *priv) +static void nbl_res_get_driver_version(void *priv, char *ver, int len) { - struct nbl_resource_mgt_leonis *res_mgt_leonis = - (struct nbl_resource_mgt_leonis *)priv; - struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; - struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - - return phy_ops->get_chip_temperature(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + strscpy(ver, NBL_LEONIS_DRIVER_VERSION, len); } -static u32 nbl_res_get_chip_temperature_max(void *priv) +static void nbl_res_get_xdp_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) { - return NBL_LEONIS_TEMP_MAX; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_net_ring_num_info *num_info = &res_info->net_ring_num_info; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + u16 default_queue; + if (num_info->net_max_qp_num[func_id] != 0) + default_queue = num_info->net_max_qp_num[func_id]; + else + default_queue = num_info->pf_def_max_net_qp_num; + + *queue_num = min_t(u16, 
default_queue, NBL_VSI_PF_LEGACY_QUEUE_NUM_MAX - default_queue); + + if (*queue_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) { + nbl_warn(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_QUEUE, + "Invalid xdp queue num %d for func %d, use default", *queue_num, func_id); + *queue_num = NBL_DEFAULT_PF_HW_QUEUE_NUM; + } } -static u32 nbl_res_get_chip_temperature_crit(void *priv) +static int nbl_res_get_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) { - return NBL_LEONIS_TEMP_CRIT; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->get_pfc_buffer_size(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, prio, xoff, xon); + + return 0; } -static void nbl_res_get_reg_dump(void *priv, u32 *data, u32 len) +static int nbl_res_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - phy_ops->get_reg_dump(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data, len); + return phy_ops->set_pfc_buffer_size(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, + prio, xoff, xon); } -static int nbl_res_get_reg_dump_len(void *priv) +static int nbl_res_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - return phy_ops->get_reg_dump_len(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + phy_ops->configure_qos(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, pfc, trust, dscp2prio_map); + + return 0; } -static int nbl_res_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnomal_info) +static int nbl_res_configure_rdma_bw(void *priv, u8 eth_id, int rdma_bw) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - return phy_ops->process_abnormal_event(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), abnomal_info); + phy_ops->configure_rdma_bw(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, rdma_bw); + + return 0; } -static int nbl_res_get_driver_info(void *priv, struct nbl_driver_info *driver_info) +static int nbl_res_set_rate_limit(void *priv, u16 func_id, enum nbl_traffic_type type, u32 rate) { - strscpy(driver_info->driver_version, NBL_LEONIS_DRIVER_VERSION, - sizeof(driver_info->driver_version)); - return 1; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->set_rate_limit(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, type, rate); + + return 0; } -static int nbl_res_get_p4_info(void *priv, char *verify_code) +static u32 nbl_res_get_perf_dump_length(void *priv) { - /* We actually only care about the snic-v3r1 part, won't check m181xx */ - strscpy(verify_code, "snic_v3r1_m181xx", NBL_P4_NAME_LEN); + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - return NBL_P4_DEFAULT; + return phy_ops->get_perf_dump_length(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); } -static int nbl_res_get_p4_used(void *priv) +static u32 nbl_res_get_perf_dump_data(void *priv, u8 *buffer, u32 length) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - return resource_info->p4_used; + return 
phy_ops->get_perf_dump_data(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), buffer, length); } -static int nbl_res_set_p4_used(void *priv, int p4_type) +static void nbl_res_register_dev_name(void *priv, u16 vsi_id, char *name) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u32 pf_id; - resource_info->p4_used = p4_type; - - return 0; + pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + if (WARN_ON(pf_id >= NBL_MAX_PF)) + return; + strscpy(resource_info->pf_name_list[pf_id], name, IFNAMSIZ); + nbl_info(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_RESOURCE, + "vsi:%u-pf:%u register a pf_name->%s", vsi_id, pf_id, name); } -static void nbl_res_get_board_info(void *priv, struct nbl_board_port_info *board_info) +static void nbl_res_get_dev_name(void *priv, u16 vsi_id, char *name) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - - memcpy(board_info, &res_mgt->resource_info->board_info, sizeof(*board_info)); + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + int pf_id, vf_id; + u16 func_id; + int name_len; + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pf_id, &vf_id); + if (WARN_ON(pf_id >= NBL_MAX_PF)) + return; + name_len = snprintf(name, IFNAMSIZ, "%sv%d", resource_info->pf_name_list[pf_id], vf_id); + if (name_len >= IFNAMSIZ) + nbl_err(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_RESOURCE, + "vsi:%u-pf%uvf%u name truncated to fit IFNAMSIZ", vsi_id, pf_id, vf_id); + + nbl_debug(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_RESOURCE, + "vsi:%u-pf%uvf%u get a pf_name->%s", vsi_id, pf_id, vf_id, name); } -static u16 nbl_res_get_vf_base_vsi_id(void *priv, u16 pf_id) +static int nbl_res_get_mirror_table_id(void *priv, u16 vsi_id, int dir, bool mirror_en, + u8 *mt_id) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - return nbl_res_pfvfid_to_vsi_id(res_mgt, pf_id, 0, NBL_VSI_DATA); + return phy_ops->get_mirror_table_id(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vsi_id, dir, mirror_en, mt_id); } -static void nbl_res_flr_clear_net(void *priv, u16 vf_id) +static int nbl_res_configure_mirror(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id) { - u16 func_id = vf_id + NBL_MAX_PF; + u16 data_vsi, user_vsi; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - if (nbl_res_vf_is_active(priv, func_id)) - nbl_res_unregister_net(priv, func_id); + data_vsi = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_DATA_TYPE); + user_vsi = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_USER_TYPE); + + phy_ops->configure_mirror(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data_vsi, mirror_en, dir, + mt_id); + phy_ops->configure_mirror(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), user_vsi, mirror_en, dir, + mt_id); + + return 0; } -static int nbl_res_get_board_id(void *priv) +static int nbl_res_clear_mirror_cfg(void *priv, u16 func_id) { + u16 data_vsi, user_vsi; struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - return NBL_COMMON_TO_BOARD_ID(common); + data_vsi = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_DATA_TYPE); + user_vsi = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_USER_TYPE); + + 
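(Both nbl_res_configure_mirror() and nbl_res_clear_mirror_cfg() apply the change twice — once to the PF's kernel-datapath VSI and once to its user/DPDK VSI — so mirroring behaves the same whichever datapath currently owns the traffic. A generalized shape of that loop; every identifier below is invented for the sketch:)

#include <stdint.h>

enum pf_vsi_type { PF_VSI_DATA, PF_VSI_USER, PF_VSI_TYPE_MAX };

static int apply_to_all_pf_vsis(uint16_t func_id,
				uint16_t (*to_vsi)(uint16_t func_id, int type),
				int (*apply)(uint16_t vsi_id))
{
	int type, ret;

	for (type = 0; type < PF_VSI_TYPE_MAX; type++) {
		ret = apply(to_vsi(func_id, type));
		if (ret)
			return ret;	/* stop at the first failure */
	}
	return 0;
}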
phy_ops->clear_mirror_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data_vsi); + phy_ops->clear_mirror_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), user_vsi); + + return 0; } static struct nbl_resource_ops res_ops = { @@ -482,30 +1933,121 @@ static struct nbl_resource_ops res_ops = { .get_base_mac_addr = nbl_res_get_base_mac_addr, .get_vsi_id = nbl_res_get_vsi_id, .get_eth_id = nbl_res_get_eth_id, + .get_rep_feature = nbl_res_get_rep_feature, + .get_rep_queue_info = nbl_res_get_rep_queue_info, .get_user_queue_info = nbl_res_get_user_queue_info, + .set_eswitch_mode = nbl_res_set_eswitch_mode, + .get_eswitch_mode = nbl_res_get_eswitch_mode, + .alloc_rep_data = nbl_res_alloc_rep_data, + .free_rep_data = nbl_res_free_rep_data, + .set_rep_netdev_info = nbl_res_set_rep_netdev_info, + .unset_rep_netdev_info = nbl_res_unset_rep_netdev_info, + .get_rep_netdev_info = nbl_res_get_rep_netdev_info, + .disable_phy_flow = nbl_res_disable_phy_flow, + .enable_phy_flow = nbl_res_enable_phy_flow, + .init_acl = nbl_res_init_acl, + .uninit_acl = nbl_res_uninit_acl, + .set_upcall_rule = nbl_res_set_upcall_rule, + .unset_upcall_rule = nbl_res_unset_upcall_rule, + .set_shaping_dport_vld = nbl_res_set_shaping_dport_vld, + .set_dport_fc_th_vld = nbl_res_set_dport_fc_th_vld, + .get_rep_stats = nbl_res_get_rep_stats, + .get_rep_index = nbl_res_get_rep_index, + .setup_rdma_id = nbl_res_setup_rdma_id, + .remove_rdma_id = nbl_res_remove_rdma_id, + .register_rdma = nbl_res_register_rdma, + .unregister_rdma = nbl_res_unregister_rdma, + .register_rdma_bond = nbl_res_register_rdma_bond, + .unregister_rdma_bond = nbl_res_unregister_rdma_bond, .get_hw_addr = nbl_res_get_hw_addr, .get_real_hw_addr = nbl_res_get_real_hw_addr, .get_function_id = nbl_res_get_function_id, .get_real_bdf = nbl_res_get_real_bdf, .get_product_flex_cap = nbl_res_get_flex_capability, .get_product_fix_cap = nbl_res_get_fix_capability, + .register_net_rep = nbl_res_register_net_rep, + .unregister_net_rep = nbl_res_unregister_net_rep, + .register_eth_rep = nbl_res_register_eth_rep, + .unregister_eth_rep = nbl_res_unregister_eth_rep, + .register_upcall_port = nbl_res_register_upcall_port, + .unregister_upcall_port = nbl_res_unregister_upcall_port, + .check_offload_status = nbl_res_check_offload_status, + .set_offload_status = nbl_res_set_offload_status, + .init_offload_fwd = nbl_res_init_offload_fwd, + .init_cmdq = nbl_res_init_cmdq, + .destroy_cmdq = nbl_res_destroy_cmdq, + .reset_cmdq = nbl_res_reset_cmdq, + .init_rep = nbl_res_init_rep, + .init_flow = nbl_res_init_flow, + .deinit_flow = nbl_res_deinit_flow, + .offload_flow_rule = nbl_res_offload_flow_rule, + .get_flow_acl_switch = nbl_res_get_flow_acl_switch, + .get_line_rate_info = nbl_res_get_line_rate_info, .get_chip_temperature = nbl_res_get_chip_temperature, - .get_chip_temperature_max = nbl_res_get_chip_temperature_max, - .get_chip_temperature_crit = nbl_res_get_chip_temperature_crit, .get_driver_info = nbl_res_get_driver_info, .get_board_info = nbl_res_get_board_info, .flr_clear_net = nbl_res_flr_clear_net, + .flr_clear_rdma = nbl_res_flr_clear_rdma, + .covert_vfid_to_vsi_id = nbl_res_covert_vfid_to_vsi_id, + .check_vf_is_active = nbl_res_check_vf_is_active, + .check_vf_is_vdpa = nbl_res_check_vf_is_vdpa, + .get_vdpa_vf_stats = nbl_res_get_vdpa_vf_stats, + .get_ustore_pkt_drop_stats = nbl_res_get_ustore_pkt_drop_stats, + .get_ustore_total_pkt_drop_stats = nbl_res_get_ustore_total_pkt_drop_stats, + + .init_vdpaq = nbl_res_init_vdpaq, + .destroy_vdpaq = nbl_res_destroy_vdpaq, + .get_upcall_port = 
nbl_res_get_upcall_port, .get_reg_dump = nbl_res_get_reg_dump, .get_reg_dump_len = nbl_res_get_reg_dump_len, .process_abnormal_event = nbl_res_process_abnormal_event, + .cfg_lag_hash_algorithm = nbl_res_cfg_lag_hash_algorithm, + .cfg_lag_member_fwd = nbl_res_cfg_lag_member_fwd, + .cfg_lag_member_list = nbl_res_cfg_lag_member_list, + .cfg_lag_member_up_attr = nbl_res_cfg_lag_member_up_attr, + .cfg_bond_shaping = nbl_res_cfg_bond_shaping, + .cfg_bgid_back_pressure = nbl_res_cfg_bgid_back_pressure, + + .cfg_eth_bond_info = nbl_res_cfg_eth_bond_info, + .get_eth_bond_info = nbl_res_get_eth_bond_info, + + .switchdev_init_cmdq = nbl_res_switchdev_init_cmdq, + .switchdev_deinit_cmdq = nbl_res_switchdev_deinit_cmdq, + .set_tc_flow_info = nbl_res_set_tc_flow_info, + .unset_tc_flow_info = nbl_res_unset_tc_flow_info, + .get_tc_flow_info = nbl_res_get_tc_flow_info, + .get_p4_info = nbl_res_get_p4_info, .get_p4_used = nbl_res_get_p4_used, .set_p4_used = nbl_res_set_p4_used, .get_vf_base_vsi_id = nbl_res_get_vf_base_vsi_id, + .load_p4 = nbl_res_load_p4, + .get_p4_version = nbl_res_get_p4_version, .get_board_id = nbl_res_get_board_id, + .set_pmd_debug = nbl_res_set_pmd_debug, + + .get_driver_version = nbl_res_get_driver_version, + .get_xdp_queue_info = nbl_res_get_xdp_queue_info, + .set_hw_status = nbl_res_set_hw_status, + + .configure_qos = nbl_res_configure_qos, + .configure_rdma_bw = nbl_res_configure_rdma_bw, + .set_pfc_buffer_size = nbl_res_set_pfc_buffer_size, + .get_pfc_buffer_size = nbl_res_get_pfc_buffer_size, + .set_rate_limit = nbl_res_set_rate_limit, + + .get_perf_dump_length = nbl_res_get_perf_dump_length, + .get_perf_dump_data = nbl_res_get_perf_dump_data, + + .register_dev_name = nbl_res_register_dev_name, + .get_dev_name = nbl_res_get_dev_name, + + .get_mirror_table_id = nbl_res_get_mirror_table_id, + .configure_mirror = nbl_res_configure_mirror, + .clear_mirror_cfg = nbl_res_clear_mirror_cfg, }; static struct nbl_res_product_ops product_ops = { @@ -567,6 +2109,10 @@ static int nbl_res_setup_ops(struct device *dev, struct nbl_resource_ops_tbl **r if (ret) goto setup_fail; + ret = nbl_tc_flow_setup_ops_leonis(&res_ops); + if (ret) + goto setup_fail; + ret = nbl_queue_setup_ops_leonis(&res_ops); if (ret) goto setup_fail; @@ -587,6 +2133,14 @@ static int nbl_res_setup_ops(struct device *dev, struct nbl_resource_ops_tbl **r if (ret) goto setup_fail; + ret = nbl_accel_setup_ops(&res_ops); + if (ret) + goto setup_fail; + + ret = nbl_fd_setup_ops(&res_ops); + if (ret) + goto setup_fail; + is_ops_inited = true; } @@ -600,10 +2154,36 @@ static int nbl_res_setup_ops(struct device *dev, struct nbl_resource_ops_tbl **r return -EAGAIN; } +static int nbl_res_dev_setup_eswitch_info(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_eswitch_info *eswitch_info; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + eswitch_info = devm_kzalloc(dev, sizeof(struct nbl_eswitch_info), GFP_KERNEL); + if (!eswitch_info) + return -ENOMEM; + NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt) = eswitch_info; + + return 0; +} + +static void nbl_res_pf_dev_remove_eswitch_info(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_eswitch_info **eswitch_info = &NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + + if (!(*eswitch_info)) + return; + devm_kfree(dev, *eswitch_info); + *eswitch_info = NULL; +} + static int nbl_res_ctrl_dev_setup_eth_info(struct nbl_resource_mgt *res_mgt) { struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); struct nbl_eth_info *eth_info; + struct nbl_eth_bond_info 
*eth_bond_info; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); u32 eth_num = 0; u32 eth_bitmap, eth_id; @@ -615,6 +2195,12 @@ static int nbl_res_ctrl_dev_setup_eth_info(struct nbl_resource_mgt *res_mgt) NBL_RES_MGT_TO_ETH_INFO(res_mgt) = eth_info; + eth_bond_info = devm_kzalloc(dev, sizeof(struct nbl_eth_bond_info), GFP_KERNEL); + if (!eth_bond_info) + return -ENOMEM; + + NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt) = eth_bond_info; + eth_info->eth_num = (u8)phy_ops->get_fw_eth_num(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); eth_bitmap = phy_ops->get_fw_eth_map(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); /* for 2 eth port board, the eth_id is 0, 2 */ @@ -641,13 +2227,31 @@ static int nbl_res_ctrl_dev_setup_eth_info(struct nbl_resource_mgt *res_mgt) return 0; } +static void nbl_res_ctrl_dev_remove_eth_info(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_eth_info **eth_info = &NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_eth_bond_info **eth_bond_info = &NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt); + + if (*eth_bond_info) { + devm_kfree(dev, *eth_bond_info); + *eth_bond_info = NULL; + } + + if (*eth_info) { + devm_kfree(dev, *eth_info); + *eth_info = NULL; + } +} + static int nbl_res_ctrl_dev_sriov_info_init(struct nbl_resource_mgt *res_mgt) { + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); struct device *dev = NBL_COMMON_TO_DEV(common); struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); struct nbl_sriov_info *sriov_info; - u32 vf_fid, vf_startid, vf_endid; + u32 vf_fid, vf_startid, vf_endid = NBL_MAX_VF; u16 func_id; u16 function; @@ -662,16 +2266,18 @@ static int nbl_res_ctrl_dev_sriov_info_init(struct nbl_resource_mgt *res_mgt) sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; function = NBL_COMMON_TO_PCI_FUNC_ID(common) + func_id; - sriov_info->bdf = PCI_DEVID(common->bus, + common->hw_bus = (u8)phy_ops->get_real_bus(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + sriov_info->bdf = PCI_DEVID(common->hw_bus, PCI_DEVFN(common->devid, function)); - vf_fid = phy_ops->get_host_pf_fid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), - func_id); + vf_fid = phy_ops->get_host_pf_fid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id); vf_startid = vf_fid & 0xFFFF; vf_endid = (vf_fid >> 16) & 0xFFFF; sriov_info->start_vf_func_id = vf_startid + NBL_MAX_PF_LEONIS; sriov_info->num_vfs = vf_endid - vf_startid; } + res_info->max_vf_num = vf_endid; + return 0; } @@ -687,6 +2293,20 @@ static void nbl_res_ctrl_dev_sriov_info_remove(struct nbl_resource_mgt *res_mgt) *sriov_info = NULL; } +static void nbl_res_ctrl_dev_vdpa_vf_stats_remove(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_vdpa_status **vf_status = NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + int i = 0; + + for (i = 0; i < NBL_MAX_FUNC; i++) { + if (vf_status[i]) { + devm_kfree(dev, vf_status[i]); + vf_status[i] = NULL; + } + } +} + static int nbl_res_ctrl_dev_vsi_info_init(struct nbl_resource_mgt *res_mgt) { struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); @@ -703,22 +2323,24 @@ static int nbl_res_ctrl_dev_vsi_info_init(struct nbl_resource_mgt *res_mgt) NBL_RES_MGT_TO_VSI_INFO(res_mgt) = vsi_info; /** - * 1 two port(2pf) + * case 1 two port(2pf) * pf0,pf1(NBL_VSI_SERV_PF_DATA_TYPE) vsi is 0,512 * pf0,pf1(NBL_VSI_SERV_PF_CTLR_TYPE) vsi is 1,513 * pf0,pf1(NBL_VSI_SERV_PF_USER_TYPE) vsi is 2,514 - * pf0.vf0-pf0.vf255(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 3-258 - * 
pf1.vf0-pf1.vf255(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 515-770 - * pf2-pf7(NBL_VSI_SERV_PF_EXTRA_TYPE) vsi 259-264(if exist) - * 2 four port(4pf) + * pf0,pf1(NBL_VSI_SERV_PF_XDP_TYPE) vsi is 3,515 + * pf0.vf0-pf0.vf255(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 4-259 + * pf1.vf0-pf1.vf255(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 516-771 + * pf2-pf7(NBL_VSI_SERV_PF_EXTRA_TYPE) vsi 260-265(if exist) + * case 2 four port(4pf) * pf0,pf1,pf2,pf3(NBL_VSI_SERV_PF_DATA_TYPE) vsi is 0,256,512,768 * pf0,pf1,pf2,pf3(NBL_VSI_SERV_PF_CTLR_TYPE) vsi is 1,257,513,769 * pf0,pf1,pf2,pf3(NBL_VSI_SERV_PF_USER_TYPE) vsi is 2,258,514,770 - * pf0.vf0-pf0.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 3-130 - * pf1.vf0-pf1.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 259-386 - * pf2.vf0-pf2.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 515-642 - * pf3.vf0-pf3.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 771-898 - * pf4-pf7(NBL_VSI_SERV_PF_EXTRA_TYPE) vsi 387-390(if exist) + * pf0,pf1,pf2,pf3(NBL_VSI_SERV_PF_XDP_TYPE) vsi is 3,259,515,771 + * pf0.vf0-pf0.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 4-131 + * pf1.vf0-pf1.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 260-387 + * pf2.vf0-pf2.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 516-643 + * pf3.vf0-pf3.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 772-899 + * pf4-pf7(NBL_VSI_SERV_PF_EXTRA_TYPE) vsi 132-135(if exist) */ vsi_info->num = eth_info->eth_num; @@ -734,9 +2356,13 @@ static int nbl_res_ctrl_dev_vsi_info_init(struct nbl_resource_mgt *res_mgt) vsi_info->serv_info[i][NBL_VSI_SERV_PF_CTLR_TYPE].base_id + vsi_info->serv_info[i][NBL_VSI_SERV_PF_CTLR_TYPE].num; vsi_info->serv_info[i][NBL_VSI_SERV_PF_USER_TYPE].num = 1; - vsi_info->serv_info[i][NBL_VSI_SERV_VF_DATA_TYPE].base_id = + vsi_info->serv_info[i][NBL_VSI_SERV_PF_XDP_TYPE].base_id = vsi_info->serv_info[i][NBL_VSI_SERV_PF_USER_TYPE].base_id + vsi_info->serv_info[i][NBL_VSI_SERV_PF_USER_TYPE].num; + vsi_info->serv_info[i][NBL_VSI_SERV_PF_XDP_TYPE].num = 1; + vsi_info->serv_info[i][NBL_VSI_SERV_VF_DATA_TYPE].base_id = + vsi_info->serv_info[i][NBL_VSI_SERV_PF_XDP_TYPE].base_id + + vsi_info->serv_info[i][NBL_VSI_SERV_PF_XDP_TYPE].num; sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + i; vsi_info->serv_info[i][NBL_VSI_SERV_VF_DATA_TYPE].num = sriov_info->num_vfs; } @@ -774,38 +2400,49 @@ static int nbl_res_ring_num_info_init(struct nbl_resource_mgt *res_mgt) return 0; } +static int nbl_res_ctrl_dev_ustore_stats_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_ustore_stats *ustore_stats; + + ustore_stats = devm_kcalloc(dev, NBL_MAX_ETHERNET, + sizeof(struct nbl_ustore_stats), GFP_KERNEL); + if (!ustore_stats) + return -ENOMEM; + + NBL_RES_MGT_TO_USTORE_STATS(res_mgt) = ustore_stats; + + return 0; +} + +static void nbl_res_ctrl_dev_ustore_stats_remove(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_ustore_stats **ustore_stats = &NBL_RES_MGT_TO_USTORE_STATS(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + + if (!(*ustore_stats)) + return; + + devm_kfree(dev, *ustore_stats); + *ustore_stats = NULL; +} + static int nbl_res_check_fw_working(struct nbl_resource_mgt *res_mgt) { struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - unsigned long fw_pong_current = 0; + unsigned long fw_pong_current; unsigned long seconds_current = 0; - unsigned long sleep_us = USEC_PER_MSEC; - u64 timeout_us = 100 * USEC_PER_MSEC; - ktime_t timeout; seconds_current = (unsigned long)ktime_get_real_seconds(); 
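+	/*
+	 * Heartbeat handshake: seed the pong register with a value the FW
+	 * cannot have produced yet (now - 1), post the current wall-clock
+	 * seconds as the ping, then poll until the FW echoes the ping value
+	 * back through pong (bounded at ~500ms by the poll call below):
+	 *
+	 *	set_fw_pong(now - 1);		// invalidate stale acks
+	 *	set_fw_ping(now);		// post heartbeat seq
+	 *	poll_timeout(get_fw_pong() == now, 500ms);
+	 */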
phy_ops->set_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), seconds_current - 1); phy_ops->set_fw_ping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), seconds_current); - timeout = ktime_add_us(ktime_get(), timeout_us); - might_sleep_if((sleep_us) != 0); - - for (;;) { - fw_pong_current = phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); - if (fw_pong_current == seconds_current) - break; - if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { - fw_pong_current = phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); - break; - } - if (sleep_us) - usleep_range((sleep_us >> 2) + 1, sleep_us); - } - - if (fw_pong_current == seconds_current) - return 0; - else - return -ETIMEDOUT; + /* Wait for FW to ack the first heartbeat seq */ + return nbl_read_poll_timeout(phy_ops->get_fw_pong, fw_pong_current, + fw_pong_current == seconds_current, + USEC_PER_MSEC, 500 * USEC_PER_MSEC, + false, NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); } static int nbl_res_init_pf_num(struct nbl_resource_mgt *res_mgt) @@ -842,15 +2479,28 @@ static void nbl_res_init_board_info(struct nbl_resource_mgt *res_mgt) static void nbl_res_stop(struct nbl_resource_mgt_leonis *res_mgt_leonis) { struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + nbl_fd_mgt_stop(res_mgt); nbl_queue_mgt_stop(res_mgt); nbl_txrx_mgt_stop(res_mgt); nbl_intr_mgt_stop(res_mgt); nbl_adminq_mgt_stop(res_mgt); nbl_vsi_mgt_stop(res_mgt); + nbl_accel_mgt_stop(res_mgt); nbl_flow_mgt_stop_leonis(res_mgt); + nbl_res_ctrl_dev_ustore_stats_remove(res_mgt); + nbl_res_ctrl_dev_vdpa_vf_stats_remove(res_mgt); nbl_res_ctrl_dev_remove_vsi_info(res_mgt); + nbl_res_ctrl_dev_remove_eth_info(res_mgt); nbl_res_ctrl_dev_sriov_info_remove(res_mgt); + nbl_res_pf_dev_remove_eswitch_info(res_mgt); + + /* only pf0 needs tc_flow_mgt_stop */ + if (!common->is_vf && !NBL_COMMON_TO_PCI_FUNC_ID(common)) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow stop tc flow mgt"); + nbl_tc_flow_mgt_stop_leonis(res_mgt); + } } static int nbl_res_start(struct nbl_resource_mgt_leonis *res_mgt_leonis, @@ -858,6 +2508,9 @@ static int nbl_res_start(struct nbl_resource_mgt_leonis *res_mgt_leonis, { struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_upcall_port_info *upcall_port_info = + &res_mgt_leonis->pmd_status.upcall_port_info; + u32 quirks; int ret = 0; if (caps.has_ctrl) { @@ -893,10 +2546,18 @@ static int nbl_res_start(struct nbl_resource_mgt_leonis *res_mgt_leonis, if (ret) goto start_fail; + ret = nbl_res_ctrl_dev_ustore_stats_init(res_mgt); + if (ret) + goto start_fail; + ret = nbl_flow_mgt_start_leonis(res_mgt); if (ret) goto start_fail; + ret = nbl_tc_flow_mgt_start_leonis(res_mgt); + if (ret) + goto start_fail; + ret = nbl_queue_mgt_start(res_mgt); if (ret) goto start_fail; @@ -913,27 +2574,63 @@ static int nbl_res_start(struct nbl_resource_mgt_leonis *res_mgt_leonis, if (ret) goto start_fail; - nbl_res_set_flex_capability(res_mgt, NBL_SECURITY_ACCEL_CAP); - nbl_res_set_fix_capability(res_mgt, NBL_DUMP_FLOW_CAP); + ret = nbl_accel_mgt_start(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_fd_mgt_start(res_mgt); + if (ret) + goto start_fail; + + nbl_res_set_flex_capability(res_mgt, NBL_DUMP_FLOW_CAP); + nbl_res_set_flex_capability(res_mgt, NBL_DUMP_FD_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_OFFLOAD_NETWORK_CAP); nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_HB_CAP); nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_RESET_CAP); 
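+	/*
+	 * Two capability flavours are set up here: "fix" bits that are
+	 * enabled unconditionally for this product, and "flex" bits (e.g.
+	 * the dump and security-accel capabilities) that may vary per
+	 * configuration. A plausible sketch of the setter, assuming a
+	 * plain bitmap underneath (field name hypothetical):
+	 *
+	 *	void nbl_res_set_fix_capability(struct nbl_resource_mgt *mgt,
+	 *					int cap)
+	 *	{
+	 *		set_bit(cap, mgt->fix_cap_bmp);
+	 *	}
+	 */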
nbl_res_set_fix_capability(res_mgt, NBL_TASK_CLEAN_ADMINDQ_CAP); nbl_res_set_fix_capability(res_mgt, NBL_RESTOOL_CAP); nbl_res_set_fix_capability(res_mgt, NBL_TASK_ADAPT_DESC_GOTHER); nbl_res_set_fix_capability(res_mgt, NBL_PROCESS_FLR_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_RESET_CTRL_CAP); + /* leonis af needs a pmd_debug for dpdk gdb debugging */ + nbl_res_set_fix_capability(res_mgt, NBL_PMD_DEBUG); + nbl_res_set_fix_capability(res_mgt, NBL_HIGH_THROUGHPUT_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_HEALTH_REPORT_TEMP_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_HEALTH_REPORT_REBOOT_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_DVN_DESC_REQ_SYSFS_CAP); + nbl_res_set_flex_capability(res_mgt, NBL_SECURITY_ACCEL_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_IPSEC_AGE_CAP); + upcall_port_info->upcall_port_active = false; } if (caps.has_net) { ret = nbl_txrx_mgt_start(res_mgt); if (ret) goto start_fail; + + if (!caps.is_vf) { + ret = nbl_res_dev_setup_eswitch_info(res_mgt); + if (ret) + goto start_fail; + } } nbl_res_set_fix_capability(res_mgt, NBL_HWMON_TEMP_CAP); nbl_res_set_fix_capability(res_mgt, NBL_TASK_CLEAN_MAILBOX_CAP); nbl_res_set_fix_capability(res_mgt, NBL_ITR_DYNAMIC); nbl_res_set_fix_capability(res_mgt, NBL_P4_CAP); - nbl_res_set_fix_capability(res_mgt, NBL_TASK_KEEP_ALIVE); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_RESET_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_QOS_SYSFS_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_MIRROR_SYSFS_CAP); + + nbl_res_set_fix_capability(res_mgt, NBL_XDP_CAP); + + quirks = nbl_res_get_quirks(res_mgt); + if (quirks & BIT(NBL_QUIRKS_NO_TOE)) { + nbl_res_set_fix_capability(res_mgt, NBL_TASK_KEEP_ALIVE); + if (caps.has_ctrl) + nbl_res_set_fix_capability(res_mgt, NBL_RECOVERY_ABNORMAL_STATUS); + } return 0; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h index ba1320dcb972f605a99a4ef97668e31358e3857d..24e2b67ec54eceac2cd1d8c73e4688987cd2a897 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h @@ -11,7 +11,7 @@ #define NBL_MAX_PF_LEONIS 8 /* product NO(ASIC SNIC as 3)-V NO.R NO.B NO.SP NO */ -#define NBL_LEONIS_DRIVER_VERSION "3-3.1.120" +#define NBL_LEONIS_DRIVER_VERSION "3-3.1.512.2" int nbl_flow_mgt_start_leonis(struct nbl_resource_mgt *res_mgt); void nbl_flow_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt); @@ -19,6 +19,11 @@ int nbl_flow_setup_ops_leonis(struct nbl_resource_ops *resource_ops); void nbl_flow_remove_ops_leonis(struct nbl_resource_ops *resource_ops); int nbl_queue_setup_ops_leonis(struct nbl_resource_ops *resource_ops); void nbl_queue_remove_ops_leonis(struct nbl_resource_ops *resource_ops); +int nbl_tc_flow_mgt_start_leonis(struct nbl_resource_mgt *res_mgt); +void nbl_tc_flow_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt); +int nbl_tc_flow_setup_ops_leonis(struct nbl_resource_ops *resource_ops); +void nbl_tc_flow_remove_ops_leonis(struct nbl_resource_ops *resource_ops); +int nbl_tc_flow_flush_flow(struct nbl_resource_mgt *res_mgt); void nbl_queue_mgt_init_leonis(struct nbl_queue_mgt *queue_mgt); int nbl_res_queue_setup_qid_map_table_leonis(struct nbl_resource_mgt *res_mgt, u16 func_id, diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.c 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.c new file mode 100644 index 0000000000000000000000000000000000000000..38f8a4fece01505f34912e8b4516009a818a372f --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.c @@ -0,0 +1,2142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include "nbl_tc_flow_filter_leonis.h" +#include "nbl_p4_actions.h" +#include "nbl_tc_tun_leonis.h" +#include "nbl_tc_flow_leonis.h" +#include "nbl_tc_pedit.h" + +#define NBL_ACT_OFT 16 +#define NBL_GET_ACT_INFO(data, idx) (*(u16 *)&(data) + ((idx) << NBL_ACT_OFT)) + +static const struct nbl_cmd_hdr g_cmd_hdr[] = { + [NBL_FEM_KTAT_WRITE] = { NBL_BLOCK_PPE, NBL_MODULE_FEM, + NBL_TABLE_FEM_KTAT, NBL_CMD_OP_WRITE }, + [NBL_FEM_KTAT_READ] = { NBL_BLOCK_PPE, NBL_MODULE_FEM, + NBL_TABLE_FEM_KTAT, NBL_CMD_OP_READ }, + [NBL_FEM_KTAT_SEARCH] = { NBL_BLOCK_PPE, NBL_MODULE_FEM, + NBL_TABLE_FEM_KTAT, NBL_CMD_OP_SEARCH }, + [NBL_FEM_HT_WRITE] = { NBL_BLOCK_PPE, NBL_MODULE_FEM, NBL_TABLE_FEM_HT, + NBL_CMD_OP_WRITE }, + [NBL_FEM_HT_READ] = { NBL_BLOCK_PPE, NBL_MODULE_FEM, NBL_TABLE_FEM_HT, + NBL_CMD_OP_READ }, +}; + +static int nbl_set_tcam_process(struct nbl_common_info *common, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + struct nbl_tcam_item *tcam_item, + struct nbl_flow_tcam_ad_item *ad_item, + u16 *index, bool *is_new) +{ + int ret; + + if (!nbl_tcam_key_lookup(tcam_pp_key_mng, tcam_item, index)) { + tcam_pp_key_mng[*index].ref_cnt++; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow tcam:ref_cnt++ pp%d index=%d, ref_cnt=%d", + tcam_item->pp_type, *index, + tcam_pp_key_mng[*index].ref_cnt); + if (tcam_item->key_mode == NBL_TC_KT_FULL_MODE) { + tcam_pp_key_mng[*index + 1].ref_cnt++; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow tcam:ref_cnt++ pp%d index=%d, ref_cnt=%d", + tcam_item->pp_type, *index + 1, + tcam_pp_key_mng[*index + 1].ref_cnt); + } + } else { + ret = nbl_insert_tcam_key_ad(common, tcam_pp_key_mng, tcam_pp_ad_mng, + tcam_item, ad_item, index); + *is_new = true; + if (ret) + return ret; + } + + return 0; +} + +static int nbl_flow_ht_assign_proc(struct nbl_resource_mgt *res_mgt, + struct nbl_mt_input *mt_input, + struct nbl_flow_pp_ht_mng *pp_ht0_mng, + struct nbl_flow_pp_ht_mng *pp_ht1_mng, + struct nbl_tc_ht_item *ht_item, + struct nbl_tcam_item *tcam_item) +{ + int ret = 0; + u16 i = 0; + u16 ht0_hash = 0; + u16 ht1_hash = 0; + struct nbl_flow_pp_ht_tbl *pp_ht0_node = NULL; + struct nbl_flow_pp_ht_tbl *pp_ht1_node = NULL; + u32 num = 0; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + ht0_hash = NBL_CRC16_CCITT(mt_input->key, NBL_KT_BYTE_LEN); + ht1_hash = NBL_CRC16_IBM(mt_input->key, NBL_KT_BYTE_LEN); + + ht0_hash = + nbl_hash_transfer(ht0_hash, mt_input->power, mt_input->depth); + ht1_hash = + nbl_hash_transfer(ht1_hash, mt_input->power, mt_input->depth); + + pp_ht0_node = pp_ht0_mng->hash_map[ht0_hash]; + pp_ht1_node = pp_ht1_mng->hash_map[ht1_hash]; + + ht_item->ht0_hash = ht0_hash; + ht_item->ht1_hash = ht1_hash; + ht_item->tbl_id = mt_input->tbl_id; + + /* two flows share the same ht0/ht1, put the new one into the tcam */ + if (nbl_pp_ht0_ht1_search(pp_ht0_mng, ht0_hash, pp_ht1_mng, ht1_hash)) { + if ((*tcam_item->pp_tcam_count < NBL_FEM_TCAM_MAX_NUM - num - 1) || + (*tcam_item->pp_tcam_count == NBL_FEM_TCAM_MAX_NUM - num - 1 && + tcam_item->key_mode == NBL_TC_KT_HALF_MODE)) { + tcam_item->tcam_flag = 
true; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow tcam:pp%d has the same ht0=%x,ht1=%x,put it to tcam.\n", + mt_input->pp_type, ht0_hash, ht1_hash); + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow tcam:pp%d has the same ht0=%x,ht1=%x,exceed max num.\n", + mt_input->pp_type, ht0_hash, ht1_hash); + ret = -ENOSPC; + } + return ret; + } + + if (!pp_ht0_node && !pp_ht1_node) { + ret = nbl_insert_pp_ht(res_mgt, pp_ht0_mng, ht0_hash, ht1_hash, + mt_input->tbl_id); + ht_item->ht_entry = NBL_HASH0; + ht_item->hash_bucket = 0; + + } else if (pp_ht0_node && !pp_ht1_node) { + if (pp_ht0_node->ref_cnt >= NBL_HASH_CFT_AVL) { + ret = nbl_insert_pp_ht(res_mgt, pp_ht1_mng, ht1_hash, ht0_hash, + mt_input->tbl_id); + ht_item->ht_entry = NBL_HASH1; + ht_item->hash_bucket = 0; + } else { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (pp_ht0_node->key[i].vid == 0) { + pp_ht0_node->key[i].vid = 1; + pp_ht0_node->key[i].ht_other_index = + ht1_hash; + pp_ht0_node->key[i].kt_index = + mt_input->tbl_id; + pp_ht0_node->ref_cnt++; + ht_item->ht_entry = NBL_HASH0; + ht_item->hash_bucket = i; + break; + } + } + } + } else if (!pp_ht0_node && pp_ht1_node) { + if (pp_ht1_node->ref_cnt >= NBL_HASH_CFT_AVL) { + ret = nbl_insert_pp_ht(res_mgt, pp_ht0_mng, ht0_hash, ht1_hash, + mt_input->tbl_id); + ht_item->ht_entry = NBL_HASH0; + ht_item->hash_bucket = 0; + } else { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (pp_ht1_node->key[i].vid == 0) { + pp_ht1_node->key[i].vid = 1; + pp_ht1_node->key[i].ht_other_index = + ht0_hash; + pp_ht1_node->key[i].kt_index = + mt_input->tbl_id; + pp_ht1_node->ref_cnt++; + ht_item->ht_entry = NBL_HASH1; + ht_item->hash_bucket = i; + break; + } + } + } + } else { + if (pp_ht0_node->ref_cnt <= NBL_HASH_CFT_AVL || + (pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht0_node->ref_cnt < NBL_HASH_CFT_MAX && + pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL)) { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (pp_ht0_node->key[i].vid == 0) { + pp_ht0_node->key[i].vid = 1; + pp_ht0_node->key[i].ht_other_index = + ht1_hash; + pp_ht0_node->key[i].kt_index = + mt_input->tbl_id; + pp_ht0_node->ref_cnt++; + ht_item->ht_entry = NBL_HASH0; + ht_item->hash_bucket = i; + break; + } + } + } else if ((pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht1_node->ref_cnt <= NBL_HASH_CFT_AVL) || + (pp_ht0_node->ref_cnt == NBL_HASH_CFT_MAX && + pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht1_node->ref_cnt < NBL_HASH_CFT_MAX)) { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (pp_ht1_node->key[i].vid == 0) { + pp_ht1_node->key[i].vid = 1; + pp_ht1_node->key[i].ht_other_index = + ht0_hash; + pp_ht1_node->key[i].kt_index = + mt_input->tbl_id; + pp_ht1_node->ref_cnt++; + ht_item->ht_entry = NBL_HASH1; + ht_item->hash_bucket = i; + break; + } + } + } else { + if ((*tcam_item->pp_tcam_count < + NBL_FEM_TCAM_MAX_NUM - num - 1) || + (*tcam_item->pp_tcam_count == + NBL_FEM_TCAM_MAX_NUM - num - 1 && + tcam_item->key_mode == NBL_TC_KT_HALF_MODE)) { + tcam_item->tcam_flag = true; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow tcam:pp%d ht0=%x,cnt=%d,ht1=%x,cnt=%d, " + "put it to tcam.\n", + mt_input->pp_type, ht0_hash, + pp_ht0_node->ref_cnt, ht1_hash, + pp_ht1_node->ref_cnt); + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow tcam: pp%d ht0=%x,ht1=%x,exceed max tcam num.\n", + mt_input->pp_type, ht0_hash, ht1_hash); + ret = -ENOSPC; + } + } + } + + return ret; +} + +static inline u8 nbl_flow_act_num(struct nbl_mt_input *input, + u16 count) +{ + if (count <= input->kt_left_num) + return NBL_FEM_AT_NO_ENTRY; + else if 
(count <= input->kt_left_num + NBL_MAX_ACTION_NUM - 1) + return NBL_FEM_AT_ONE_ENTRY; + else if (count <= input->kt_left_num + 2 * (NBL_MAX_ACTION_NUM - 1)) + return NBL_FEM_AT_TWO_ENTRY; + + return NBL_FEM_AT_TWO_ENTRY; +} + +static int +nbl_flow_port_id_action_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_action_data set_dport = {.data = 0}; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(tc_flow_mgt->res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_txrx_bond_info *bond_info = &txrx_mgt->bond_info; + u16 port_id = 0; + u16 act_idx = *item; + u16 cur_eth_proto = 0; + u32 slave1_port_id = 0; + u32 slave2_port_id = 0; + + if (!action || !buf) + return -EINVAL; + + set_dport.dport.up.port_type = action->port_type; + set_dport.dport.up.port_id = action->port_id; + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.up.next_stg_sel = action->next_stg_sel; + + memcpy(&port_id, &set_dport, 2); + buf[act_idx] = port_id + (NBL_ACT_SET_DPORT << 16); + + if (!(action->flag & NBL_FLOW_ACTION_PUSH_OUTER_VLAN)) + goto ret_info; + + if (action->vlan.eth_proto == NBL_QINQ_TPID_VALUE) + cur_eth_proto = NBL_QINQ_TPYE; + else if (action->vlan.eth_proto == NBL_VLAN_TPID_VALUE) + cur_eth_proto = NBL_VLAN_TPYE; + else + goto ret_info; + + if ((action->vlan.port_type == NBL_TC_PORT_TYPE_VSI || + action->vlan.port_type == NBL_TC_PORT_TYPE_ETH) && + cur_eth_proto != tc_flow_mgt->port_tpid_type[action->vlan.port_id]) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow port_id=%d,eth_proto=%d.\n", + action->vlan.port_id, cur_eth_proto); + phy_ops->set_ped_tab_vsi_type(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt), + action->vlan.port_id, cur_eth_proto); + tc_flow_mgt->port_tpid_type[action->vlan.port_id] = cur_eth_proto; + goto ret_info; + } + + slave1_port_id = bond_info->eth_id[0] + NBL_VLAN_TYPE_ETH_BASE; + slave2_port_id = bond_info->eth_id[1] + NBL_VLAN_TYPE_ETH_BASE; + + if (action->vlan.port_type == NBL_TC_PORT_TYPE_BOND && bond_info->bond_enable && + action->vlan.port_id == bond_info->lag_id && + (cur_eth_proto != tc_flow_mgt->port_tpid_type[slave1_port_id] || + cur_eth_proto != tc_flow_mgt->port_tpid_type[slave2_port_id])) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow lag_id = %d, port1_id=%d, eth_proto=%d, port2_id=%d, eth_proto=%d.\n", + bond_info->lag_id, slave1_port_id, cur_eth_proto, + slave2_port_id, cur_eth_proto); + phy_ops->set_ped_tab_vsi_type(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt), + slave1_port_id, cur_eth_proto); + tc_flow_mgt->port_tpid_type[slave1_port_id] = cur_eth_proto; + phy_ops->set_ped_tab_vsi_type(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt), + slave2_port_id, cur_eth_proto); + tc_flow_mgt->port_tpid_type[slave2_port_id] = cur_eth_proto; + } + +ret_info: + return 0; +} + +static int nbl_flow_drop_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_action_data set_dport = {.data = 0}; + u16 port_id = 0; + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + set_dport.dport.up.port_type = SET_DPORT_TYPE_SP_PORT; + set_dport.dport.up.port_id = 0x3FF; + set_dport.dport.up.upcall_flag = AUX_KEEP_FWD_TYPE; + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO; + + 
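+	/*
+	 * Each hardware action occupies one 32-bit AT word: the 16-bit
+	 * operand sits in the low half and the opcode in the high half
+	 * (NBL_ACT_OFT == 16). Drop reuses the set-dport encoding with the
+	 * special port id 0x3FF, i.e. roughly:
+	 *
+	 *	act_word = operand | (opcode << NBL_ACT_OFT);
+	 *
+	 * The driver spells this "operand + (opcode << 16)", which is
+	 * equivalent because the two halves never overlap.
+	 */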
memcpy(&port_id, &set_dport, 2); + buf[act_idx] = port_id + (NBL_ACT_SET_DPORT << 16); + + return 0; +} + +static int +nbl_flow_counter_action_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = + (action->counter_id & 0x1FFFF) + (NBL_ACT_SET_FLOW_STAT0 << 16); + return 0; +} + +static int +nbl_flow_mcc_action_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + int i; + int ret = 0; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + u16 mcc_port = 0; + union nbl_action_data mcc_dport = {.data = 0}; + + if (!action || !buf) + return -EINVAL; + + for (i = 0; i < action->mcc_cnt; i++) { + ret = nbl_tc_mcc_add_leaf_node(&tc_flow_mgt->tc_mcc_mgt, + action->port_mcc[i].dport_id, + action->port_mcc[i].port_type); + if (ret < 0) { + nbl_tc_mcc_free_list(&tc_flow_mgt->tc_mcc_mgt); + return ret; + } + + if (i == action->mcc_cnt - 1) { + edit_item->mcc_idx = ret; + edit_item->is_mir = true; + } + } + + buf[act_idx] = edit_item->mcc_idx + (NBL_ACT_SET_MCC << 16); + ++act_idx; + + mcc_dport.set_fwd_type.identify = NBL_SET_FWD_TYPE_IDENTIFY; + mcc_dport.set_fwd_type.next_stg_vld = 1; + mcc_dport.set_fwd_type.next_stg = NBL_NEXT_STG_MCC; + memcpy(&mcc_port, &mcc_dport, 2); + buf[act_idx] = mcc_port + (NBL_ACT_SET_AUX_FIELD << 16); + *item = act_idx; + + nbl_tc_mcc_add_hw_tbl(tc_flow_mgt->res_mgt, &tc_flow_mgt->tc_mcc_mgt); + + nbl_tc_mcc_get_list(&tc_flow_mgt->tc_mcc_mgt, &edit_item->tc_mcc_list); + + return 0; +} + +static int +nbl_flow_push_outer_vlan_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = action->vlan.vlan_tag + (NBL_ACT_ADD_SVLAN << 16); + return 0; +} + +static int +nbl_flow_push_inner_vlan_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = action->vlan.vlan_tag + (NBL_ACT_ADD_CVLAN << 16); + return 0; +} + +static int +nbl_flow_pop_outer_vlan_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = NBL_ACT_DEL_SVLAN << 16; + return 0; +} + +static int +nbl_flow_pop_inner_vlan_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = NBL_ACT_DEL_CVLAN << 16; + return 0; +} + +static int +nbl_flow_tunnel_encap_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 vni_h; + u16 vni_l; + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + vni_l = (u16)(action->vni & 0x0000ffff); + vni_h = (u16)(action->vni >> 16); + buf[act_idx] = (action->encap_idx & 0x1FFFF) + (NBL_ACT_TNL_ENCAP << 16); + act_idx++; + buf[act_idx] = vni_h + (NBL_ACT_SET_VNI1 << 16); + act_idx++; + buf[act_idx] = vni_l + (NBL_ACT_SET_VNI0 << 16); + *item = act_idx; + + return 0; +} + +static int 
+nbl_flow_tunnel_decap_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = NBL_ACT_TNL_DECAP << 16; + + return 0; +} + +static u32 nbl_flow_set_pedit_act(struct nbl_resource_mgt *res_mgt, + struct nbl_tc_pedit_entry *in_e, + enum nbl_flow_ped_type pedit_type, u32 act_id) +{ + u32 act = 0; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(tc_flow_mgt->res_mgt); + + /* ref_node no need write ped cuz first node had done it */ + if (!NBL_TC_PEDIT_GET_NODE_VAL(in_e)) + phy_ops->write_ped_tbl(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt), + in_e->key, nbl_tc_pedit_get_hw_id(in_e), pedit_type); + act = nbl_tc_pedit_get_hw_id(in_e) + (act_id << 16); + + return act; +} + +static int nbl_flow_set_sip_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 act_idx = *item; + void *out_e = NULL; + struct nbl_tc_pedit_entry in_e; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + enum nbl_flow_ped_type pedit_type; + + memset(&in_e, 0, sizeof(in_e)); + /* ipv4 should write in the high 32-bits of ped_tbl */ + in_e.ip[1] = be32_to_cpu(action->tc_pedit_info.val.ip4.saddr); + if (action->flag & NBL_FLOW_ACTION_EGRESS) + pedit_type = NBL_FLOW_PED_DIP_TYPE; + else + pedit_type = NBL_FLOW_PED_UIP_TYPE; + + NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 1); + ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_sip error"); + return -ENOMEM; + } + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%d-%u):sip:%u, hw-idx:%u", + pedit_type, action->tc_pedit_info.pedit_node.pedits, + action->tc_pedit_info.val.ip4.saddr, nbl_tc_pedit_get_hw_id(&in_e)); + + buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e, pedit_type, NBL_ACT_REP_IPV4_SIP); + NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node); + NBL_TC_PEDIT_SET_NODE_RES_ENTRY(action->tc_pedit_info.pedit_node, pedit_type, out_e); + return ret; +} + +static int nbl_flow_set_dip_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 act_idx = *item; + struct nbl_tc_pedit_entry in_e; + void *out_e = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + enum nbl_flow_ped_type pedit_type; + + memset(&in_e, 0, sizeof(in_e)); + /* ipv4 should write in the high 32-bits of ped_tbl */ + in_e.ip[1] = be32_to_cpu(action->tc_pedit_info.val.ip4.daddr); + if (action->flag & NBL_FLOW_ACTION_EGRESS) + pedit_type = NBL_FLOW_PED_DIP_TYPE; + else + pedit_type = NBL_FLOW_PED_UIP_TYPE; + + NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 1); + ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_dip error"); + return -ENOMEM; + } + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%d-%u):dip:%u, hw-idx:%u", + pedit_type, action->tc_pedit_info.pedit_node.pedits, + action->tc_pedit_info.val.ip4.daddr, 
nbl_tc_pedit_get_hw_id(&in_e)); + buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e, pedit_type, NBL_ACT_REP_IPV4_DIP); + NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node); + + /* update pedit_type, for dst ip store in _D_TYPE */ + NBL_TC_PEDIT_SET_D_TYPE(pedit_type); + NBL_TC_PEDIT_SET_NODE_RES_ENTRY(action->tc_pedit_info.pedit_node, pedit_type, out_e); + return ret; +} + +static int nbl_flow_set_sip6_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 act_idx = *item; + struct nbl_tc_pedit_entry in_e; + void *out_e = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + enum nbl_flow_ped_type pedit_type; + int idx; + char ip6[128]; + int oft = 0; + u32 *cur_ip_s = (u32 *)&in_e.ip6; + u32 *ip = &action->tc_pedit_info.val.ip6.saddr.in6_u.u6_addr32[3]; + + memset(&in_e, 0, sizeof(in_e)); + for (idx = 0; idx < 4; ++idx) { + *cur_ip_s = be32_to_cpu(*ip); + oft += snprintf(&ip6[oft], 128, "-%x", *cur_ip_s); + --ip; + ++cur_ip_s; + } + + if (action->flag & NBL_FLOW_ACTION_EGRESS) + pedit_type = NBL_FLOW_PED_DIP_TYPE; + else + pedit_type = NBL_FLOW_PED_UIP_TYPE; + + NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 4); + NBL_TC_PEDIT_SET_NODE_H(&in_e); + ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_sip6 error"); + return -ENOMEM; + } + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%d-%u):sip6:%s, hw-idx:%u", + pedit_type, action->tc_pedit_info.pedit_node.pedits, ip6, + nbl_tc_pedit_get_hw_id(&in_e)); + buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e, + NBL_TC_PEDIT_GET_IP6_PHY_TYPE(pedit_type), + NBL_ACT_REP_IPV6_SIP); + + NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node); + NBL_TC_PEDIT_SET_NODE_RES_ENTRY(action->tc_pedit_info.pedit_node, pedit_type, out_e); + return ret; +} + +static int nbl_flow_set_dip6_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 act_idx = *item; + struct nbl_tc_pedit_entry in_e; + void *out_e = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + enum nbl_flow_ped_type pedit_type; + int idx; + char ip6[128]; + int oft = 0; + u32 *cur_ip_s = (u32 *)&in_e.ip6; + u32 *ip = &action->tc_pedit_info.val.ip6.daddr.in6_u.u6_addr32[3]; + + memset(&in_e, 0, sizeof(in_e)); + for (idx = 0; idx < 4; ++idx) { + *cur_ip_s = be32_to_cpu(*ip); + oft += snprintf(&ip6[oft], 128 - oft, "-%x", *cur_ip_s); + --ip; + ++cur_ip_s; + } + + if (action->flag & NBL_FLOW_ACTION_EGRESS) + pedit_type = NBL_FLOW_PED_DIP_TYPE; + else + pedit_type = NBL_FLOW_PED_UIP_TYPE; + + NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 4); + NBL_TC_PEDIT_SET_NODE_H(&in_e); + ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_dip6 error"); + return -ENOMEM; + } + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%u-%d):dip6:%s, hw-idx:%u", + pedit_type, action->tc_pedit_info.pedit_node.pedits, ip6, + nbl_tc_pedit_get_hw_id(&in_e)); + buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e, + NBL_TC_PEDIT_GET_IP6_PHY_TYPE(pedit_type), + NBL_ACT_REP_IPV6_DIP); 
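+	/*
+	 * Note the word order above: the copy loop walks the big-endian
+	 * source from u6_addr32[3] down to u6_addr32[0] while filling
+	 * in_e.ip6 upwards, so the 128-bit address lands word-reversed and
+	 * byte-swapped per word, presumably matching the PED table layout.
+	 * Equivalently, in indexed form:
+	 *
+	 *	for (idx = 0; idx < 4; ++idx)
+	 *		ip6_words[idx] = be32_to_cpu(addr.u6_addr32[3 - idx]);
+	 */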
+ + NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node); + /* update pedit_type, for dst ip store in _D_TYPE */ + NBL_TC_PEDIT_SET_D_TYPE(pedit_type); + NBL_TC_PEDIT_SET_NODE_RES_ENTRY(action->tc_pedit_info.pedit_node, pedit_type, out_e); + return ret; +} + +static int nbl_flow_set_smac_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 act_idx = *item; + struct nbl_tc_pedit_entry in_e; + void *out_e = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + enum nbl_flow_ped_type pedit_type; + int idx; + char mac[128]; + int oft = 0; + u8 *cur_mac_s = (u8 *)&in_e.mac; + + memset(&in_e, 0, sizeof(in_e)); + /* update mac offset, for low 16-bit must be 0 */ + NBL_TC_UPDATE_MAC_OFT(cur_mac_s); + for (idx = 0; idx < ETH_ALEN; ++idx) { + *cur_mac_s = action->tc_pedit_info.val.eth.h_source[ETH_ALEN - 1 - idx]; + oft += snprintf(&mac[oft], 128 - oft, "-%x", *cur_mac_s); + ++cur_mac_s; + } + + if (action->flag & NBL_FLOW_ACTION_EGRESS) + pedit_type = NBL_FLOW_PED_DMAC_TYPE; + else + pedit_type = NBL_FLOW_PED_UMAC_TYPE; + + NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 2); + ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_smac error"); + return -ENOMEM; + } + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%d-%u):smac:%s, hw-idx:%u", + pedit_type, action->tc_pedit_info.pedit_node.pedits, mac, + nbl_tc_pedit_get_hw_id(&in_e)); + buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e, pedit_type, NBL_ACT_REP_SMAC); + NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node); + NBL_TC_PEDIT_SET_NODE_RES_ENTRY(action->tc_pedit_info.pedit_node, pedit_type, out_e); + return ret; +} + +static int nbl_flow_set_dmac_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 act_idx = *item; + struct nbl_tc_pedit_entry in_e; + void *out_e = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + enum nbl_flow_ped_type pedit_type; + int idx; + char mac[128]; + int oft = 0; + u8 *cur_mac_s = in_e.mac; + + memset(&in_e, 0, sizeof(in_e)); + /* update mac offset, for low 16-bit must be 0 */ + NBL_TC_UPDATE_MAC_OFT(cur_mac_s); + for (idx = 0; idx < ETH_ALEN; ++idx) { + *cur_mac_s = action->tc_pedit_info.val.eth.h_dest[ETH_ALEN - 1 - idx]; + oft += snprintf(&mac[oft], 128 - oft, "-%x", *cur_mac_s); + ++cur_mac_s; + } + + if (action->flag & NBL_FLOW_ACTION_EGRESS) + pedit_type = NBL_FLOW_PED_DMAC_TYPE; + else + pedit_type = NBL_FLOW_PED_UMAC_TYPE; + + NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 2); + ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_dmac error"); + return -ENOMEM; + } + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%d-%u):dmac:%s, hw-idx:%u", + pedit_type, action->tc_pedit_info.pedit_node.pedits, mac, + nbl_tc_pedit_get_hw_id(&in_e)); + buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e, pedit_type, NBL_ACT_REP_DMAC); + NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node); + + /* update pedit_type, for dst mac store in _D_TYPE */ + 
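+	/*
+	 * The entry was allocated under the shared src/dst pedit type, but
+	 * it is filed under the dst slot here, presumably so that a flow
+	 * rewriting both source and destination MACs can track and release
+	 * its two PED entries independently.
+	 */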
NBL_TC_PEDIT_SET_D_TYPE(pedit_type); + NBL_TC_PEDIT_SET_NODE_RES_ENTRY(action->tc_pedit_info.pedit_node, pedit_type, out_e); + return ret; +} + +static int nbl_flow_set_sp_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 port = 0; + u16 act_idx = *item; + bool is_udp = NBL_TC_PEDIT_GET_NODE_RES_PRO(action->tc_pedit_info.pedit_node); + + if (!is_udp) + port = be16_to_cpu(action->tc_pedit_info.val.tcp.source); + else + port = be16_to_cpu(action->tc_pedit_info.val.udp.source); + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%u):sp:%s-%u", + action->tc_pedit_info.pedit_node.pedits, + is_udp ? "udp" : "tcp", port); + NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 1); + + buf[act_idx] = port + (NBL_ACT_REP_SPORT << 16); + return ret; +} + +static int nbl_flow_set_dp_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 port = 0; + u16 act_idx = *item; + bool is_udp = NBL_TC_PEDIT_GET_NODE_RES_PRO(action->tc_pedit_info.pedit_node); + + if (!is_udp) + port = be16_to_cpu(action->tc_pedit_info.val.tcp.dest); + else + port = be16_to_cpu(action->tc_pedit_info.val.udp.dest); + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%u):dp:%s-%u", + action->tc_pedit_info.pedit_node.pedits, + is_udp ? "udp" : "tcp", port); + NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 1); + + buf[act_idx] = port + (NBL_ACT_REP_DPORT << 16); + return ret; +} + +static struct nbl_flow_action_2hw acts_2hw[] = { + { NBL_FLOW_ACTION_PORT_ID, nbl_flow_port_id_action_2hw }, + { NBL_FLOW_ACTION_DROP, nbl_flow_drop_2hw }, + { NBL_FLOW_ACTION_COUNTER, nbl_flow_counter_action_2hw }, + { NBL_FLOW_ACTION_MCC, nbl_flow_mcc_action_2hw }, + { NBL_FLOW_ACTION_PUSH_OUTER_VLAN, nbl_flow_push_outer_vlan_2hw }, + { NBL_FLOW_ACTION_PUSH_INNER_VLAN, nbl_flow_push_inner_vlan_2hw }, + { NBL_FLOW_ACTION_POP_OUTER_VLAN, nbl_flow_pop_outer_vlan_2hw }, + { NBL_FLOW_ACTION_POP_INNER_VLAN, nbl_flow_pop_inner_vlan_2hw }, + { NBL_FLOW_ACTION_TUNNEL_ENCAP, nbl_flow_tunnel_encap_act_2hw }, + { NBL_FLOW_ACTION_TUNNEL_DECAP, nbl_flow_tunnel_decap_act_2hw }, + { NBL_FLOW_ACTION_SET_IPV4_SRC_IP, nbl_flow_set_sip_act_2hw }, + { NBL_FLOW_ACTION_SET_IPV4_DST_IP, nbl_flow_set_dip_act_2hw }, + { NBL_FLOW_ACTION_SET_IPV6_SRC_IP, nbl_flow_set_sip6_act_2hw }, + { NBL_FLOW_ACTION_SET_IPV6_DST_IP, nbl_flow_set_dip6_act_2hw }, + { NBL_FLOW_ACTION_SET_SRC_MAC, nbl_flow_set_smac_act_2hw }, + { NBL_FLOW_ACTION_SET_DST_MAC, nbl_flow_set_dmac_act_2hw }, + { NBL_FLOW_ACTION_SET_SRC_PORT, nbl_flow_set_sp_act_2hw }, + { NBL_FLOW_ACTION_SET_DST_PORT, nbl_flow_set_dp_act_2hw }, +}; + +static int nbl_flow_at_num_proc(struct nbl_resource_mgt *res_mgt, + struct nbl_mt_input *mt_input, + u16 action_cnt, u32 *buf, + struct nbl_tc_at_item *at_item) +{ + u16 idx = 0; + u16 act_idx = 0; + u16 act1_idx = 0; + u16 act2_idx = 0; + u32 act_node_idx[2]; + u32 i; + int ret = 0; + struct nbl_flow_pp_at_key at_key[2]; + struct nbl_flow_at_tbl *node = NULL; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&at_key, 0, sizeof(at_key)); + + if (mt_input->at_num == 0) { + for (idx = 0; idx < action_cnt; idx++) + at_item->act_buf[idx] = buf[idx]; + + at_item->act_num = action_cnt; + } 
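+	/*
+	 * Action placement (cf. nbl_flow_act_num()): a KT entry carries
+	 * kt_left_num inline action words; each chained AT entry costs one
+	 * inline slot for a NBL_ACT_NEXT_AT_FULL0 pointer action and
+	 * provides NBL_MAX_ACTION_NUM words of its own, hence:
+	 *
+	 *	count <= kt_left_num                            -> no AT entry
+	 *	count <= kt_left_num + NBL_MAX_ACTION_NUM - 1   -> one AT entry
+	 *	count <= kt_left_num + 2*(NBL_MAX_ACTION_NUM-1) -> two AT entries
+	 */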
else if (mt_input->at_num == 1) { + while (idx < mt_input->kt_left_num - 1) { + at_item->act_buf[idx + 1] = buf[idx]; + idx++; + } + at_item->act_num = mt_input->kt_left_num; + + while (idx < action_cnt) { + at_item->act1_buf[act1_idx] = buf[idx]; + at_key[0].act[act1_idx] = buf[idx]; + idx++; + act1_idx++; + } + + at_item->act1_num = action_cnt - mt_input->kt_left_num + 1; + act_node_idx[0] = nbl_pp_at_lookup(res_mgt, mt_input->pp_type, NBL_AT_TYPE_1, + &at_key[0], &node); + if (act_node_idx[0] != U32_MAX) { + node->ref_cnt++; + } else { + act_node_idx[0] = nbl_insert_pp_at(res_mgt, mt_input->pp_type, + NBL_AT_TYPE_1, &at_key[0], &node); + if (act_node_idx[0] == U32_MAX) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow nbl_insert_pp_at error.\n"); + return -1; + } + + memcpy(&at_item->act_collect.act_key, &at_key[0], + sizeof(struct nbl_flow_pp_at_key)); + } + + at_item->act_collect.act_vld = 1; + at_item->act_collect.act_hw_index = act_node_idx[0] + + at_item->act_collect.act_offset; + at_item->act_buf[0] = at_item->act_collect.act_hw_index + + (NBL_ACT_NEXT_AT_FULL0 << 16); + } else if (mt_input->at_num == 2) { + while (idx < mt_input->kt_left_num - 2) { + at_item->act_buf[idx + 2] = buf[idx]; + idx++; + } + at_item->act_num = mt_input->kt_left_num; + act_idx = idx; + + while (idx < NBL_AT_MAX_NUM + act_idx) { + at_item->act1_buf[act1_idx] = buf[idx]; + at_key[0].act[act1_idx] = buf[idx]; + idx++; + act1_idx++; + } + at_item->act1_num = NBL_AT_MAX_NUM; + + while (idx < action_cnt) { + at_item->act2_buf[act2_idx] = buf[idx]; + at_key[1].act[act2_idx] = buf[idx]; + idx++; + act2_idx++; + } + at_item->act2_num = + action_cnt - mt_input->kt_left_num + 2 - NBL_AT_MAX_NUM; + + for (i = 0; i < 2; i++) { + act_node_idx[i] = nbl_pp_at_lookup(res_mgt, mt_input->pp_type, + NBL_AT_TYPE_1 + i, &at_key[i], &node); + if (act_node_idx[i] != U32_MAX) { + node->ref_cnt++; + } else { + act_node_idx[i] = nbl_insert_pp_at(res_mgt, mt_input->pp_type, + NBL_AT_TYPE_1 + i, &at_key[i], &node); + if (act_node_idx[i] == U32_MAX) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow nbl_insert_pp_at error.\n"); + return -1; + } + memcpy(&at_item->act_collect.act_key[i], &at_key[i], + sizeof(struct nbl_flow_pp_at_key)); + } + } + + at_item->act_collect.act2_vld = 1; + at_item->act_collect.act_vld = 1; + at_item->act_collect.act2_hw_index = + act_node_idx[0] + + at_item->act_collect.act2_offset; + at_item->act_collect.act_hw_index = + act_node_idx[1] + + at_item->act_collect.act_offset; + at_item->act_buf[0] = at_item->act_collect.act2_hw_index + + (NBL_ACT_NEXT_AT_FULL0 << 16); + at_item->act_buf[1] = at_item->act_collect.act_hw_index + + (NBL_ACT_NEXT_AT_FULL0 << 16); + } + + return ret; +} + +static int nbl_flow_insert_at(struct nbl_resource_mgt *res_mgt, + struct nbl_mt_input *mt_input, + struct nbl_rule_action *action, + struct nbl_tc_at_item *at_item, + struct nbl_edit_item *edit_item, + struct nbl_tcam_item *tcam_item) +{ + int ret = 0; + u32 idx = 0; + u16 item = 0; + u32 list_num = ARRAY_SIZE(acts_2hw); + u32 buf[NBL_MAX_ACTION_NUM] = { 0 }; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + for (idx = 0; idx < list_num; idx++) { + if (action->flag & acts_2hw[idx].action_type) { + if (!acts_2hw[idx].act_2hw) + continue; + + ret = acts_2hw[idx].act_2hw(action, buf, &item, + edit_item, res_mgt); + if (ret) + return ret; + item++; + } + } + + if (tcam_item->tcam_flag) { + memcpy(tcam_item->tcam_action, buf, sizeof(tcam_item->tcam_action)); + return ret; + } + + mt_input->at_num = 
nbl_flow_act_num(mt_input, item); + spin_lock(&tc_flow_mgt->flow_lock); + + ret = nbl_flow_at_num_proc(res_mgt, mt_input, item, buf, at_item); + spin_unlock(&tc_flow_mgt->flow_lock); + + return ret; +} + +static void nbl_cmdq_show_ht_data(struct nbl_common_info *common, + union nbl_cmd_fem_ht_u *ht, bool read) +{ + u32 index = 0; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow HT bucket/entry/ht/em: %x-%04x-%x-%x\n", + ht->info.bucket_id, ht->info.entry_id, ht->info.ht_id, ht->info.em_id); + if (read) { + for (index = 0; index < 4; index++) { + if (index == 0) + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow HT four buckets kt_idx/hash/vld:%05x-%04x-%x\n", + ht->info.ht_data[index].info.kt_index, + ht->info.ht_data[index].info.hash, + ht->info.ht_data[index].info.vld); + else + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow HT four buckets kt_idx/hash/vld: %05x-%04x-%x", + ht->info.ht_data[index].info.kt_index, + ht->info.ht_data[index].info.hash, + ht->info.ht_data[index].info.vld); + } + + } else { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow HT kt_idx/hash/vld: %05x-%04x-%x\n", + ht->info.ht_data[index].info.kt_index, + ht->info.ht_data[index].info.hash, + ht->info.ht_data[index].info.vld); + } +} + +int nbl_cmdq_flow_ht_clear_2hw(struct nbl_tc_ht_item *ht_item, + u8 pp_type, struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ht_u ht; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_HT_WRITE]; + struct nbl_cmd_content cmd = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&ht, 0, sizeof(ht)); + + ht.info.ht_valid = 1; + if (ht_item->ht_entry == NBL_HASH0) { + ht.info.entry_id = ht_item->ht0_hash; + ht.info.ht_id = NBL_ACC_HT0; + } else if (ht_item->ht_entry == NBL_HASH1) { + ht.info.entry_id = ht_item->ht1_hash; + ht.info.ht_id = NBL_ACC_HT1; + } + + ht.info.bucket_id = ht_item->hash_bucket; + ht.info.em_id = pp_type; + /* prepare the command and command header */ + cmd.in_va = &ht; + cmd.in_length = NBL_CMDQ_FEM_W_REQ_LEN; + nbl_cmdq_show_ht_data(common, &ht, false); + return nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); +} + +static int nbl_flow_del_ht_2hw(struct nbl_tc_ht_item *ht_item, u8 pp_type, + struct nbl_flow_pp_ht_mng *pp_ht0_mng, + struct nbl_flow_pp_ht_mng *pp_ht1_mng, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + struct nbl_flow_pp_ht_key pp_ht_key = { 0 }; + struct nbl_flow_pp_ht_tbl *node = NULL; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (ht_item->ht_entry == NBL_HT0_HASH) { + pp_ht_key.vid = 1; + pp_ht_key.ht_other_index = ht_item->ht1_hash; + pp_ht_key.kt_index = ht_item->tbl_id; + node = nbl_pp_ht_lookup(pp_ht0_mng, ht_item->ht0_hash, + &pp_ht_key); + + if (node) { + ret = nbl_cmdq_flow_ht_clear_2hw(ht_item, pp_type, res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow failed to del cmdq ht 2hw,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d., ret %d\n", + pp_type, ht_item->ht0_hash, + ht_item->ht1_hash, ht_item->tbl_id, ret); + return ret; + } + + ret = nbl_delete_pp_ht(res_mgt, pp_ht0_mng, node, + ht_item->ht0_hash, + ht_item->ht1_hash, + ht_item->tbl_id); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow failed to del ht,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d, ret %d.\n", + pp_type, ht_item->ht0_hash, + ht_item->ht1_hash, ht_item->tbl_id, ret); + return ret; + } + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow node = null, pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d.\n", + pp_type, ht_item->ht0_hash, ht_item->ht1_hash, + ht_item->tbl_id); + return 
-EINVAL; + } + + } else if (ht_item->ht_entry == NBL_HT1_HASH) { + pp_ht_key.vid = 1; + pp_ht_key.ht_other_index = ht_item->ht0_hash; + pp_ht_key.kt_index = ht_item->tbl_id; + node = nbl_pp_ht_lookup(pp_ht1_mng, ht_item->ht1_hash, + &pp_ht_key); + + if (node) { + ret = nbl_cmdq_flow_ht_clear_2hw(ht_item, pp_type, res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow failed to del cmdq ht 2hw,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d, ret %d.\n", + pp_type, ht_item->ht0_hash, + ht_item->ht1_hash, ht_item->tbl_id, ret); + return ret; + } + + ret = nbl_delete_pp_ht(res_mgt, pp_ht1_mng, node, + ht_item->ht1_hash, + ht_item->ht0_hash, + ht_item->tbl_id); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow failed to del ht, pp%d ht1_hash=%d, ht0_hash=%d, tbl_id=%d, ret %d.\n", + pp_type, ht_item->ht1_hash, + ht_item->ht0_hash, ht_item->tbl_id, ret); + return ret; + } + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow node = null, pp%d ht1_hash=%d,ht0_hash=%d,tbl_id=%d.\n", + pp_type, ht_item->ht1_hash, ht_item->ht0_hash, + ht_item->tbl_id); + return -EINVAL; + } + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow ht_entry error, pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d.\n", + pp_type, ht_item->ht0_hash, ht_item->ht1_hash, + ht_item->tbl_id); + } + + return ret; +} + +static int nbl_flow_del_at_2hw(struct nbl_resource_mgt *res_mgt, + struct nbl_act_collect *act_collect, u8 pp_type) +{ + int ret = 0; + int idx; + struct nbl_flow_at_tbl *at_node = NULL; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + void *at1_tbl = tc_flow_mgt->at_mng.at_tbl[pp_type][NBL_AT_TYPE_1]; + void *at2_tbl = tc_flow_mgt->at_mng.at_tbl[pp_type][NBL_AT_TYPE_2]; + struct nbl_index_key_extra extra_key; + + NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true); + if (act_collect->act_vld == 1) { + idx = nbl_common_get_index_with_data(at1_tbl, act_collect->act_key[0].act, + &extra_key, NULL, 0, (void **)&at_node); + if (idx != U32_MAX) { + at_node->ref_cnt--; + if (!at_node->ref_cnt) { + nbl_common_free_index(at1_tbl, act_collect->act_key[0].act); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow delete at node key:%d-%d-%d-%d-%d-%d-%d-%d.\n", + act_collect->act_key[0].act[0], + act_collect->act_key[0].act[1], + act_collect->act_key[0].act[2], + act_collect->act_key[0].act[3], + act_collect->act_key[0].act[4], + act_collect->act_key[0].act[5], + act_collect->act_key[0].act[6], + act_collect->act_key[0].act[7]); + } + } + } + + if (act_collect->act2_vld == 1) { + idx = nbl_common_get_index_with_data(at2_tbl, act_collect->act_key[1].act, + &extra_key, NULL, 0, (void **)&at_node); + if (idx != U32_MAX) { + at_node->ref_cnt--; + if (!at_node->ref_cnt) { + nbl_common_free_index(at2_tbl, act_collect->act_key[1].act); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow delete at node key:%d-%d-%d-%d-%d-%d-%d-%d.\n", + act_collect->act_key[1].act[0], + act_collect->act_key[1].act[1], + act_collect->act_key[1].act[2], + act_collect->act_key[1].act[3], + act_collect->act_key[1].act[4], + act_collect->act_key[1].act[5], + act_collect->act_key[1].act[6], + act_collect->act_key[1].act[7]); + } + } + } + + return ret; +} + +static int nbl_tc_flow_send_tcam_2hw(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + struct nbl_tcam_item *tcam_item) +{ + int ret = 0; + struct nbl_flow_tcam_ad_item ad_item; + u16 index = 0; + bool is_new = false; + struct 
nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(tc_flow_mgt->res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u8 mode; + + if (!tcam_pp_key_mng || !tcam_pp_ad_mng || !tcam_item) + return -EINVAL; + + memset(&ad_item, 0, sizeof(ad_item)); + + memcpy(ad_item.action, tcam_item->tcam_action, sizeof(ad_item.action)); + ret = nbl_set_tcam_process(common, tcam_pp_key_mng, tcam_pp_ad_mng, + tcam_item, &ad_item, &index, &is_new); + if (ret) + return ret; + + if (is_new) { + tcam_item->tcam_index = index; + if (tcam_item->key_mode == NBL_TC_KT_HALF_MODE) { + mode = NBL_KT_HALF_MODE; + *tcam_item->pp_tcam_count = + *tcam_item->pp_tcam_count + 1; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow tcam:count+1 pp%d count=%d", + tcam_item->pp_type, *tcam_item->pp_tcam_count); + } else { + mode = NBL_KT_FULL_MODE; + *tcam_item->pp_tcam_count = + *tcam_item->pp_tcam_count + 2; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow tcam:count+2 pp%d count=%d", + tcam_item->pp_type, *tcam_item->pp_tcam_count); + } + + ret = phy_ops->add_tcam(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt), + tcam_item->tcam_index, tcam_item->kt_data.hash_key, + tcam_item->tcam_action, mode, tcam_item->pp_type); + } + + return ret; +} + +static void nbl_cmdq_show_ktat_header(struct nbl_common_info *common, + union nbl_cmd_fem_ktat_u *ktat) +{ + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show KT index: 0x%08x\n", ktat->info.kt_index); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show KT valid: 0x%0x\n", ktat->info.kt_valid); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show KT size: 0x%02x\n", ktat->info.kt_size); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show AT index: 0x%08x\n", ktat->info.at_index); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show AT valid: 0x%0x\n", ktat->info.at_valid); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show AT size: 0x%02x\n", ktat->info.at_size); +} + +static void nbl_cmdq_show_kt_data(struct nbl_common_info *common, + union nbl_cmd_fem_ktat_u *ktat, bool second) +{ + u32 i = 0; + const unsigned char *p = (unsigned char *)&ktat->info.kt_data; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow showing KT data (320 bits):\n"); + + for (i = 0; i < NBL_PPE_KT_FULL_SIZE; i += 16) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow [%d] %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + i / 8, p[i], p[i + 1], p[i + 2], p[i + 3], p[i + 4], + p[i + 5], p[i + 6], p[i + 7], p[i + 8], p[i + 9], + p[i + 10], p[i + 11], p[i + 12], p[i + 13], p[i + 14], + p[i + 15]); + } + + if (second) { + const union nbl_fem_four_at_data_u *test = + (const union nbl_fem_four_at_data_u *)(p + 20); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow showing KT actions:\n"); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow [actions]: %02x %02x %02x %02x\n", + test->info.at1, test->info.at2, test->info.at3, + test->info.at4); + } else { + const union nbl_fem_four_at_data_u *test = + (const union nbl_fem_four_at_data_u *)(p); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow showing KT actions:\n"); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow [actions]: %02x %02x %02x %02x\n", + test->info.at1, test->info.at2, test->info.at3, + test->info.at4); + } +} + +static void __maybe_unused +nbl_cmdq_show_at_data(struct nbl_common_info *common, + union nbl_cmd_fem_ktat_u *ktat) +{ + /* AT 176 bit */ + const union nbl_fem_at_acc_data_u *at = (union nbl_fem_at_acc_data_u *)&ktat->info.at_data; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc 
flow showing AT data (176 bits):\n"); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow check at data:0x%x-%x-%x-%x-%x-%x-%x-%x.\n", + at->info.at1, at->info.at2, at->info.at3, at->info.at4, + at->info.at5, at->info.at6, at->info.at7, at->info.at8); +} + +static void __maybe_unused +nbl_cmdq_show_searched_at_data(struct nbl_common_info *common, + union nbl_cmd_fem_ktat_u *ktat) +{ + const union nbl_fem_all_at_data_u *at = (union nbl_fem_all_at_data_u *)&ktat->info.kt_data; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow showing all action data (352 bits):\n"); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow check at data:0x%x-%x-%x-%x-%x-%x-%x-%x.\n", + at->info.at1, at->info.at2, at->info.at3, at->info.at4, + at->info.at5, at->info.at6, at->info.at7, at->info.at8); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow check act data:0x%x-%x-%x-%x-%x-%x-%x-%x.\n", + at->info.at9, at->info.at10, at->info.at11, at->info.at12, + at->info.at13, at->info.at14, at->info.at15, at->info.at16); +} + +static void __maybe_unused +nbl_cmdq_search_flow_ktat(const struct nbl_tc_kt_item *kt_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ktat_u ktat; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_KTAT_SEARCH]; + struct nbl_cmd_content cmd = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&ktat, 0, sizeof(ktat)); + + ktat.info.kt_valid = 1; + ktat.info.kt_em = kt_item->pp_type; + if (kt_item->key_type == NBL_KEY_TYPE_160) + memcpy(&ktat.info.kt_data[5], &kt_item->kt_data.data, + sizeof(kt_item->kt_data.data) / 2); + else + memcpy(&ktat.info.kt_data, &kt_item->kt_data.data, + sizeof(kt_item->kt_data.data)); + + cmd.in_va = &ktat; + cmd.in_length = NBL_CMDQ_FEM_S_REQ_LEN; + cmd.out_va = &ktat; + nbl_cmdq_show_kt_data(common, &ktat, kt_item->key_type == NBL_KEY_TYPE_160); + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + /* the resulting AT is returned in the kt_data area of ktat */ + nbl_cmdq_show_searched_at_data(common, &ktat); +} + +/* search a non-existent KT */ +static void __maybe_unused +nbl_cmdq_search_noflow_ktat(const struct nbl_tc_kt_item *kt_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ktat_u ktat; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_KTAT_SEARCH]; + struct nbl_cmd_content cmd = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&ktat, 0, sizeof(ktat)); + + ktat.info.kt_valid = 1; + ktat.info.kt_em = kt_item->pp_type; + if (kt_item->key_type == NBL_KEY_TYPE_160) + memcpy(&ktat.info.kt_data[5], &kt_item->kt_data.data, + sizeof(kt_item->kt_data.data) / 2); + else + memcpy(&ktat.info.kt_data, &kt_item->kt_data.data, + sizeof(kt_item->kt_data.data)); + + /* clobber the last KT word so no existing entry can match */ + ktat.info.kt_data[9] = 0; + + cmd.in_va = &ktat; + cmd.in_length = NBL_CMDQ_FEM_S_REQ_LEN; + cmd.out_va = &ktat; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow searching AT with non-existent KT data\n"); + nbl_cmdq_show_kt_data(common, &ktat, kt_item->key_type == NBL_KEY_TYPE_160); + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + + /* the resulting AT is returned in the kt_data area of ktat */ + nbl_cmdq_show_searched_at_data(common, &ktat); +} + +/* use cmdq to read the KT & AT written to MT */ +static int __maybe_unused nbl_cmdq_read_flow_ktat(struct nbl_tc_ht_item *ht_item, + struct nbl_tc_at_item *at_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ktat_u ktat; + union nbl_cmd_fem_ktat_u extra_ktat; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_KTAT_READ]; + struct nbl_cmd_content cmd 
= { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + /* not necessary to check KT info */ + if (!ht_item || !at_item) + return -EINVAL; + + memset(&ktat, 0, sizeof(ktat)); + memset(&extra_ktat, 0, sizeof(extra_ktat)); + + /* read KT */ + ktat.info.kt_valid = 1; + ktat.info.kt_size = 1; /* can only read full table */ + ktat.info.kt_index = ht_item->tbl_id; + if (at_item->act1_num) { + ktat.info.at_valid = 1; + ktat.info.at_size = 1; /* can only read full table */ + ktat.info.at_index = at_item->act_collect.act_hw_index; + } + + cmd.in_va = &ktat; + cmd.in_length = NBL_CMDQ_FEM_R_REQ_LEN; + cmd.out_va = &ktat; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow sending read request of KT and AT table\n"); + + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + + /* print out read data */ + nbl_cmdq_show_ktat_header(common, &ktat); + nbl_cmdq_show_kt_data(common, &ktat, false); + nbl_cmdq_show_at_data(common, &ktat); + + /* read AT */ + if (at_item->act2_num) { + extra_ktat.info.at_index = at_item->act_collect.act2_hw_index; + extra_ktat.info.at_valid = 1; + extra_ktat.info.at_size = 1; + cmd.in_va = &extra_ktat; + cmd.in_length = NBL_CMDQ_FEM_R_REQ_LEN; + cmd.out_va = &extra_ktat; + cmd.out_length = 0; + cmd.out_params = 0; + cmd.in_params = 0; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow sending read request of AT table\n"); + + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + + /* print out AT data */ + nbl_cmdq_show_ktat_header(common, &extra_ktat); + nbl_cmdq_show_at_data(common, &extra_ktat); + } + + return 0; +} + +static void __maybe_unused +nbl_cmdq_read_hw_ht_entry(struct nbl_tc_ht_item *ht_item, + u8 pp_type, struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ht_u ht; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_HT_READ]; + struct nbl_cmd_content cmd = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&ht, 0, sizeof(ht)); + + ht.info.ht_valid = 1; + ht.info.ht_data[0].info.vld = 1; + if (ht_item->ht_entry == NBL_HASH0) { + ht.info.ht_data[0].info.hash = ht_item->ht1_hash; + ht.info.entry_id = ht_item->ht0_hash; + ht.info.ht_id = NBL_ACC_HT0; + } else if (ht_item->ht_entry == NBL_HASH1) { + ht.info.ht_data[0].info.hash = ht_item->ht0_hash; + ht.info.entry_id = ht_item->ht1_hash; + ht.info.ht_id = NBL_ACC_HT1; + } + + /* no need to fill in the bucket id */ + ht.info.ht_data[0].info.kt_index = ht_item->tbl_id; + ht.info.em_id = pp_type; + + cmd.in_va = &ht; + cmd.in_length = NBL_CMDQ_FEM_R_REQ_LEN; + + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + + /* show read result */ + nbl_cmdq_show_ht_data(common, &ht, true); +} + +/* write HT table using CMDQ */ +static void nbl_cmdq_send_flow_ht(struct nbl_tc_ht_item *ht_item, u8 pp_type, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ht_u ht; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_HT_WRITE]; + struct nbl_cmd_content cmd = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&ht, 0, sizeof(ht)); + + ht.info.ht_valid = 1; + ht.info.ht_data[0].info.vld = 1; + + if (ht_item->ht_entry == NBL_HASH0) { + ht.info.ht_data[0].info.hash = ht_item->ht1_hash; + ht.info.entry_id = ht_item->ht0_hash; + ht.info.ht_id = NBL_ACC_HT0; + } else if (ht_item->ht_entry == NBL_HASH1) { + ht.info.ht_data[0].info.hash = ht_item->ht0_hash; + ht.info.entry_id = ht_item->ht1_hash; + ht.info.ht_id = NBL_ACC_HT1; + } + + ht.info.bucket_id = ht_item->hash_bucket; + ht.info.ht_data[0].info.kt_index = ht_item->tbl_id; + 
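/* em_id selects the owning EM block: 0 is pp0, 1 is pp1, 2 or 3 is pp2 */ +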
ht.info.em_id = pp_type; + + /* sending the command */ + cmd.in_va = &ht; + cmd.in_length = NBL_CMDQ_FEM_W_REQ_LEN; + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); +} + +/* write KT and AT table, KT index is stored in ht_item */ +static int __maybe_unused +nbl_cmdq_send_flow_ktat(struct nbl_tc_ht_item *ht_item, + struct nbl_tc_kt_item *kt_item, + struct nbl_tc_at_item *at_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ktat_u ktat; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_KTAT_WRITE]; + struct nbl_cmd_content cmd = { 0 }; + union nbl_fem_at_acc_data_u at1; + union nbl_fem_at_acc_data_u at2; + struct nbl_cmd_content cmd_addition; + union nbl_cmd_fem_ktat_u extra_ktat; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (!ht_item || !kt_item || !at_item) + return -EINVAL; + + memset(&ktat, 0, sizeof(ktat)); + memset(&at1, 0, sizeof(at1)); + memset(&at2, 0, sizeof(at2)); + memset(&cmd_addition, 0, sizeof(cmd_addition)); + memset(&extra_ktat, 0, sizeof(extra_ktat)); + + /* the first command sends the KT and, possibly, the first AT */ + ktat.info.kt_valid = 1; + ktat.info.kt_index = ht_item->tbl_id; + ktat.info.kt_size = (kt_item->key_type == NBL_KEY_TYPE_160) ? 0 : 1; + memcpy(&ktat.info.kt_data, &kt_item->kt_data.data, sizeof(kt_item->kt_data.data)); + + if (at_item->act1_num) { + at1.info.at1 = at_item->act1_buf[0]; + at1.info.at2 = at_item->act1_buf[1]; + at1.info.at3 = at_item->act1_buf[2]; + at1.info.at4 = at_item->act1_buf[3]; + at1.info.at5 = at_item->act1_buf[4]; + at1.info.at6 = at_item->act1_buf[5]; + at1.info.at7 = at_item->act1_buf[6]; + at1.info.at8 = at_item->act1_buf[7]; + + ktat.info.at_valid = 1; + ktat.info.at_index = at_item->act_collect.act_hw_index; + /* all AT entries use the full width */ + ktat.info.at_size = 1; + memcpy(&ktat.info.at_data, &at1.info, sizeof(at1)); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow kt index=0x%x, at_hw_index=0x%x, at data:0x%x-%x-%x-%x-%x-%x-%x-%x.", + ktat.info.kt_index, at_item->act_collect.act_hw_index, + at1.info.at1, at1.info.at2, at1.info.at3, at1.info.at4, + at1.info.at5, at1.info.at6, at1.info.at7, at1.info.at8); + } + + /* fill in the command flags, block, module, table, etc */ + cmd.in_va = &ktat; + cmd.in_length = NBL_CMDQ_FEM_W_REQ_LEN; + + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + + /* if AT2 is used, another command is also needed */ + if (at_item->act2_num) { + at2.info.at1 = at_item->act2_buf[0]; + at2.info.at2 = at_item->act2_buf[1]; + at2.info.at3 = at_item->act2_buf[2]; + at2.info.at4 = at_item->act2_buf[3]; + at2.info.at5 = at_item->act2_buf[4]; + at2.info.at6 = at_item->act2_buf[5]; + at2.info.at7 = at_item->act2_buf[6]; + at2.info.at8 = at_item->act2_buf[7]; + + extra_ktat.info.at_valid = 1; + extra_ktat.info.at_index = at_item->act_collect.act2_hw_index; + /* all AT entries use the full width */ + extra_ktat.info.at_size = 1; + memcpy(&extra_ktat.info.at_data, &at2.info, sizeof(at2)); + + cmd_addition.in_va = &extra_ktat; + cmd_addition.in_length = NBL_CMDQ_FEM_W_REQ_LEN; + + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd_addition); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow kt index=0x%x, at2_hw_index=0x%x, at2 data:0x%x-%x-%x-%x-%x-%x-%x-%x.", + ktat.info.kt_index, at_item->act_collect.act2_hw_index, + at2.info.at1, at2.info.at2, at2.info.at3, at2.info.at4, + at2.info.at5, at2.info.at6, at2.info.at7, at2.info.at8); + } + + /* write HT table using CMDQ */ + nbl_cmdq_send_flow_ht(ht_item, 
kt_item->pp_type, res_mgt); + + return 0; +} + +static int nbl_flow_del_tcam_2hw(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + struct nbl_tcam_item *tcam_item) +{ + int ret = 0; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(tc_flow_mgt->res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u8 mode; + + if (!tcam_pp_key_mng || !tcam_pp_ad_mng || !tcam_item) + return -EINVAL; + + ret = nbl_delete_tcam_key_ad(common, tcam_pp_key_mng, tcam_pp_ad_mng, + tcam_item->tcam_index, tcam_item->key_mode, + tcam_item->pp_type); + if (ret == 0 && tcam_pp_key_mng[tcam_item->tcam_index].ref_cnt == 0) { + if (tcam_item->key_mode == NBL_TC_KT_HALF_MODE) { + mode = NBL_KT_HALF_MODE; + *tcam_item->pp_tcam_count = + *tcam_item->pp_tcam_count - 1; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow tcam:count-1 pp%d count=%d\n", + tcam_item->pp_type, *tcam_item->pp_tcam_count); + } else { + mode = NBL_KT_FULL_MODE; + *tcam_item->pp_tcam_count = + *tcam_item->pp_tcam_count - 2; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow tcam:count-2 pp%d count=%d\n", + tcam_item->pp_type, *tcam_item->pp_tcam_count); + } + + phy_ops->del_tcam(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt), + tcam_item->tcam_index, mode, tcam_item->pp_type); + } + + return ret; +} + +static int nbl_tc_set_pp_related_value(struct nbl_select_input *select_input, + struct nbl_mt_input *mt_input, + struct nbl_tc_flow_mgt *tc_flow_mgt, + u8 profile_id) +{ + select_input->pp_type = profile_id / NBL_PP_PROFILE_NUM; + + switch (select_input->pp_type) { + case NBL_PP_TYPE_1: + select_input->pp_tcam_count = &tc_flow_mgt->count_mng.pp1_tcam_count; + select_input->pp_ht0_mng = &tc_flow_mgt->pp1_ht0_mng; + select_input->pp_ht1_mng = &tc_flow_mgt->pp1_ht1_mng; + select_input->act_offset = NBL_PP1_AT_OFFSET; + select_input->act2_offset = NBL_PP1_AT2_OFFSET; + + select_input->tcam_pp_key_mng = tc_flow_mgt->tcam_pp1_key_mng; + select_input->tcam_pp_ad_mng = tc_flow_mgt->tcam_pp1_ad_mng; + select_input->pp_kt_bmp = tc_flow_mgt->pp1_kt_bmp; + select_input->pp_kt_num = NBL_PP1_KT_NUM; + + mt_input->depth = NBL_FEM_HT_PP1_DEPTH; + select_input->kt_idx_offset = NBL_PP1_KT_OFFSET; + mt_input->power = NBL_PP1_POWER; + + mt_input->pp_type = NBL_PP_TYPE_1; + break; + case NBL_PP_TYPE_2: + select_input->pp_tcam_count = &tc_flow_mgt->count_mng.pp2_tcam_count; + select_input->pp_ht0_mng = &tc_flow_mgt->pp2_ht0_mng; + select_input->pp_ht1_mng = &tc_flow_mgt->pp2_ht1_mng; + select_input->tcam_pp_key_mng = tc_flow_mgt->tcam_pp2_key_mng; + select_input->tcam_pp_ad_mng = tc_flow_mgt->tcam_pp2_ad_mng; + select_input->pp_kt_bmp = tc_flow_mgt->pp2_kt_bmp; + select_input->pp_kt_num = NBL_PP2_KT_NUM; + + mt_input->depth = NBL_FEM_HT_PP2_DEPTH; + select_input->act2_offset = NBL_PP2_AT2_OFFSET; + mt_input->power = NBL_PP2_POWER; + + mt_input->pp_type = NBL_PP_TYPE_2; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void nbl_tc_assign_action_data(u32 *key, u32 offset, + u32 value) +{ + u32 index = offset / NBL_BITS_IN_U32; + u32 remain = offset % NBL_BITS_IN_U32; + u32 shifted = 0; + + if (NBL_BITS_IN_U32 - remain < NBL_AT_WIDTH) { + /* if the value span across u32 boundary */ + shifted = NBL_BITS_IN_U32 - remain; + key[index] += (value << remain); + key[index + 1] += (value >> shifted); + } else { + key[index] += (value << remain); + } +} + +static void 
nbl_tc_assign_acts_for_kt(struct nbl_common_info *common, + struct nbl_tc_at_item *at_item, u32 *key, + struct nbl_mt_input *input) +{ + u8 i = 0; + u32 offset = 0; + + if (input->kt_left_num > (NBL_FEM_KT_HALF_LEN / NBL_AT_WIDTH)) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow too many actions to insert for KT data\n"); + return; + } + + for (i = 0; i < input->kt_left_num; i++) { + nbl_tc_assign_action_data(key, offset, at_item->act_buf[i]); + offset += NBL_AT_WIDTH; + } +} + +static inline void nbl_tc_assign_idx_act_for_kt(struct nbl_tc_kt_item *kt_item, + struct nbl_flow_tab_filter *node) +{ + u32 act_value = node->assoc_tbl_id + (NBL_ACT_SET_TAB_INDEX << 16); + + nbl_tc_assign_action_data(kt_item->kt_data.data, 0, act_value); +} + +static inline void +nbl_tc_assign_idx_act_for_tcam(struct nbl_tcam_item *tcam_item, + struct nbl_flow_tab_filter *node) +{ + u32 idx = 0; + + tcam_item->tcam_action[idx++] = NBL_GET_ACT_INFO(node->tbl_id, + NBL_ACT_SET_TAB_INDEX); +} + +static inline void nbl_tc_assign_key_for_hash(struct nbl_mt_input *mt_input, + struct nbl_flow_tab_filter *node) +{ + u8 idx; + u8 *ptr = (u8 *)node->key.key_value; + + for (idx = 0; idx < NBL_KT_BYTE_HALF_LEN; idx++) { + mt_input->key[idx] = ptr[NBL_KT_BYTE_LEN - idx - 1]; + mt_input->key[NBL_KT_BYTE_LEN - idx - 1] = ptr[idx]; + } +} + +static inline void nbl_tc_assign_kt_item(struct nbl_tc_kt_item *kt_item, + struct nbl_select_input *select_input, + struct nbl_flow_tab_filter *node, + bool full) +{ + u32 *ptr = node->key.key_value; + u16 size = full ? NBL_FEM_KT_LEN : NBL_FEM_KT_HALF_LEN; + u16 offset = full ? 0 : (NBL_FEM_KT_HALF_LEN / NBL_BITS_IN_U32); + + kt_item->key_type = full ? NBL_KEY_TYPE_320 : NBL_KEY_TYPE_160; + kt_item->pp_type = select_input->pp_type; + memcpy(kt_item->kt_data.data, ptr + offset, size / NBL_BITS_IN_U8); +} + +static void +nbl_tc_kt_mt_set_value(struct nbl_tc_at_item *at_item, + struct nbl_mt_input *mt_input, + struct nbl_select_input *select_input, + struct nbl_rule_action *action, + struct nbl_profile_msg *profile_msg, + const struct nbl_flow_idx_info *idx_info) +{ + at_item->act_collect.act_offset = select_input->act_offset; + at_item->act_collect.act2_offset = select_input->act2_offset; + mt_input->kt_left_num = profile_msg->act_count; + if (idx_info->key_flag & NBL_FLOW_KEY_DIPV4_FLAG) { + action->flag |= NBL_FLOW_ACTION_IPV4; + } else if (idx_info->key_flag & + NBL_FLOW_KEY_DIPV6_FLAG) { + action->flag |= NBL_FLOW_ACTION_IPV6; + } + + if (idx_info->key_flag & NBL_FLOW_KEY_T_VNI_FLAG) + action->flag |= NBL_FLOW_ACTION_TUNNEL_DECAP; +} + +static void +nbl_tc_node_at_set_value(struct nbl_tc_at_item *at_item, + struct nbl_flow_tab_filter *node, + struct nbl_edit_item *edit_item, + struct nbl_rule_action *action) +{ + memcpy(&node->act_collect, &at_item->act_collect, + sizeof(at_item->act_collect)); + memcpy(&node->edit_item, edit_item, sizeof(struct nbl_edit_item)); + if (node->edit_item.is_mir) + list_replace_init(&edit_item->tc_mcc_list, &node->edit_item.tc_mcc_list); + if (action->flag & NBL_FLOW_ACTION_INGRESS) + node->edit_item.direct = NBL_ACT_INGRESS; +} + +static int nbl_flow_tab_add(struct nbl_flow_tab_filter *node, + struct nbl_rule_action *action, + struct nbl_resource_mgt *res_mgt, + const struct nbl_flow_idx_info *idx_info, + struct nbl_mt_input *mt_input, + struct nbl_select_input *select_input) +{ + int ret = 0; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_profile_msg 
*profile_msg = + &tc_flow_mgt->profile_msg[idx_info->profile_id]; + struct nbl_tc_ht_item ht_item; + struct nbl_tc_kt_item kt_item; + struct nbl_tc_at_item *at_item = NULL; + struct nbl_edit_item edit_item; + struct nbl_tcam_item tcam_item; + + memset(&ht_item, 0, sizeof(ht_item)); + memset(&kt_item, 0, sizeof(kt_item)); + memset(&edit_item, 0, sizeof(edit_item)); + memset(&tcam_item, 0, sizeof(tcam_item)); + + nbl_tc_assign_key_for_hash(mt_input, node); + + spin_lock(&tc_flow_mgt->flow_lock); + if (mt_input->key_full) { + tcam_item.key_mode = NBL_TC_KT_FULL_MODE; + ret = nbl_tc_flow_alloc_bmp_id(select_input->pp_kt_bmp, + select_input->pp_kt_num, + tcam_item.key_mode, &node->tbl_id); + if (ret) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow failed to alloc id for full table, ret %d.\n", ret); + return -ENOSPC; + } + } else { + tcam_item.key_mode = NBL_TC_KT_HALF_MODE; + ret = nbl_tc_flow_alloc_bmp_id(select_input->pp_kt_bmp, + select_input->pp_kt_num, + tcam_item.key_mode, &node->tbl_id); + if (ret) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow failed to alloc id for half table, ret %d.\n", ret); + return -ENOSPC; + } + } + + mt_input->tbl_id = node->tbl_id + select_input->kt_idx_offset; + tcam_item.pp_tcam_count = select_input->pp_tcam_count; + ret = nbl_flow_ht_assign_proc(res_mgt, mt_input, + select_input->pp_ht0_mng, + select_input->pp_ht1_mng, &ht_item, + &tcam_item); + spin_unlock(&tc_flow_mgt->flow_lock); + + if (ret) + goto ret_bitmap_fail; + + if (tcam_item.tcam_flag) { + node->tcam_flag = tcam_item.tcam_flag; + if (mt_input->key_full) + memcpy(tcam_item.kt_data.data, node->key.key_value, + sizeof(node->key.key_value)); + else + memcpy(tcam_item.kt_data.data, + &node->key.key_value[NBL_TABLE_KEY_DATA_LEN / 2], + sizeof(node->key.key_value) / 2); + } + memcpy(&node->ht_item, &ht_item, sizeof(ht_item)); + node->pp_type = select_input->pp_type; + + /* copy pure key from node to kt */ + nbl_tc_assign_kt_item(&kt_item, select_input, node, (bool)mt_input->key_full); + + at_item = kzalloc(sizeof(*at_item), GFP_KERNEL); + if (!at_item) { + ret = -ENOMEM; + goto ret_bitmap_fail; + } + + if (idx_info->last_stage) { + nbl_tc_kt_mt_set_value(at_item, mt_input, select_input, + action, profile_msg, idx_info); + + ret = nbl_flow_insert_at(res_mgt, mt_input, action, + at_item, &edit_item, &tcam_item); + if (ret) + goto ret_fail; + nbl_tc_node_at_set_value(at_item, node, &edit_item, action); + nbl_tc_assign_acts_for_kt(common, at_item, kt_item.kt_data.data, + mt_input); + } else { + if (!tcam_item.tcam_flag) + nbl_tc_assign_idx_act_for_kt(&kt_item, node); + else + nbl_tc_assign_idx_act_for_tcam(&tcam_item, node); + } + + if (tcam_item.tcam_flag) { + spin_lock(&tc_flow_mgt->flow_lock); + tcam_item.pp_type = select_input->pp_type; + tcam_item.sw_hash_id = node->sw_hash_id; + tcam_item.profile_id = idx_info->profile_id; + ret = nbl_tc_flow_send_tcam_2hw(res_mgt, select_input->tcam_pp_key_mng, + select_input->tcam_pp_ad_mng, &tcam_item); + node->tcam_index = tcam_item.tcam_index; + spin_unlock(&tc_flow_mgt->flow_lock); + goto ret_fail; + } + + /* write flow KT AT using CMDQ */ + ret = nbl_cmdq_send_flow_ktat(&ht_item, &kt_item, at_item, res_mgt); + +ret_fail: + kfree(at_item); +ret_bitmap_fail: + if (ret) { + spin_lock(&tc_flow_mgt->flow_lock); + if (mt_input->key_full) + nbl_tc_flow_free_bmp_id(select_input->pp_kt_bmp, + node->tbl_id, NBL_TC_KT_FULL_MODE); + else + nbl_tc_flow_free_bmp_id(select_input->pp_kt_bmp, + 
node->tbl_id, NBL_TC_KT_HALF_MODE); + spin_unlock(&tc_flow_mgt->flow_lock); + } + + return ret; +} + +static int nbl_flow_tab_del(struct nbl_flow_tab_filter *node, struct nbl_resource_mgt *res_mgt, + struct nbl_mt_input *mt_input, struct nbl_select_input *select_input) +{ + int ret = 0; + struct nbl_tcam_item tcam_item; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&tcam_item, 0, sizeof(tcam_item)); + + spin_lock(&tc_flow_mgt->flow_lock); + if (node->tcam_flag) { + tcam_item.key_mode = mt_input->key_full ? + NBL_TC_KT_FULL_MODE : NBL_TC_KT_HALF_MODE; + tcam_item.pp_type = select_input->pp_type; + tcam_item.tcam_index = node->tcam_index; + tcam_item.pp_tcam_count = select_input->pp_tcam_count; + ret = nbl_flow_del_tcam_2hw(res_mgt, select_input->tcam_pp_key_mng, + select_input->tcam_pp_ad_mng, &tcam_item); + if (ret) + goto ret_fail; + goto ret_tcam_success; + } + + nbl_tc_flow_free_bmp_id(select_input->pp_kt_bmp, node->tbl_id, + mt_input->key_full ? NBL_TC_KT_FULL_MODE : NBL_TC_KT_HALF_MODE); + + ret = nbl_flow_del_ht_2hw(&node->ht_item, node->pp_type, + select_input->pp_ht0_mng, + select_input->pp_ht1_mng, + res_mgt); + if (ret) + goto ret_fail; + + ret = nbl_flow_del_at_2hw(res_mgt, &node->act_collect, select_input->pp_type); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow failed to del at 2hw, ret %d\n", ret); + goto ret_fail; + } + +ret_tcam_success: + if (node->edit_item.is_mir) + nbl_tc_mcc_free_hw_tbl(tc_flow_mgt->res_mgt, &tc_flow_mgt->tc_mcc_mgt, + &node->edit_item.tc_mcc_list); + +ret_fail: + spin_unlock(&tc_flow_mgt->flow_lock); + return ret; +} + +/* note that the key in node should not be modified */ +static int nbl_flow_tab_ht_at(struct nbl_flow_tab_filter *node, + struct nbl_rule_action *action, u8 opcode, + struct nbl_resource_mgt *res_mgt, + const struct nbl_flow_idx_info *idx_info) +{ + int ret = 0; + struct nbl_mt_input mt_input = { 0 }; + struct nbl_select_input select_input = { 0 }; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_profile_msg *profile_msg; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (!node || !idx_info) + return -EINVAL; + + profile_msg = &tc_flow_mgt->profile_msg[idx_info->profile_id]; + mt_input.key_full = profile_msg->key_full; + ret = nbl_tc_set_pp_related_value(&select_input, &mt_input, tc_flow_mgt, + idx_info->profile_id); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow set pp failed, profile_id %u.\n", + idx_info->profile_id); + return ret; + } + + if (opcode == NBL_OPCODE_ADD) + ret = nbl_flow_tab_add(node, action, res_mgt, idx_info, &mt_input, &select_input); + else if (opcode == NBL_OPCODE_DELETE) + ret = nbl_flow_tab_del(node, res_mgt, &mt_input, &select_input); + + return ret; +} + +static int nbl_flow_tbl_op(void *ptr, struct nbl_rule_action *action, + struct nbl_resource_mgt *res_mgt, + const struct nbl_flow_idx_info *idx_info, + __maybe_unused void *query_rslt, u8 opcode) +{ + struct nbl_flow_tab_filter *flow_tab_node = NULL; + int ret = 0; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (opcode == NBL_OPCODE_ADD && !action) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow add failed as action is NULL.\n"); + return -EINVAL; + } + + flow_tab_node = (struct nbl_flow_tab_filter *)ptr; + ret = nbl_flow_tab_ht_at(flow_tab_node, action, opcode, res_mgt, idx_info); + + return ret; +} + +
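/* Common entry for the flow offload ops below: validate the flow node + * pointer, then dispatch to nbl_flow_tbl_op() with the matching opcode. + */ +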
static int nbl_off_flow_op(void *ptr, struct nbl_rule_action *act, + struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info, u8 opcode, + void *query_rslt) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (!ptr) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow offload op failed, flow node is NULL. op:%u\n", opcode); + return -EINVAL; + } + + return nbl_flow_tbl_op(ptr, act, res_mgt, idx_info, query_rslt, opcode); +} + +/** + * @brief: offload flow add + * + * @param[in] ptr: flow tab node info + * @param[in] act: action to add + * @param[in] res_mgt: resource management context + * @param[in] idx_info: flow index info + * @return int : 0-success other-fail + */ +static int nbl_off_flow_add(void *ptr, struct nbl_rule_action *act, + struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info) +{ + return nbl_off_flow_op(ptr, act, res_mgt, idx_info, NBL_OPCODE_ADD, NULL); +} + +/** + * @brief: offload flow del + * + * @param[in] ptr: flow tab node info + * @param[in] res_mgt: resource management context + * @param[in] idx_info: flow index info + * @return int : 0-success other-fail + */ +static int nbl_off_flow_del(void *ptr, struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info) +{ + return nbl_off_flow_op(ptr, NULL, res_mgt, idx_info, NBL_OPCODE_DELETE, NULL); +} + +/** + * @brief: offload flow query + * + * @param[in] ptr: flow tab node info + * @param[in] idx: flow-id + * @param[out] query_rslt: buffer the query result is written to + * @return int : 0-success other-fail + */ +static int nbl_off_flow_query(void *ptr, u32 idx, void *query_rslt) +{ + struct nbl_flow_idx_info idx_inf = { 0 }; + + idx_inf.flow_idx = idx; + return nbl_off_flow_op(ptr, NULL, NULL, &idx_inf, NBL_OPCODE_QUERY, + query_rslt); +} + +const struct nbl_flow_offload_ops nbl_flow_offload_ops = { + .add = nbl_off_flow_add, + .del = nbl_off_flow_del, + .query = nbl_off_flow_query, +}; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.h new file mode 100644 index 0000000000000000000000000000000000000000..475cad31eff4cd1f0ac5cc10a57f0d9a15d5b59b --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.h @@ -0,0 +1,420 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_TC_FLOW_FILTER_LEONIS_H_ +#define _NBL_TC_FLOW_FILTER_LEONIS_H_ + +#include "nbl_tc_flow_leonis.h" + +#define NBL_ACC_HT0 (0) +#define NBL_ACC_HT1 (1) + +#define NBL_TC_UPDATE_MAC_OFT(p) ((p) += 2) +#define NBL_TC_UPDATE_IP_OFT(p) ((p) += 4) + +struct nbl_flow_offload_ops { + int (*add) + (void *ptr, + struct nbl_rule_action *act, + struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info); + + int (*del) + (void *pt, + struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info); + + int (*query) + (void *ptr, + u32 idx, + void *query_rslt); +}; + +extern const struct nbl_flow_offload_ops nbl_flow_offload_ops; + +struct nbl_flow_action_2hw { + u64 action_type; + int (*act_2hw)(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, struct nbl_resource_mgt *res_mgt); +}; + +struct nbl_del_action_2hw { + u64 action_type; + int (*del_act_2hw)(struct nbl_tc_flow_mgt *tc_flow_mgt, + struct nbl_edit_item *edit_item); +}; + +union nbl_ipv4_tnl_data_u { + struct nbl_ipv4_tnl_data { + u32 act0:22; + u32 act1:22; + u32 rsv1:16; + u32 dst_port:16; + u32 option_class:16; + u32 option_data:32; + u32 dst_ip:32; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_IPV4_TNL_DATA_TAB_WIDTH (sizeof(struct nbl_ipv4_tnl_data) \ + / sizeof(u32)) + u32 data[NBL_IPV4_TNL_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_ipv4_tnl_data)]; +} __packed; + +union nbl_ipv6_tnl_data_u { + struct nbl_ipv6_tnl_data { + u32 act0:22; + u32 act1:22; + u32 act2:22; + u32 act3:22; + u32 act4:22; + u32 rsv:14; + u32 dst_port:16; + u32 option_class:16; + u32 option_data:32; + u64 dst_ipv6_2:64; + u64 dst_ipv6_1:64; + u32 template:4; + } __packed info; +#define NBL_IPV6_TNL_DATA_TAB_WIDTH (sizeof(struct nbl_ipv6_tnl_data) \ + / sizeof(u32)) + u32 data[NBL_IPV6_TNL_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_ipv6_tnl_data)]; +} __packed; + +union nbl_l2_tnl_data_u { + struct nbl_l2_tnl_data { + u32 act0:22; + u32 act1:22; + u32 act2:22; + u32 act3:22; + u32 act4:22; + u32 act5:22; + u32 act6:22; + u32 rsv2:6; + u32 inport:12; + u32 metadata:16; + u32 svlan_id:12; + u32 rsv1:4; + u32 cvlan_id:12; + u32 rsv:4; + u32 ether_type:16; + u64 dst_mac:48; + u32 vni:32; + u32 template:4; + } __packed info; +#define NBL_L2_TNL_DATA_TAB_WIDTH (sizeof(struct nbl_l2_tnl_data) \ + / sizeof(u32)) + u32 data[NBL_L2_TNL_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_tnl_data)]; +} __packed; + +union nbl_l2_notnl_data_u { + struct nbl_l2_notnl_data { + u32 act0:22; + u32 act1:22; + u32 rsv3:4; + u32 inport:12; + u32 svlan_id:12; + u32 rsv2:4; + u32 cvlan_id:12; + u32 rsv1:4; + u32 ether_type:16; + u64 dst_mac:48; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_NOTNL_DATA_TAB_WIDTH (sizeof(struct nbl_l2_notnl_data) \ + / sizeof(u32)) + u32 data[NBL_L2_NOTNL_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_notnl_data)]; +} __packed; + +union nbl_l3_ipv4_data_u { + struct nbl_l3_ipv4_data { + u32 act0:22; + u32 act1:22; + u32 act2:22; + u32 act3:22; + u32 rsv1:4; + u32 metadata:16; + u32 dscp:8; + u32 ttl:8; + u32 dst_ip:32; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L3_IPV4_DATA_TAB_WIDTH (sizeof(struct nbl_l3_ipv4_data) \ + / sizeof(u32)) + u32 data[NBL_L3_IPV4_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l3_ipv4_data)]; +} __packed; + +union nbl_l3_ipv6_data_u { + struct nbl_l3_ipv6_data { + u32 act0:22; + u32 act1:22; + u32 act2:22; + u32 act3:22; + u32 act4:22; + u32 act5:22; + u32 act6:22; 
+ u32 rsv:2; + u32 metadata:16; + u32 dscp:8; + u32 hoplimit:8; + u64 dst_ipv6_2:64; + u64 dst_ipv6_1:64; + u32 template:4; + } __packed info; +#define NBL_L3_IPV6_DATA_TAB_WIDTH (sizeof(struct nbl_l3_ipv6_data) \ + / sizeof(u32)) + u32 data[NBL_L3_IPV6_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l3_ipv6_data)]; +} __packed; + +union nbl_t5_ipv4_data_u { + struct nbl_t5_ipv4_data { + u32 act0:22; + u32 act1:22; + u32 rsv1:16; + u32 metadata:16; + u32 pad:8; + u32 proto:8; + u32 dst_port:16; + u32 src_port:16; + u32 src_ip:32; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_T5_IPV4_DATA_TAB_WIDTH (sizeof(struct nbl_t5_ipv4_data) \ + / sizeof(u32)) + u32 data[NBL_T5_IPV4_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_t5_ipv4_data)]; +} __packed; + +union nbl_t5_ipv6_data_u { + struct nbl_t5_ipv6_data { + u32 act0:22; + u32 act1:22; + u32 act2:22; + u32 act3:22; + u32 act4:22; + u32 rsv:14; + u32 metadata:16; + u32 pad:8; + u32 proto:8; + u32 dst_port:16; + u32 src_port:16; + u64 src_ipv6_2:64; + u64 src_ipv6_1:64; + u32 template:4; + } __packed info; +#define NBL_T5_IPV6_DATA_TAB_WIDTH (sizeof(struct nbl_t5_ipv6_data) \ + / sizeof(u32)) + u32 data[NBL_T5_IPV6_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_t5_ipv6_data)]; +} __packed; + +#define NBL_FEM_KT_ACC_DATA (NBL_PPE_FEM_BASE + 0x00000348) + +#define NBL_FEM_EM0_TCAM_TABLE_ADDR (0xa0b000) +#define NBL_FEM_EM_TCAM_TABLE_DEPTH (64) +#define NBL_FEM_EM_TCAM_TABLE_WIDTH (256) +union fem_em_tcam_table_u { + struct fem_em_tcam_table { + u32 key[5]; /* [159:0] Default:0x0 RW */ + u32 key_vld:1; /* [160] Default:0x0 RW */ + u32 key_size:1; /* [161] Default:0x0 RW */ + u32 rsv:30; /* [191:162] Default:0x0 RO */ + u32 rsv1[2]; /* [255:192] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM_TCAM_TABLE_WIDTH / 32]; + u8 hash_key[sizeof(struct fem_em_tcam_table)]; +} __packed; + +#define NBL_FEM_EM_TCAM_TABLE_REG(r, t) (NBL_FEM_EM0_TCAM_TABLE_ADDR + 0x1000 * (r) + \ + (NBL_FEM_EM_TCAM_TABLE_WIDTH / 8) * (t)) + +#define NBL_FEM_EM0_AD_TABLE_ADDR (0xa08000) +#define NBL_FEM_EM_AD_TABLE_DEPTH (64) +#define NBL_FEM_EM_AD_TABLE_WIDTH (512) +union fem_em_ad_table_u { + struct fem_em_ad_table { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 action8:22; /* [197:176] Default:0x0 RW */ + u32 action9:22; /* [219:198] Default:0x0 RW */ + u32 action10:22; /* [241:220] Default:0x0 RW */ + u32 action11:22; /* [263:242] Default:0x0 RW */ + u32 action12:22; /* [285:264] Default:0x0 RW */ + u32 action13:22; /* [307:286] Default:0x0 RW */ + u32 action14:22; /* [329:308] Default:0x0 RW */ + u32 action15:22; /* [351:330] Default:0x0 RW */ + u32 rsv[5]; /* [511:352] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM_AD_TABLE_WIDTH / 32]; + u8 hash_key[sizeof(struct fem_em_ad_table)]; +} __packed; + +#define NBL_FEM_EM_AD_TABLE_REG(r, t) (NBL_FEM_EM0_AD_TABLE_ADDR + 0x1000 * (r) + \ + (NBL_FEM_EM_AD_TABLE_WIDTH / 8) * (t)) + +union nbl_fem_at_acc_data_u { + struct nbl_fem_at_acc_data { + u32 at1:22; + u32 at2:22; + u32 at3:22; + u32 at4:22; + u32 at5:22; + u32 at6:22; + u32 at7:22; + u32 at8:22; + u32 rsv:16; + } __packed info; +#define NBL_FEM_AT_ACC_DATA_TBL_WIDTH (sizeof(struct 
nbl_fem_at_acc_data) \ + / sizeof(u32)) + u32 data[NBL_FEM_AT_ACC_DATA_TBL_WIDTH]; +} __packed; + +#define NBL_FEM_AT_ACC_DATA (NBL_PPE_FEM_BASE + 0x00000398) + +union nbl_fem_all_at_data_u { + struct nbl_fem_all_at_data { + u32 at1:22; + u32 at2:22; + u32 at3:22; + u32 at4:22; + u32 at5:22; + u32 at6:22; + u32 at7:22; + u32 at8:22; + u32 at9:22; + u32 at10:22; + u32 at11:22; + u32 at12:22; + u32 at13:22; + u32 at14:22; + u32 at15:22; + u32 at16:22; + } __packed info; +#define NBL_FEM_ALL_AT_DATA_TBL_WIDTH (sizeof(struct nbl_fem_all_at_data) \ + / sizeof(u32)) + u32 data[NBL_FEM_ALL_AT_DATA_TBL_WIDTH]; +} __packed; + +union nbl_fem_four_at_data_u { + struct nbl_fem_four_at_data { + u32 at1:22; + u32 at2:22; + u32 at3:22; + u32 at4:22; + } __packed info; +#define NBL_FEM_FOUR_AT_DATA_TBL_WIDTH (sizeof(struct nbl_fem_four_at_data) \ + / sizeof(u32)) + u32 data[NBL_FEM_FOUR_AT_DATA_TBL_WIDTH]; +} __packed; + +/* COMMON CRC16 Calc */ +u16 nbl_calc_crc16(const u8 *data, u32 size, u16 crc_poly, + u16 init_value, u8 ref_flag, u16 xorout); +#define NBL_CRC16_CCITT(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0x0000, 1, 0x0000) +#define NBL_CRC16_CCITT_FALSE(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0xFFFF, 0, 0x0000) +#define NBL_CRC16_XMODEM(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0x0000, 0, 0x0000) +#define NBL_CRC16_IBM(data, size) \ + nbl_calc_crc16(data, size, 0x8005, 0x0000, 1, 0x0000) + +/* CMDQ data content for FEM-KT AT */ +union nbl_cmd_fem_ktat_u { + struct nbl_cmd_fem_ktat { + u32 at_index; + u8 at_valid:1; + u8 rsv0:7; + u8 at_size:8; + u16 rsv1:16; + u32 kt_index; + u8 kt_valid:1; + u8 rsv2:7; + u8 kt_size:8; + u16 rsv3:16; + u32 at_data[8]; + u32 kt_data[10]; + u32 kt_em:2; + u32 rsv4:30; + u32 rsv5[5]; + } __packed info; +#define NBL_CMD_FEM_KTAT_TAB_WIDTH (sizeof(struct nbl_cmd_fem_ktat) \ + / sizeof(u32)) + u32 data[NBL_CMD_FEM_KTAT_TAB_WIDTH]; +} __packed; + +#define NBL_CMD_FEM_KT_SIZE (16 + 32) +#define HALF_CMD_DESC_LENGTH 16 + +union nbl_fem_ht_acc_data_u { + struct nbl_fem_ht_acc_data { + u32 kt_index:17; + u32 hash:14; + u32 vld:1; + } __packed info; +#define NBL_FEM_HT_ACC_DATA_TBL_WIDTH (sizeof(struct nbl_fem_ht_acc_data) \ + / sizeof(u32)) + u32 data[NBL_FEM_HT_ACC_DATA_TBL_WIDTH]; +} __packed; + +/* CMDQ data content for FEM-HT */ +union nbl_cmd_fem_ht_u { + struct nbl_cmd_fem_ht { + u32 bucket_id:2; /* four buckets in the hash entry */ + u32 entry_id:14; /* hash table entry id */ + u32 ht_id:1; /* 0:HT0, 1:HT1 */ + u32 em_id:2; /* 0:pp0 1:pp1 2 or 3:pp2 */ + u32 rsv:13; + u8 ht_valid:1; + u8 rsv0:7; + u8 rsv1:8; + u16 rsv2:16; + u32 kt_index; + u8 kt_valid:1; + u8 rsv3:7; + u8 kt_size:8; + u16 rsv4:16; + union nbl_fem_ht_acc_data_u ht_data[4]; + u32 rsv5[4]; + u32 kt_data[10]; + u32 kt_em:2; + u32 rsv6:30; + u32 rsv7[5]; + } __packed info; +#define NBL_CMD_FEM_HT_TAB_WIDTH (sizeof(struct nbl_cmd_fem_ht) \ + / sizeof(u32)) + u32 data[NBL_CMD_FEM_HT_TAB_WIDTH]; +} __packed; + +/* size macros, all in unit of bytes */ +#define NBL_CMDQ_FEM_R_REQ_LEN 16 +#define NBL_CMDQ_FEM_W_REQ_LEN 112 +#define NBL_CMDQ_FEM_S_REQ_LEN 112 +#define NBL_CMDQ_ACL_TCAM_R_REQ_LEN 4 +#define NBL_CMDQ_ACL_TCAM_W_REQ_LEN 168 +#define NBL_CMDQ_ACL_TCAM_S_REQ_LEN 84 +#define NBL_CMDQ_ACL_STAT_BASE_LEN 32 +#define NBL_CMDQ_ACL_STAT_ITEM_LEN 12 + +#define NBL_PPE_KT_FULL_SIZE 40 +#define NBL_PPE_KT_HALF_SIZE 20 + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.c 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.c new file mode 100644 index 0000000000000000000000000000000000000000..132d20603577f43a2b66ceb4f496f1a5c92a30e7 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.c @@ -0,0 +1,4611 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ +#include "nbl_tc_flow_leonis.h" +#include "nbl_tc_flow_filter_leonis.h" +#include "nbl_p4_actions.h" +#include "nbl_fc_leonis.h" +#include "nbl_tc_tun_leonis.h" +#include "nbl_tc_pedit.h" +#include "nbl_resource_leonis.h" + +static struct nbl_profile_msg g_prf_msg[NBL_ALL_PROFILE_NUM] = { + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + 
.pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 1, + .to_end = 0, + .need_upcall = 0, + .pp_id = 1, + .profile_id = 0, + .g_profile_id = 16, + .key_count = 7, + .key_len = 100, + .key_flag = 20500, + .act_count = 2, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 32, + .key_id = 2, + .name = "t_dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 32, + .key_id = 4, + .name = "t_ovnData", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 68, + .length = 16, + .key_id = 14, + .name = "t_ovnClass", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 84, + .length = 16, + .key_id = 12, + .name = "t_dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 138, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 116, + .length = 22, + .key_id = 0, + .name = "action1", + }, + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 1, + .pt_cmd = 0, + .from_start = 1, + .to_end = 0, + .need_upcall = 0, + .pp_id = 1, + .profile_id = 1, + .g_profile_id = 17, + .key_count = 10, + .key_len = 196, + .key_flag = 20504, + .act_count = 5, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 128, + .key_id = 3, + .name = "t_dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 32, + .key_id = 4, + .name = "t_ovnData", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 164, + .length = 16, + .key_id = 14, + .name = "t_ovnClass", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 180, + .length = 16, + .key_id = 12, + .name = "t_dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 1, + 
.pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 1, + .profile_id = 2, + .g_profile_id = 18, + .key_count = 18, + .key_len = 160, + .key_flag = 549999083555, + .act_count = 7, + .pre_assoc_profile_id = {16, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {32, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 32, + .key_id = 5, + .name = "t_vni", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 48, + .key_id = 23, + .name = "dstMAC", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 84, + .length = 16, + .key_id = 27, + .name = "etherType", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 100, + .length = 16, + .key_id = 26, + .name = "vlan2_pcv", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 116, + .length = 16, + .key_id = 25, + .name = "vlan1_pcv", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 8, + .key_id = 1, + .name = "sport_b8", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 156, + .length = 4, + .key_id = 39, + .name = "sport_b4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)3, + .offset = 100, + .length = 4, + .key_id = 0, + .name = "vlan2_pcv_mask", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)3, + .offset = 116, + .length = 4, + .key_id = 0, + .name = "vlan1_pcv_mask", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 188, + .length = 22, + .key_id = 0, + .name = "action5", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 166, + .length = 22, + .key_id = 0, + .name = "action6", + }, + }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 0, + .pt_cmd = 0, + .from_start = 1, + .to_end = 0, + .need_upcall = 0, + .pp_id = 1, + .profile_id = 3, + .g_profile_id = 19, + .key_count = 11, + .key_len = 112, + .key_flag = 549999083522, + .act_count = 2, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {32, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 48, + .key_id = 23, + .name = "dstMAC", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + 
.offset = 52, + .length = 16, + .key_id = 27, + .name = "etherType", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 68, + .length = 16, + .key_id = 26, + .name = "vlan2_pcv", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 84, + .length = 16, + .key_id = 25, + .name = "vlan1_pcv", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 100, + .length = 8, + .key_id = 1, + .name = "sport_b8", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 108, + .length = 4, + .key_id = 39, + .name = "sport_b4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)3, + .offset = 68, + .length = 4, + .key_id = 0, + .name = "vlan2_pcv_mask", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)3, + .offset = 84, + .length = 4, + .key_id = 0, + .name = "vlan1_pcv_mask", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 138, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 116, + .length = 22, + .key_id = 0, + .name = "action1", + }, + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 1, + .pp_id = 2, + .profile_id = 0, + .g_profile_id = 32, + .key_count = 9, + .key_len = 68, + .key_flag = 51541704705, + .act_count = 4, + .pre_assoc_profile_id = {18, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {34, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 32, + .key_id = 21, + .name = "dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 8, + .key_id = 35, + .name = "ttl", + }, + { + .valid = 1, + .key_type = (enum 
nbl_flow_key_type)2, + .offset = 44, + .length = 8, + .key_id = 34, + .name = "tos", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 52, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 138, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 116, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 94, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 72, + .length = 22, + .key_id = 0, + .name = "action3", + }, + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 1, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 1, + .pp_id = 2, + .profile_id = 1, + .g_profile_id = 33, + .key_count = 12, + .key_len = 164, + .key_flag = 51543801857, + .act_count = 7, + .pre_assoc_profile_id = {18, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {35, 37, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 128, + .key_id = 22, + .name = "dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 8, + .key_id = 35, + .name = "ttl", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 140, + .length = 8, + .key_id = 34, + .name = "tos", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 188, + .length = 22, + .key_id = 0, + .name = "action5", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 166, + .length = 22, + .key_id = 0, + .name = "action6", + }, + }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 1, + .pt_cmd = 1, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 2, + .profile_id = 2, + .g_profile_id = 34, + .key_count = 13, + .key_len = 164, + .key_flag = 8801195917312, + .act_count = 7, + .pre_assoc_profile_id = {32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 32, + .key_id = 19, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 
68, + .length = 32, + .key_id = 21, + .name = "dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 124, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 188, + .length = 22, + .key_id = 0, + .name = "action5", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 166, + .length = 22, + .key_id = 0, + .name = "action6", + }, + }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 1, + .pt_cmd = 1, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 2, + .profile_id = 3, + .g_profile_id = 35, + .key_count = 13, + .key_len = 164, + .key_flag = 8801198538752, + .act_count = 7, + .pre_assoc_profile_id = {33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 32, + .key_id = 20, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 68, + .length = 32, + .key_id = 22, + .name = "dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 124, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 188, + .length = 22, + .key_id = 0, + .name = "action5", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 166, + .length = 22, + .key_id = 0, + .name = "action6", + }, 
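+ /* observation from the tables above: action slots descend in 22-bit strides, action0 at offset 298 (full-width keys) or 138 (half keys), down to action6 at 166 */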
+ }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 1, + .pp_id = 2, + .profile_id = 4, + .g_profile_id = 36, + .key_count = 8, + .key_len = 100, + .key_flag = 5100797953, + .act_count = 2, + .pre_assoc_profile_id = {32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 32, + .key_id = 19, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 52, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 68, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 84, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 138, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 116, + .length = 22, + .key_id = 0, + .name = "action1", + }, + }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 1, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 1, + .pp_id = 2, + .profile_id = 5, + .g_profile_id = 37, + .key_count = 11, + .key_len = 196, + .key_flag = 5101322241, + .act_count = 5, + .pre_assoc_profile_id = {33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 128, + .key_id = 20, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 164, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 180, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 
0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + 
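/* all-zero .valid = 0 entries are placeholders; the array is indexed by profile id, so unused ids keep zeroed slots */ +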
.next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 3, + .profile_id = 0, + .g_profile_id = 48, + .key_count = 7, + .key_len = 116, + .key_flag = 17597286842369, + .act_count = 0, + .pre_assoc_profile_id = {34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 32, + .key_id = 19, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 52, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 68, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 84, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 100, + .length = 16, + .key_id = 44, + .name = "dp_hash0", + }, + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 3, + .profile_id = 1, + .g_profile_id = 49, + .key_count = 7, + .key_len = 212, + .key_flag = 17597287366657, + .act_count = 0, + .pre_assoc_profile_id = {35, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 128, + .key_id = 20, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 164, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 180, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 196, + .length = 16, + .key_id = 44, + .name = "dp_hash0", + }, + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + 
.key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, +}; + +static struct nbl_profile_assoc_graph g_prf_graph[NBL_ASSOC_PROFILE_GRAPH_NUM] = { + { + .key_flag = 26994920673335, + .profile_count = 5, + .profile_id = {16, 18, 32, 34, 48}, + }, + { + .key_flag = 606641606711, + .profile_count = 4, + .profile_id = {16, 18, 32, 36}, + }, + { + .key_flag = 601540808759, + .profile_count = 3, + .profile_id = {16, 18, 32}, + }, + { + .key_flag = 26994923294775, + .profile_count = 5, + .profile_id = {16, 18, 33, 35, 49}, + }, + { + .key_flag = 606644228151, + .profile_count = 4, + .profile_id = {16, 18, 33, 37}, + }, + { + .key_flag = 601542905911, + .profile_count = 3, + .profile_id = {16, 18, 33}, + }, + { + .key_flag = 549999104055, + .profile_count = 2, + .profile_id = {16, 18}, + }, + { + .key_flag = 20500, + .profile_count = 1, + .profile_id = {16}, + }, + { + .key_flag = 26994920673339, + .profile_count = 5, + .profile_id = {17, 18, 32, 34, 48}, + }, + { + .key_flag = 606641606715, + .profile_count = 4, + .profile_id = {17, 18, 32, 36}, + }, + { + .key_flag = 601540808763, + .profile_count = 3, + .profile_id = {17, 18, 32}, + }, + { + .key_flag = 26994923294779, + .profile_count = 5, + .profile_id = {17, 18, 33, 35, 49}, + }, + { + .key_flag = 606644228155, + .profile_count = 4, + .profile_id = {17, 18, 33, 37}, + }, + { + .key_flag = 601542905915, + .profile_count = 3, + .profile_id = {17, 18, 33}, + }, + { + .key_flag = 549999104059, + .profile_count = 2, + .profile_id = {17, 18}, + }, + { + .key_flag = 
20504, + .profile_count = 1, + .profile_id = {17}, + }, + { + .key_flag = 26994920652803, + .profile_count = 4, + .profile_id = {19, 32, 34, 48}, + }, + { + .key_flag = 606641586179, + .profile_count = 3, + .profile_id = {19, 32, 36}, + }, + { + .key_flag = 601540788227, + .profile_count = 2, + .profile_id = {19, 32}, + }, + { + .key_flag = 26994923274243, + .profile_count = 4, + .profile_id = {19, 33, 35, 49}, + }, + { + .key_flag = 606644207619, + .profile_count = 3, + .profile_id = {19, 33, 37}, + }, + { + .key_flag = 601542885379, + .profile_count = 2, + .profile_id = {19, 33}, + }, + { + .key_flag = 549999083522, + .profile_count = 1, + .profile_id = {19}, + }, +}; + +static u8 g_profile_graph_count = 23; + +static void nbl_assign_key(u32 *kt_data, bool full, + u32 offset, u16 length, u32 value) +{ + u32 full_offset = NBL_FEM_KT_LEN - offset - length; + u32 index = full_offset / NBL_BITS_IN_U32; + u32 remain = full_offset % NBL_BITS_IN_U32; + u32 shifted = 0; + + if (NBL_BITS_IN_U32 - remain < length) { + /* if the value span across u32 boundary */ + shifted = NBL_BITS_IN_U32 - remain; + kt_data[index] += (value << remain); + kt_data[index + 1] += (value >> shifted); + } else { + kt_data[index] += (value << remain); + } +} + +static void nbl_assign_flow_key_input(u32 *kt_data, bool full, + const struct nbl_flow_key_info *key, + struct nbl_fdir_fltr *input, + u16 tab_index) +{ + const u32 *data = NULL; + const u32 *mask = NULL; + u16 temp_etype = 0; + u16 length = (u16)(key->length / NBL_BITS_IN_U32); + int i = 0; + + switch (1ULL << key->key_id) { + case NBL_FLOW_KEY_TABLE_IDX_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + tab_index); + break; + case NBL_FLOW_KEY_INPORT8_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->port & 0xFF); + break; + case NBL_FLOW_KEY_INPORT4_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + (input->port >> NBL_BITS_IN_U8) & 0xF); + break; + case NBL_FLOW_KEY_T_DIPV4_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip_outer.dst_ip.addr); + break; + case NBL_FLOW_KEY_T_DIPV6_FLAG: + data = (u32 *)(&input->ip_outer.dst_ip.v6_addr); + for (i = length - 1; i >= 0; i--, data++) + nbl_assign_key(kt_data, full, + key->offset + NBL_BITS_IN_U32 * i, + NBL_BITS_IN_U32, (*data)); + break; + case NBL_FLOW_KEY_T_SRCPORT_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->l4_outer.src_port); + break; + case NBL_FLOW_KEY_T_DSTPORT_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->l4_outer.dst_port); + break; + case NBL_FLOW_KEY_T_PROTOCOL_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip_outer.proto); + break; + case NBL_FLOW_KEY_T_TOS_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip_outer.tos); + break; + case NBL_FLOW_KEY_T_TTL_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip_outer.ttl); + break; + case NBL_FLOW_KEY_T_VNI_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->tnl.vni); + break; + case NBL_FLOW_KEY_SIPV4_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip.src_ip.addr & input->ip_mask.src_ip.addr); + break; + case NBL_FLOW_KEY_SIPV6_FLAG: + data = (u32 *)(&input->ip.src_ip.v6_addr); + mask = (u32 *)(&input->ip_mask.src_ip.v6_addr); + for (i = length - 1; i >= 0; i--, data++, mask++) + nbl_assign_key(kt_data, full, + key->offset + NBL_BITS_IN_U32 * i, + NBL_BITS_IN_U32, (*data) & (*mask)); + break; + case 
NBL_FLOW_KEY_DIPV4_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip.dst_ip.addr); + break; + case NBL_FLOW_KEY_DIPV6_FLAG: + data = (u32 *)(&input->ip.dst_ip.v6_addr); + /* no dst mask is applied here; mask stays NULL, so do not advance it */ + for (i = length - 1; i >= 0; i--, data++) + nbl_assign_key(kt_data, full, + key->offset + NBL_BITS_IN_U32 * i, + NBL_BITS_IN_U32, (*data)); + break; + case NBL_FLOW_KEY_DSTMAC_FLAG: + data = (u32 *)input->l2_data.dst_mac; + nbl_assign_key(kt_data, full, key->offset + NBL_BITS_IN_U16, + NBL_BITS_IN_U32, *data); + nbl_assign_key(kt_data, full, key->offset, NBL_BITS_IN_U16, + (*(data + 1)) & 0x0000FFFF); + break; + case NBL_FLOW_KEY_SRCMAC_FLAG: + data = (u32 *)input->l2_data.src_mac; + nbl_assign_key(kt_data, full, key->offset + NBL_BITS_IN_U16, + NBL_BITS_IN_U32, *data); + nbl_assign_key(kt_data, full, key->offset, NBL_BITS_IN_U16, + (*(data + 1)) & 0x0000FFFF); + break; + case NBL_FLOW_KEY_SVLAN_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->svlan_tag); + break; + case NBL_FLOW_KEY_CVLAN_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->cvlan_tag); + break; + case NBL_FLOW_KEY_ETHERTYPE_FLAG: + if (input->cvlan_type) + temp_etype = input->cvlan_type; + else if (input->svlan_type) + temp_etype = input->svlan_type; + else + temp_etype = input->l2_data.ether_type; + nbl_assign_key(kt_data, full, key->offset, key->length, + temp_etype); + break; + case NBL_FLOW_KEY_SRCPORT_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->l4.src_port & input->l4_mask.src_port); + break; + case NBL_FLOW_KEY_DSTPORT_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->l4.dst_port & input->l4_mask.dst_port); + break; + case NBL_FLOW_KEY_PROTOCOL_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip.proto & input->ip_mask.proto); + break; + case NBL_FLOW_KEY_TCPSTAT_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->l4.tcp_flag); + break; + case NBL_FLOW_KEY_TOS_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip.tos); + break; + case NBL_FLOW_KEY_TTL_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip.ttl); + break; + case NBL_FLOW_KEY_T_DSTMAC_FLAG: + case NBL_FLOW_KEY_T_SRCMAC_FLAG: + case NBL_FLOW_KEY_T_SVLAN_FLAG: + case NBL_FLOW_KEY_T_CVLAN_FLAG: + case NBL_FLOW_KEY_T_ETHERTYPE_FLAG: + case NBL_FLOW_KEY_T_NPROTO_FLAG: + case NBL_FLOW_KEY_T_TCPSTAT_FLAG: + case NBL_FLOW_KEY_ARP_OP_FLAG: + case NBL_FLOW_KEY_ICMPV6_TYPE_FLAG: + case NBL_FLOW_KEY_RDMA_ACK_SEQ_FLAG: + case NBL_FLOW_KEY_RDMA_QPN_FLAG: + case NBL_FLOW_KEY_RDMA_OP_FLAG: + case NBL_FLOW_KEY_INPORT2_FLAG: + case NBL_FLOW_KEY_INPORT2L_FLAG: + default: + break; + } +} + +/* kt_data: 320-bit key data, i.e. five u64 handled as ten u32 words */ +static void nbl_assign_hash_key_key(u32 *kt_data, + struct nbl_flow_key_info *key, + struct nbl_profile_msg *prf_msg, + struct nbl_fdir_fltr *input, + u16 tab_index) +{ + /* assign profile id, key PHVs (key and action data) */ + /* ignore bit setter and masks, actions */ + switch (key->key_type) { + case NBL_FLOW_KEY_TYPE_PID: + nbl_assign_key(kt_data, prf_msg->key_full, key->offset, + key->length, prf_msg->profile_id); + break; + case NBL_FLOW_KEY_TYPE_PHV: + nbl_assign_flow_key_input(kt_data, prf_msg->key_full, key, + input, tab_index); + break; + case NBL_FLOW_KEY_TYPE_ACTION: + break; + case NBL_FLOW_KEY_TYPE_BTS: + break; + case NBL_FLOW_KEY_TYPE_MASK: + break; + default: + break; + } +} + +static void nbl_debug_print_hash_key(struct nbl_common_info *common, + struct
nbl_flow_tab_conf *hash_key, + struct nbl_profile_msg *prf_msg, + struct nbl_fdir_fltr *input) +{ + size_t index = 0; + u32 *ptr = (u32 *)(&hash_key->key_value); + /* debug example: tnl v4/v6/l2 */ + const union nbl_ipv4_tnl_data_u *p0 = (union nbl_ipv4_tnl_data_u *)(ptr); + const union nbl_ipv6_tnl_data_u *p1 = (union nbl_ipv6_tnl_data_u *)(ptr); + const union nbl_l2_tnl_data_u *p2 = (union nbl_l2_tnl_data_u *)(ptr); + + /* debug example: nontnl l2 */ + const union nbl_l2_notnl_data_u *p3 = (union nbl_l2_notnl_data_u *)(ptr); + + /* debug example: l3 */ + const union nbl_l3_ipv4_data_u *p4 = (union nbl_l3_ipv4_data_u *)(ptr); + const union nbl_l3_ipv6_data_u *p5 = (union nbl_l3_ipv6_data_u *)(ptr); + + /* debug example: t5 ipv4 (160 bits) and t5 ipv6 (320 bits) */ + const union nbl_t5_ipv4_data_u *p8 = (union nbl_t5_ipv4_data_u *)(ptr); + const union nbl_t5_ipv6_data_u *p9 = (union nbl_t5_ipv6_data_u *)(ptr); + + unsigned long long test_l2_notnl = + NBL_FLOW_KEY_DSTMAC_FLAG | NBL_FLOW_KEY_ETHERTYPE_FLAG | + NBL_FLOW_KEY_SVLAN_FLAG | NBL_FLOW_KEY_CVLAN_FLAG; + + unsigned long long test_tnl_v4 = + NBL_FLOW_KEY_T_DIPV4_FLAG | NBL_FLOW_KEY_T_OPT_DATA_FLAG | + NBL_FLOW_KEY_T_OPT_CLASS_FLAG | NBL_FLOW_KEY_T_DSTPORT_FLAG; + + unsigned long long test_tnl_v6 = + NBL_FLOW_KEY_T_DIPV6_FLAG | NBL_FLOW_KEY_T_OPT_DATA_FLAG | + NBL_FLOW_KEY_T_OPT_CLASS_FLAG | NBL_FLOW_KEY_T_DSTPORT_FLAG; + + unsigned long long test_tnl_l2 = + NBL_FLOW_KEY_T_VNI_FLAG | NBL_FLOW_KEY_DSTMAC_FLAG | + NBL_FLOW_KEY_ETHERTYPE_FLAG | NBL_FLOW_KEY_CVLAN_FLAG | + NBL_FLOW_KEY_SVLAN_FLAG; + + unsigned long long test_l3_v4 = NBL_FLOW_KEY_DIPV4_FLAG | + NBL_FLOW_KEY_TTL_FLAG | + NBL_FLOW_KEY_DSCP_FLAG; + + unsigned long long test_l3_v6 = NBL_FLOW_KEY_DIPV6_FLAG | + NBL_FLOW_KEY_TTL_FLAG | + NBL_FLOW_KEY_DSCP_FLAG; + + unsigned long long test_t5_ipv6 = + NBL_FLOW_KEY_DSTPORT_FLAG | NBL_FLOW_KEY_SRCPORT_FLAG | + NBL_FLOW_KEY_PROTOCOL_FLAG | NBL_FLOW_KEY_SIPV6_FLAG; + + unsigned long long test_t5_ipv4 = + NBL_FLOW_KEY_DSTPORT_FLAG | NBL_FLOW_KEY_SRCPORT_FLAG | + NBL_FLOW_KEY_PROTOCOL_FLAG | NBL_FLOW_KEY_SIPV4_FLAG; + + u8 offset = + prf_msg->key_full ? 
0 : (NBL_FEM_KT_HALF_LEN / NBL_BITS_IN_U32); + ptr += offset; + + /* print out all the fields */ + for (index = 0; index < 10; index++) + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw kt data[%zu]: %x\n", index, + hash_key->key_value[index]); + + if ((prf_msg->key_flag & test_tnl_v4) == test_tnl_v4) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated tv4 profile: id %d, " + "dipv4 0x%x, optdata 0x%x, optclass 0x%x, dport 0x%x\n", + p0->info.template, p0->info.dst_ip, + p0->info.option_data, p0->info.option_class, + p0->info.dst_port); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original tv4 profile: id %d, " + "dipv4 0x%x, dport 0x%x\n", + prf_msg->profile_id, input->ip_outer.dst_ip.addr, + input->l4_outer.dst_port); + } else if ((prf_msg->key_flag & test_tnl_v6) == test_tnl_v6) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated tv6 profile: id %d, " + "dipv6 0x%lx 0x%lx, optdata 0x%x, optclass 0x%x, dport 0x%x\n", + p1->info.template, (unsigned long)p1->info.dst_ipv6_1, + (unsigned long)p1->info.dst_ipv6_2, + p1->info.option_data, p1->info.option_class, + p1->info.dst_port); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original tv6 profile: id %d, " + "dipv6 0x%x, dport 0x%x\n", + prf_msg->profile_id, input->ip_outer.dst_ip.addr, + input->l4_outer.dst_port); + } else if ((prf_msg->key_flag & test_tnl_l2) == test_tnl_l2) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated tnl l2 profile: id %d, " + "vni %d, dstmac 0x%lx, etype 0x%04x, cvlan %d, svlan %d\n", + p2->info.template, p2->info.vni, + (unsigned long)p2->info.dst_mac, p2->info.ether_type, + p2->info.cvlan_id, p2->info.svlan_id); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original tnl l2 profile: id %d, " + "dstmac 0x%llx, etype 0x%04x, cvlan %d, svlan %d\n", + prf_msg->profile_id, + *(u64 *)input->l2_data.dst_mac, + input->l2_data.ether_type, input->cvlan_tag, + input->svlan_tag); + } else if ((prf_msg->key_flag & test_l2_notnl) == test_l2_notnl) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated l2 profile: id %d, dstmac 0x%lx, " + "etype 0x%04x, svlan %d, cvlan %d\n", + p3->info.template, (unsigned long)p3->info.dst_mac, + p3->info.ether_type, p3->info.svlan_id, + p3->info.cvlan_id); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original l2 profile: id %d, dstmac 0x%llx, " + "etype 0x%04x, svlan %d, cvlan %d\n", + prf_msg->profile_id, + *(u64 *)input->l2_data.dst_mac, + input->l2_data.ether_type, input->svlan_tag, + input->cvlan_tag); + } else if ((prf_msg->key_flag & test_l3_v4) == test_l3_v4) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated l3 v4: id %d, dip 0x%x, " + "ttl %d, dscp %d\n", + p4->info.template, p4->info.dst_ip, p4->info.ttl, + p4->info.dscp); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original l3 v4: id %d, dip 0x%x, " + "ttl %d, dscp %d\n", + prf_msg->profile_id, input->ip.dst_ip.addr, + input->ip.ttl, input->ip.tos); + } else if ((prf_msg->key_flag & test_l3_v6) == test_l3_v6) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated l3 v6: id %d, dip 0x%llx-%llx, " + "ttl %d, dscp %d\n", + p5->info.template, p5->info.dst_ipv6_1, + p5->info.dst_ipv6_2, p5->info.hoplimit, p5->info.dscp); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original l3 v6: id %d, dip 0x%llx-%llx, " + "ttl %d, dscp %d\n", + prf_msg->profile_id, + *(u64 *)input->ip.dst_ip.v6_addr, + *((u64 *)input->ip.dst_ip.v6_addr + 1), + input->ip.ttl, input->ip.tos); + } else if ((prf_msg->key_flag & test_t5_ipv4) == test_t5_ipv4) { + nbl_debug(common,
NBL_DEBUG_FLOW, + "tc flow hw calculated t5 ipv4 profile: id %d, sip 0x%x, " + "srcport %d, dstport %d, protocol %d\n", + p8->info.template, p8->info.src_ip, p8->info.src_port, + p8->info.dst_port, p8->info.proto); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original data: sip: 0x%x, srcport %d, " + " dstport %d, protocol %d\n", + input->ip.src_ip.addr, input->l4.src_port, + input->l4.dst_port, input->ip.proto); + } else if ((prf_msg->key_flag & test_t5_ipv6) == test_t5_ipv6) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated t5 ipv6 profile: sip 0x%llx-%llx, " + "srcport %d, dstport %d, protocol %d\n", + p9->info.src_ipv6_1, p9->info.src_ipv6_2, + p9->info.src_port, p9->info.dst_port, p9->info.proto); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original data: sip: 0x%llx-%llx, srcport %d, " + " dstport %d, protocol %d\n", + *(u64 *)input->ip.src_ip.v6_addr, + *((u64 *)input->ip.src_ip.v6_addr + 1), + input->l4.src_port, input->l4.dst_port, + input->ip.proto); + } +} + +static void nbl_assign_hash_key(struct nbl_flow_tab_conf *hash_key, + struct nbl_flow_pattern_conf *filter, + struct nbl_resource_mgt *res_mgt, + struct nbl_profile_offload_msg *off_msg) +{ + /* 320 bit key data, namely 5 * 64 bits */ + u32 *kt_data = hash_key->key_value; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_profile_msg *prf_msg = + &tc_flow_mgt->profile_msg[off_msg->profile_id]; + struct nbl_flow_key_info *key_info = prf_msg->flow_keys; + u8 i = 0; + + /* loop through all keys of this profile */ + for (i = 0; i < prf_msg->key_count; i++, key_info++) { + if (!key_info->valid) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow hw key %s invalid, something went wrong\n", + key_info->name); + + return; + } + + nbl_assign_hash_key_key(kt_data, key_info, prf_msg, + &filter->input, off_msg->assoc_tbl_id); + } + + /* print out the 320 bit key */ + nbl_debug_print_hash_key(common, hash_key, prf_msg, &filter->input); +} + +static inline void nbl_flow_set_bits(u8 *p, u8 mask) +{ + *p |= mask; +} + +static inline void nbl_flow_clr_bits(u8 *p, u8 mask) +{ + *p &= ~mask; +} + +static void nbl_flow_resource_available(struct nbl_tc_flow_mgt *tc_flow_mgt) +{ + nbl_flow_set_bits(&tc_flow_mgt->init_status, NBL_FLOW_AVAILABLE_BIT); +} + +void nbl_flow_resource_unavailable(struct nbl_tc_flow_mgt *tc_flow_mgt) +{ + nbl_flow_clr_bits(&tc_flow_mgt->init_status, NBL_FLOW_AVAILABLE_BIT); +} + +bool nbl_flow_is_available(struct nbl_tc_flow_mgt *tc_flow_mgt) +{ + u8 ret = tc_flow_mgt->init_status & NBL_FLOW_AVAILABLE_BIT; + + return ret != 0; +} + +static bool nbl_flow_is_resource_ready(struct nbl_tc_flow_mgt *tc_flow_mgt) +{ + u8 ret = tc_flow_mgt->init_status & NBL_FLOW_INIT_BIT; + + return ret != 0; +} + +static void nbl_flow_set_resource_init_status(struct nbl_tc_flow_mgt *tc_flow_mgt, + bool status) +{ + if (status) + nbl_flow_set_bits(&tc_flow_mgt->init_status, + NBL_FLOW_INIT_BIT); + else + nbl_flow_clr_bits(&tc_flow_mgt->init_status, + NBL_FLOW_INIT_BIT); +} + +/** + * @brief: offload sw-tab to hw + */ +static int nbl_add_nic_hw_flow_tab(void *node, struct nbl_rule_action *act, + struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info) +{ + int rc = 0; + + WARN_ON(!node); + rc = nbl_flow_offload_ops.add(node, act, res_mgt, idx_info); + return rc; +} + +/** + * @brief: hw flow tab destroy + */ +static int nbl_del_nic_hw_flow_tab(void *node, struct nbl_resource_mgt *res_mgt, + struct 
nbl_flow_idx_info *idx_info) +{ + int rc = 0; + + WARN_ON(!node); + rc = nbl_flow_offload_ops.del(node, res_mgt, idx_info); + return rc; +} + +/** + * @brief: hw flow tab query + */ +__maybe_unused static int nbl_query_nic_hw_flow_tab(void *node, u32 idx, + void *query_rslt) +{ + int rc = 0; + + WARN_ON(!node); + rc = nbl_flow_offload_ops.query(node, idx, query_rslt); + return rc; +} + +int nbl_tc_flow_alloc_bmp_id(unsigned long *bitmap_mng, u32 size, + u8 type, u32 *bitmap_id) +{ + u32 id; + + if (type == NBL_TC_KT_HALF_MODE) { + id = find_first_zero_bit(bitmap_mng, size); + if (id == size) + return -ENOSPC; + set_bit(id, bitmap_mng); + } else { + id = nbl_common_find_available_idx(bitmap_mng, size, 2, 2); + if (id == size) + return -ENOSPC; + set_bit(id, bitmap_mng); + set_bit(id + 1, bitmap_mng); + } + + *bitmap_id = id; + return 0; +} + +void nbl_tc_flow_free_bmp_id(unsigned long *bitmap_mng, u32 id, u8 type) +{ + if (type == NBL_TC_KT_HALF_MODE) { + clear_bit(id, bitmap_mng); + } else { + clear_bit(id, bitmap_mng); + clear_bit(id + 1, bitmap_mng); + } +} + +/** + * @brief: tnl: ipv4 tnl filter hash tab search func + * + * @param[in] tc_flow_mgt: tc flow hw mgt + * @param[in] key: node key info + * @return nbl_flow_tab_filter *: return node ptr + */ +static struct nbl_flow_tab_filter * +nbl_flow_tab_filter_lookup(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_tab_conf *key, u8 profile_id) +{ + struct nbl_flow_tab_filter *tab_filter = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + if (!tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash) + return NULL; + + tab_filter = nbl_common_get_hash_node(tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash, + key); + return tab_filter; +} + +/** + * @brief: flow_tab.insert hash tab node func + * + * @param[in] tc_flow_mgt: tc flow hw mgt + * @param[in] node: node key info + * @return int: 0-success other-fail + */ +static int nbl_insert_flow_tab_filter(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_tab_conf *key, + struct nbl_flow_tab_filter *node, + struct nbl_flow_tab_filter **new_node, + u8 profile_id) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int ret; + + if (!tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash) + return -EINVAL; + + ret = nbl_common_alloc_hash_node(tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash, key, + node, (void **)new_node); + if (ret) + return ret; + + tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt++; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw insert pid=%d tab_cnt++ =%d\n", + profile_id, tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt); + + return 0; +} + +/** + * @brief:delete ipv4-tnl-hash-list + * @param[in] tc_flow_mgt: tc flow hw mgt + * @return int: 0-success other-fail + * + */ +static int +nbl_flow_flush_flow_tab_hash_list(struct nbl_resource_mgt *res_mgt, + u8 profile_id) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + if (!tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash) + return 0; + + nbl_common_remove_hash_table(tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash, NULL); + tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash = NULL; + + return 0; +} + +static int nbl_flow_flush_hash_list(struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + u8 i = 0; + + for (i = 0; i < NBL_ALL_PROFILE_NUM; i++) + ret |= nbl_flow_flush_flow_tab_hash_list(res_mgt, i); + + return ret; +} + +/** + * @brief: tnl.remove 
hash tab node func + * + * @param[in] res_mgt: resource mgt info + * @param[in] key: node key info + * @param[in] off: whether the delete needs to be offloaded to hw + * @param[in] last_stage: whether this is the last profile stage + * @param[in] profile_id: profile to remove from + * @return int: 0-success other-fail + */ +static int nbl_rmv_flow_tab_filter(struct nbl_resource_mgt *res_mgt, + void *key, bool off, bool last_stage, + u8 profile_id) +{ + struct nbl_flow_idx_info idx_info = { 0 }; + struct nbl_flow_tab_filter *node = NULL; + struct nbl_flow_tab_filter tmp_node; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int ret = 0; + + spin_lock(&tc_flow_mgt->flow_lock); + node = nbl_flow_tab_filter_lookup(res_mgt, key, profile_id); + if (!node) { + spin_unlock(&tc_flow_mgt->flow_lock); + } else if (node->ref_cnt > NBL_FLOW_TAB_ONE_TIME) { + node->ref_cnt--; + spin_unlock(&tc_flow_mgt->flow_lock); + } else { + memcpy(&tmp_node, node, sizeof(*node)); + if (node->edit_item.is_mir) + list_replace_init(&node->edit_item.tc_mcc_list, + &tmp_node.edit_item.tc_mcc_list); + + if (node->assoc_tbl_id >= NBL_FLOW_TABLE_NUM) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw assoc_tbl_id invalid %u.\n", + node->assoc_tbl_id); + return -EINVAL; + } + + if (node->assoc_tbl_id != 0) + nbl_tc_flow_free_bmp_id(tc_flow_mgt->assoc_table_bmp, + node->assoc_tbl_id, 0); + + nbl_common_free_hash_node(tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash, + key); + node = NULL; + + if (tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt > 0) { + tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt--; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw rmv pid=%d tab_cnt--=%d\n", + profile_id, tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt); + } else { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw rmv pid=%d tab_cnt=%d, do not reduce\n", + profile_id, tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt); + spin_unlock(&tc_flow_mgt->flow_lock); + return -EINVAL; + } + + spin_unlock(&tc_flow_mgt->flow_lock); + + /* del hw */ + if (off) { + idx_info.last_stage = last_stage; + idx_info.profile_id = profile_id; + ret = nbl_del_nic_hw_flow_tab(&tmp_node, res_mgt, &idx_info); + } + } + return ret; +} + +/** + * @brief: flow_tab: add the hash node and build the key value + * + * @param[in] key: node key info + * @param[out] ptr: hash node + * @return int: 0-success other-fail + */ +static int nbl_flow_tab_hash_add(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow *tc_flow_ptr, void **ptr, + struct nbl_profile_offload_msg *prof_off_msg) +{ + struct nbl_flow_tab_filter *node = NULL; + const struct nbl_flow_tab_filter *pre_node = NULL; + struct nbl_flow_tab_conf hash_key; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int ret = 0; + u8 profile_id = prof_off_msg->profile_id; + u8 profile_stage = prof_off_msg->profile_stage; + u32 entries = 0; + struct nbl_flow_tab_filter filter_data; + + memset(&hash_key, 0, sizeof(hash_key)); + + if (profile_stage != 0) { + pre_node = tc_flow_ptr->profile_rule[profile_stage - 1]; + if (!pre_node) + return -EINVAL; + prof_off_msg->assoc_tbl_id = (u16)pre_node->assoc_tbl_id; + } + nbl_assign_hash_key(&hash_key, filter, res_mgt, prof_off_msg); + + spin_lock(&tc_flow_mgt->flow_lock); + node = nbl_flow_tab_filter_lookup(res_mgt, &hash_key, profile_id); + if (node) { + if (prof_off_msg->last_stage) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow offload already, drop this one\n"); + spin_unlock(&tc_flow_mgt->flow_lock); + return -EEXIST; + } + + node->ref_cnt++; + *ptr = node; + tc_flow_ptr->profile_id[profile_stage] = profile_id; + tc_flow_ptr->profile_rule[profile_stage] = node; + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw flow_tab refcnt ++.\n"); + return 0; + } + + if (profile_id <= NBL_PP1_PROFILE_ID_MAX && profile_id > NBL_PP0_PROFILE_ID_MAX) { + entries = NBL_FLOW_TABLE_LEN; + } else if (profile_id <= NBL_PP2_PROFILE_ID_MAX && profile_id > NBL_PP1_PROFILE_ID_MAX) { + entries = NBL_FLOW_TABLE_LEN * 8; + } else { + spin_unlock(&tc_flow_mgt->flow_lock); + return 0; + } + + if (tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt >= entries) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw pid=%d flow_tab num is greater than %d.\n", + profile_id, entries); + return -EINVAL; + } + + memset(&filter_data, 0, sizeof(filter_data)); + filter_data.ref_cnt = 1; + memcpy(&filter_data.key, &hash_key, sizeof(hash_key)); + + if (prof_off_msg->last_stage) + goto insert_filter; + + /* alloc bmp */ + ret = nbl_tc_flow_alloc_bmp_id(tc_flow_mgt->assoc_table_bmp, + NBL_FLOW_TABLE_NUM, 0, &filter_data.assoc_tbl_id); + if (ret) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw failed to alloc id for flow tab.\n"); + return -ENOSPC; + } + + /* assoc_tbl_id 0 means "no assoc table" to the rest of the code, so if the bitmap handed out id 0, keep it reserved and draw again */ + if (!filter_data.assoc_tbl_id) { + ret = nbl_tc_flow_alloc_bmp_id(tc_flow_mgt->assoc_table_bmp, + NBL_FLOW_TABLE_NUM, 0, &filter_data.assoc_tbl_id); + if (ret) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw failed to alloc id for flow tab.\n"); + return -ENOSPC; + } + } + +insert_filter: + ret = nbl_insert_flow_tab_filter(res_mgt, &hash_key, &filter_data, &node, profile_id); + if (ret) { + if (!prof_off_msg->last_stage) + nbl_tc_flow_free_bmp_id(tc_flow_mgt->assoc_table_bmp, + filter_data.assoc_tbl_id, 0); + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_info(common, NBL_DEBUG_FLOW, + "tc flow hw failed to insert flow tab filter " + "to hash table %d.\n", ret); + return ret; + } + + *ptr = node; + tc_flow_ptr->profile_id[profile_stage] = profile_id; + tc_flow_ptr->profile_rule[profile_stage] = node; + spin_unlock(&tc_flow_mgt->flow_lock); + return ret; +} + +/** + * @brief: outer tnl flow tab resource storage and offload to hw + * + * @param[in] res_mgt: resource mgt info + * @param[in] act: nbl_rule_action info + * @param[in] filter: nbl_flow_pattern_conf info + * @param[out] tc_flow_ptr: tc-flow pointer + * @return int: zero on success, other on failure + */ +static int nbl_flow_tab_storage(struct nbl_resource_mgt *res_mgt, + __maybe_unused struct nbl_rule_action *act, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow *tc_flow_ptr, + struct nbl_profile_offload_msg *prof_off_msg) +{ + int ret = 0; + struct nbl_flow_tab_filter *flow_tab_node = NULL; + struct nbl_flow_idx_info idx_info = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + ret = nbl_flow_tab_hash_add(res_mgt, filter, tc_flow_ptr, + (void **)&flow_tab_node, prof_off_msg); + if (ret || !flow_tab_node) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow hw flow_tab hash-list storage fail, " + "ret %d, to store flow node %p.\n", + ret, flow_tab_node); + return ret; + } + if (flow_tab_node->ref_cnt > 1) + return 0; + + flow_tab_node->act_flags = act->flag; + idx_info.profile_id = prof_off_msg->profile_id; + idx_info.last_stage = prof_off_msg->last_stage; + idx_info.key_flag =
filter->key_flag; + idx_info.pt_cmd = prof_off_msg->pt_cmd; + ret = nbl_add_nic_hw_flow_tab(flow_tab_node, act, res_mgt, &idx_info); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw add flow 2hw fail, ret %d.\n", ret); + return ret; + } + return ret; +} + +/** + * @brief: store the flow tab: + * 1. configure which key template to use + * 2. store the key info + * 3. store the action info + * 4. offload to hw + * 5. if a tunnel outer flow tab exists, store it as well + * + * @param[in] res_mgt: resource mgt info + * @param[in] tc_flow_ptr: nbl_tc_flow pointer which + * points to the key template + * @param[in] filter: key info + * @param[in] act: actions info + * @return int: 0-success other-fail. + */ +static int nbl_flow_tab_storage_entr(struct nbl_resource_mgt *res_mgt, + struct nbl_tc_flow *tc_flow_ptr, + struct nbl_flow_pattern_conf *filter, + struct nbl_rule_action *act) +{ + int ret = 0; + int ret_2 = 0; + int i = 0; + struct nbl_profile_assoc_graph *asso_graph = NULL; + struct nbl_profile_offload_msg prof_off_msg = { 0 }; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + act->next_stg_sel = NEXT_STG_SEL_EPRO; + asso_graph = &tc_flow_mgt->profile_graph[filter->graph_idx]; + + for (i = 0; i < NBL_ASSOC_PROFILE_STAGE_NUM; i++) { + /* a pp used only to calc the ecmp dp-hash does not need flow offload */ + if (i && asso_graph->profile_id[i] == 0) + break; + + prof_off_msg.profile_id = asso_graph->profile_id[i]; + prof_off_msg.profile_stage = (u8)i; + /* only dereference profile_id[i + 1] while a next stage slot exists */ + if ((i == NBL_ASSOC_PROFILE_STAGE_NUM - 1) || asso_graph->profile_id[i + 1] == 0) { + prof_off_msg.pt_cmd = 0; + prof_off_msg.last_stage = true; + } else { + prof_off_msg.pt_cmd = + tc_flow_mgt->profile_msg[asso_graph->profile_id[i + 1]].pt_cmd; + } + + ret = nbl_flow_tab_storage(res_mgt, act, filter, + tc_flow_ptr, &prof_off_msg); + + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, + "tc flow hw tab storage failed, ret %d.\n", ret); + goto fail_flow_tab; + } + } + + return ret; + +fail_flow_tab: + for (i = prof_off_msg.profile_stage; i >= 0; i--) { + struct nbl_flow_tab_filter *flow_tab_node = + tc_flow_ptr->profile_rule[i]; + if (!flow_tab_node) + continue; + + tc_flow_ptr->profile_rule[i] = NULL; + ret_2 |= nbl_rmv_flow_tab_filter(res_mgt, + &flow_tab_node->key, true, + false, + asso_graph->profile_id[i]); + if (ret_2 != 0 && ret_2 != -ENONET) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow hw del failed " + "when flow table storage failed. 
" + "tnl_flag %d, ret_2 %d.\n", + filter->input.tnl_flag, ret_2); + return ret_2; + } + } + return ret; +} + +struct nbl_tc_flow * +nbl_tc_flow_index_lookup(struct nbl_resource_mgt *res_mgt, struct nbl_flow_index_key *key) +{ + struct nbl_tc_flow *tc_flow_node = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_index_key_extra extra_key; + + spin_lock(&tc_flow_mgt->flow_lock); + NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true); + nbl_common_get_index_with_data(tc_flow_mgt->flow_idx_tbl, key, &extra_key, NULL, + 0, (void **)&tc_flow_node); + spin_unlock(&tc_flow_mgt->flow_lock); + + return tc_flow_node; +} + +struct nbl_tc_flow * +nbl_tc_flow_insert_index(struct nbl_resource_mgt *res_mgt, struct nbl_flow_index_key *key) +{ + int idx; + struct nbl_tc_flow *tc_flow_node = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_tc_flow tc_node_tmp; + + spin_lock(&tc_flow_mgt->flow_lock); + memset(&tc_node_tmp, 0, sizeof(struct nbl_tc_flow)); + idx = nbl_common_alloc_index(tc_flow_mgt->flow_idx_tbl, key, NULL, &tc_node_tmp, + sizeof(tc_node_tmp), (void **)&tc_flow_node); + if (idx == U32_MAX) + goto out; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw cookie=%llx add success!\n", key->cookie); +out: + spin_unlock(&tc_flow_mgt->flow_lock); + return tc_flow_node; +} + +int nbl_tc_flow_delete_index(struct nbl_resource_mgt *res_mgt, struct nbl_flow_index_key *key) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + spin_lock(&tc_flow_mgt->flow_lock); + nbl_common_free_index(tc_flow_mgt->flow_idx_tbl, key); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw delete flow cookie=0x%llx success.\n", key->cookie); + spin_unlock(&tc_flow_mgt->flow_lock); + + return 0; +} + +/** + * @brief: nbl_profile_assoc_graph_lookup + * @return: + * true : find + * false : not found + */ +static bool nbl_flow_assoc_graph_lookup(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pattern_conf *filter) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + u8 i = 0; + + if (filter->key_flag == 0) + return false; + + for (i = 0; i < NBL_ASSOC_PROFILE_GRAPH_NUM; i++) { + if (tc_flow_mgt->profile_graph[i].key_flag == 0) + continue; + + if ((tc_flow_mgt->profile_graph[i].key_flag & ~NBL_FLOW_KEY_TABLE_IDX_FLAG) == + (tc_flow_mgt->profile_graph[i].key_flag & filter->key_flag)) { + filter->graph_idx = i; + return true; + } + } + + return false; +} + +static int nbl_flow_tc_encap_tbl_uninit(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + nbl_common_remove_hash_table(tc_flow_mgt->encap_tbl.flow_tab_hash, NULL); + tc_flow_mgt->encap_tbl.flow_tab_hash = NULL; + + return 0; +} + +/** + * @brief: destroy nbl_tc_flow of all and action hash-list + * + * @param[in] error: error info + * return int: 0-success other-fail + */ +int nbl_flow_flush(struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (!nbl_flow_is_available(tc_flow_mgt)) + return -EINVAL; + + spin_lock(&tc_flow_mgt->flow_lock); + + ret = nbl_flow_flush_hash_list(res_mgt); + if (ret) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw 
flush_hash_list failed %d.\n", ret); + return -EINVAL; + } + + spin_unlock(&tc_flow_mgt->flow_lock); + + mutex_lock(&tc_flow_mgt->encap_tbl_lock); + nbl_flow_tc_encap_tbl_uninit(res_mgt); + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + + return ret; +} + +static void nbl_flow_clean_create_destroy_cnt(struct nbl_tc_flow_mgt *tc_flow_mgt) +{ + atomic64_set(&tc_flow_mgt->destroy_num, 0); + atomic64_set(&tc_flow_mgt->create_num, 0); +} + +/** + * @brief: flow_tab_filter hash-list init: + * + * @return int: 0-success other-fail. + */ +static int nbl_flow_tab_filter_init(struct nbl_resource_mgt *res_mgt, + u8 profile_id) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u32 entries = 0; + struct nbl_hash_tbl_key tbl_key = {0}; + + if (profile_id > NBL_PP2_PROFILE_ID_MAX && profile_id < NBL_ALL_PROFILE_NUM) + return 0; + + if (profile_id <= NBL_PP0_PROFILE_ID_MAX) + entries = 0; + else if (profile_id <= NBL_PP1_PROFILE_ID_MAX) + entries = NBL_FLOW_TABLE_LEN; + else if (profile_id <= NBL_PP2_PROFILE_ID_MAX) + entries = NBL_FLOW_TABLE_LEN * 8; + else + entries = 0; + + if (!entries) + return -EINVAL; + + /* hash_buck is 2-bytes wide, update it if needed */ + entries = entries >= 0xffff ? 0xffff : entries; + NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), sizeof(struct nbl_flow_tab_conf), + sizeof(struct nbl_flow_tab_filter), entries, false); + tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash = + nbl_common_init_hash_table(&tbl_key); + if (!tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash) + return -EINVAL; + + nbl_info(common, NBL_DEBUG_FLOW, "tc flow init profile:%u with %u entries", + profile_id, entries); + + return 0; +} + +static int nbl_flow_tc_encap_tbl_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + struct nbl_hash_tbl_key tbl_key = {0}; + + NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), sizeof(struct nbl_encap_key), + sizeof(struct nbl_encap_entry), NBL_TC_ENCAP_TBL_DEPTH, false); + tc_flow_mgt->encap_tbl.flow_tab_hash = nbl_common_init_hash_table(&tbl_key); + if (!tc_flow_mgt->encap_tbl.flow_tab_hash) + return -EINVAL; + + mutex_init(&tc_flow_mgt->encap_tbl_lock); + + return 0; +} + +static int nbl_flow_pp1_ht0_tbl_hash_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + tc_flow_mgt->pp1_ht0_mng.hash_map = + devm_kzalloc(common->dev, + sizeof(struct nbl_flow_pp_ht_tbl *) * NBL_FEM_HT_PP1_LEN, GFP_KERNEL); + if (!tc_flow_mgt->pp1_ht0_mng.hash_map) + return -ENOMEM; + + return 0; +} + +static void +nbl_flow_pp1_ht0_tbl_hash_uninit(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + devm_kfree(common->dev, tc_flow_mgt->pp1_ht0_mng.hash_map); + tc_flow_mgt->pp1_ht0_mng.hash_map = NULL; +} + +static int nbl_flow_pp1_ht1_tbl_hash_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + tc_flow_mgt->pp1_ht1_mng.hash_map = + devm_kzalloc(common->dev, + sizeof(struct nbl_flow_pp_ht_tbl *) * NBL_FEM_HT_PP1_LEN, GFP_KERNEL); + if 
(!tc_flow_mgt->pp1_ht1_mng.hash_map) + return -ENOMEM; + + return 0; +} + +static void +nbl_flow_pp1_ht1_tbl_hash_uninit(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + devm_kfree(common->dev, tc_flow_mgt->pp1_ht1_mng.hash_map); + tc_flow_mgt->pp1_ht1_mng.hash_map = NULL; +} + +static int nbl_flow_pp2_ht0_tbl_hash_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + tc_flow_mgt->pp2_ht0_mng.hash_map = + devm_kzalloc(common->dev, + sizeof(struct nbl_flow_pp_ht_tbl *) * NBL_FEM_HT_PP2_LEN, GFP_KERNEL); + if (!tc_flow_mgt->pp2_ht0_mng.hash_map) + return -ENOMEM; + + return 0; +} + +static void +nbl_flow_pp2_ht0_tbl_hash_uninit(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + devm_kfree(common->dev, tc_flow_mgt->pp2_ht0_mng.hash_map); + tc_flow_mgt->pp2_ht0_mng.hash_map = NULL; +} + +static int nbl_flow_pp2_ht1_tbl_hash_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + tc_flow_mgt->pp2_ht1_mng.hash_map = + devm_kzalloc(common->dev, + sizeof(struct nbl_flow_pp_ht_tbl *) * NBL_FEM_HT_PP2_LEN, GFP_KERNEL); + if (!tc_flow_mgt->pp2_ht1_mng.hash_map) + return -ENOMEM; + + return 0; +} + +static void +nbl_flow_pp2_ht1_tbl_hash_uninit(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + devm_kfree(common->dev, tc_flow_mgt->pp2_ht1_mng.hash_map); + tc_flow_mgt->pp2_ht1_mng.hash_map = NULL; +} + +struct nbl_flow_pp_ht_tbl * +nbl_pp_ht_lookup(struct nbl_flow_pp_ht_mng *pp_ht_mng, u16 hash_value, + struct nbl_flow_pp_ht_key *pp_ht_key) +{ + struct nbl_flow_pp_ht_tbl *node = NULL; + u16 i; + bool is_find = false; + + if (!pp_ht_mng || !pp_ht_key) + return NULL; + + node = pp_ht_mng->hash_map[hash_value]; + + if (node) { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (!memcmp(pp_ht_key, &node->key[i], sizeof(node->key[i]))) { + is_find = true; + break; + } + } + } + + if (is_find) + return node; + + return NULL; +} + +int nbl_insert_pp_ht(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pp_ht_mng *pp_ht_mng, u16 hash_value0, + u16 hash_value1, u32 key_index) +{ + struct nbl_flow_pp_ht_tbl *node; + + if (!pp_ht_mng) + return -EINVAL; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + return -ENOMEM; + } + + node->key[0].vid = 1; + node->key[0].ht_other_index = hash_value1; + node->key[0].kt_index = key_index; + node->ref_cnt = 1; + + pp_ht_mng->hash_map[hash_value0] = node; + + return 0; +} + +int nbl_delete_pp_ht(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pp_ht_mng *pp_ht_mng, + struct nbl_flow_pp_ht_tbl *node, u16 hash_value0, + u16 hash_value1, u32 key_index) +{ + u16 i; + int ret = 0; + bool is_delete = false; + + if (!pp_ht_mng || !node) + return -EINVAL; + + if (node->ref_cnt > NBL_FLOW_TAB_ONE_TIME) { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (node->key[i].ht_other_index == hash_value1 && + node->key[i].kt_index == key_index) { + node->key[i].vid = 0; + node->key[i].ht_other_index = 0; + 
node->key[i].kt_index = 0; + node->ref_cnt = node->ref_cnt - 1; + + is_delete = true; + break; + } + } + } else { + pp_ht_mng->hash_map[hash_value0] = NULL; + kfree(node); + node = NULL; + + is_delete = true; + } + + if (is_delete) + return ret; + + return -ENODEV; +} + +bool nbl_pp_ht0_ht1_search(struct nbl_flow_pp_ht_mng *pp_ht0_mng, u16 ht0_hash, + struct nbl_flow_pp_ht_mng *pp_ht1_mng, u16 ht1_hash) +{ + struct nbl_flow_pp_ht_tbl *node0 = NULL; + struct nbl_flow_pp_ht_tbl *node1 = NULL; + u16 i = 0; + bool is_find = false; + + if (!pp_ht0_mng || !pp_ht1_mng) + return is_find; + + node0 = pp_ht0_mng->hash_map[ht0_hash]; + + if (node0) + for (i = 0; i < NBL_HASH_CFT_MAX; i++) + if (node0->key[i].vid && + node0->key[i].ht_other_index == ht1_hash) { + is_find = true; + return is_find; + } + + node1 = pp_ht1_mng->hash_map[ht1_hash]; + + if (node1) + for (i = 0; i < NBL_HASH_CFT_MAX; i++) + if (node1->key[i].vid && + node1->key[i].ht_other_index == ht0_hash) { + is_find = true; + return is_find; + } + + return is_find; +} + +static int nbl_flow_pp_at_tbl_init(struct nbl_resource_mgt *res_mgt) +{ + u32 i; + u32 j; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_index_tbl_key tbl_key; + u32 at_idx_num[NBL_PP_TYPE_MAX][NBL_AT_TYPE_MAX] = { + {0, 0, 0}, + {0, NBL_FEM_AT_PP1_LEN, NBL_FEM_AT2_PP1_LEN}, + {0, NBL_FEM_AT_PP2_LEN, NBL_FEM_AT2_PP2_LEN }, + }; + + for (i = 0; i < NBL_PP_TYPE_MAX; i++) { + for (j = 0; j < NBL_AT_TYPE_MAX; j++) { + if (!at_idx_num[i][j]) + continue; + + NBL_INDEX_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), 0, + at_idx_num[i][j], + sizeof(struct nbl_flow_pp_at_key)); + tc_flow_mgt->at_mng.at_tbl[i][j] = nbl_common_init_index_table(&tbl_key); + if (!tc_flow_mgt->at_mng.at_tbl[i][j]) + return -ENOMEM; + } + } + + return 0; +} + +static void nbl_flow_pp_at_tbl_uninit(struct nbl_resource_mgt *res_mgt) +{ + u32 i; + u32 j; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + for (i = 0; i < NBL_PP_TYPE_MAX; i++) { + for (j = 0; j < NBL_AT_TYPE_MAX; j++) { + nbl_common_remove_index_table(tc_flow_mgt->at_mng.at_tbl[i][j], NULL); + tc_flow_mgt->at_mng.at_tbl[i][j] = NULL; + } + } +} + +int nbl_pp_at_lookup(struct nbl_resource_mgt *res_mgt, u8 pp_type, u8 at_type, + struct nbl_flow_pp_at_key *act_key, struct nbl_flow_at_tbl **act_node) +{ + int idx; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + void *at_tbl = tc_flow_mgt->at_mng.at_tbl[pp_type][at_type]; + struct nbl_index_key_extra extra_key; + + NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true); + idx = nbl_common_get_index_with_data(at_tbl, act_key->act, &extra_key, NULL, 0, + (void **)act_node); + return idx; +} + +int nbl_insert_pp_at(struct nbl_resource_mgt *res_mgt, u8 pp_type, u8 at_type, + struct nbl_flow_pp_at_key *act_key, struct nbl_flow_at_tbl **act_node) +{ + int idx; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + void *at_tbl = tc_flow_mgt->at_mng.at_tbl[pp_type][at_type]; + struct nbl_index_key_extra extra_key; + struct nbl_flow_at_tbl at_node_tmp; + + NBL_INDEX_EXTRA_KEY_INIT(&extra_key, NBL_FLOW_AT_IDX_NUM, NBL_FLOW_AT_IDX_MULTIPLE, false); + at_node_tmp.ref_cnt = 1; + idx = nbl_common_alloc_index(at_tbl, act_key->act, &extra_key, &at_node_tmp, + sizeof(struct nbl_flow_at_tbl), (void **)act_node); + return idx; +} + +static int nbl_flow_tcam_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt 
*tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+
+	memset(tc_flow_mgt->tcam_pp0_key_mng, 0,
+	       sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM);
+	memset(tc_flow_mgt->tcam_pp1_key_mng, 0,
+	       sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM);
+	memset(tc_flow_mgt->tcam_pp2_key_mng, 0,
+	       sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM);
+	memset(tc_flow_mgt->tcam_pp0_ad_mng, 0,
+	       sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM);
+	memset(tc_flow_mgt->tcam_pp1_ad_mng, 0,
+	       sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM);
+	memset(tc_flow_mgt->tcam_pp2_ad_mng, 0,
+	       sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM);
+
+	return 0;
+}
+
+static void nbl_flow_tcam_uninit(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+
+	memset(tc_flow_mgt->tcam_pp0_key_mng, 0,
+	       sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM);
+	memset(tc_flow_mgt->tcam_pp1_key_mng, 0,
+	       sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM);
+	memset(tc_flow_mgt->tcam_pp2_key_mng, 0,
+	       sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM);
+	memset(tc_flow_mgt->tcam_pp0_ad_mng, 0,
+	       sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM);
+	memset(tc_flow_mgt->tcam_pp1_ad_mng, 0,
+	       sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM);
+	memset(tc_flow_mgt->tcam_pp2_ad_mng, 0,
+	       sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM);
+}
+
+int nbl_tcam_key_lookup(struct nbl_flow_tcam_key_mng *tcam_pp_key_mng,
+			struct nbl_tcam_item *tcam_item, u16 *index)
+{
+	int ret = 0;
+	u16 i;
+	bool is_find = false;
+
+	if (!tcam_pp_key_mng || !tcam_item || !index)
+		return -EINVAL;
+
+	if (tcam_item->key_mode == NBL_TC_KT_FULL_MODE) {
+		for (i = 0; i < NBL_FEM_TCAM_MAX_NUM - 1; i += 2) {
+			if (tcam_pp_key_mng[i].item.key_mode != NBL_TC_KT_FULL_MODE)
+				continue;
+			/* a full-mode key matches only if both halves match */
+			if (!memcmp(tcam_pp_key_mng[i].item.key,
+				    tcam_item->kt_data.hash_key,
+				    sizeof(tcam_item->kt_data.hash_key) / 2) &&
+			    !memcmp(tcam_pp_key_mng[i + 1].item.key,
+				    &tcam_item->kt_data.hash_key[20],
+				    sizeof(tcam_item->kt_data.hash_key) / 2)) {
+				*index = i;
+				is_find = true;
+				break;
+			}
+		}
+	} else {
+		for (i = 0; i < NBL_FEM_TCAM_MAX_NUM; i++) {
+			if (tcam_pp_key_mng[i].item.key_mode != NBL_TC_KT_HALF_MODE)
+				continue;
+			if (!memcmp(tcam_pp_key_mng[i].item.key, tcam_item->kt_data.hash_key,
+				    sizeof(tcam_item->kt_data.hash_key) / 2)) {
+				*index = i;
+				is_find = true;
+				break;
+			}
+		}
+	}
+
+	if (is_find)
+		return ret;
+
+	return -ENODEV;
+}
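+
+/*
+ * Illustrative layout note (sizes inferred from the NBL_FEM_KT_* defines
+ * and the offsets used in nbl_tcam_key_lookup() above and
+ * nbl_insert_tcam_key_ad() below, not from a hardware datasheet): a
+ * half-mode (160-bit) key occupies a single TCAM entry, while a
+ * full-mode (320-bit) key is split at byte 20 of kt_data.hash_key and
+ * occupies the consecutive entries i and i + 1, with i always even.
+ * For example, a full-mode key inserted at index 6 owns entries 6 and 7,
+ * and a later lookup must match both 20-byte halves before returning 6.
+ */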
+int nbl_insert_tcam_key_ad(struct nbl_common_info *common,
+			   struct nbl_flow_tcam_key_mng *tcam_pp_key_mng,
+			   struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng,
+			   struct nbl_tcam_item *tcam_item,
+			   struct nbl_flow_tcam_ad_item *ad_item,
+			   u16 *index)
+{
+	int ret = 0;
+	u16 i = 0;
+	bool is_insert = false;
+
+	if (!tcam_pp_key_mng || !tcam_pp_ad_mng || !tcam_item || !ad_item || !index)
+		return -EINVAL;
+
+	if (tcam_item->key_mode == NBL_TC_KT_FULL_MODE) {
+		for (; i < NBL_FEM_TCAM_MAX_NUM - 1; i += 2) {
+			/* a full-mode key needs both entries of the pair free */
+			if (!tcam_pp_key_mng[i].item.key_mode &&
+			    !tcam_pp_key_mng[i + 1].item.key_mode) {
+				memcpy(tcam_pp_key_mng[i].item.key,
+				       tcam_item->kt_data.hash_key,
+				       sizeof(tcam_item->kt_data.hash_key) / 2);
+				memcpy(tcam_pp_key_mng[i + 1].item.key,
+				       &tcam_item->kt_data.hash_key[20],
+				       sizeof(tcam_item->kt_data.hash_key) / 2);
+				tcam_pp_key_mng[i].item.key_mode = NBL_TC_KT_FULL_MODE;
+				tcam_pp_key_mng[i + 1].item.key_mode = NBL_TC_KT_FULL_MODE;
+				tcam_pp_key_mng[i].ref_cnt = 1;
+				tcam_pp_key_mng[i + 1].ref_cnt = 1;
+				tcam_pp_key_mng[i].item.sw_hash_id = tcam_item->sw_hash_id;
+				tcam_pp_key_mng[i].item.profile_id = tcam_item->profile_id;
+
+				memcpy(tcam_pp_ad_mng[i].item.action, ad_item->action,
+				       sizeof(ad_item->action));
+
+				*index = i;
+				is_insert = true;
+				nbl_debug(common, NBL_DEBUG_FLOW,
+					  "tc flow hw tcam: insert pp%d index=%d,%d\n",
+					  tcam_item->pp_type, *index, *index + 1);
+				break;
+			}
+		}
+	} else {
+		for (; i < NBL_FEM_TCAM_MAX_NUM; i++) {
+			if (!tcam_pp_key_mng[i].item.key_mode) {
+				memcpy(tcam_pp_key_mng[i].item.key, tcam_item->kt_data.hash_key,
+				       sizeof(tcam_item->kt_data.hash_key) / 2);
+				tcam_pp_key_mng[i].item.key_mode = NBL_TC_KT_HALF_MODE;
+				tcam_pp_key_mng[i].ref_cnt = 1;
+				tcam_pp_key_mng[i].item.sw_hash_id =
+					tcam_item->sw_hash_id;
+				tcam_pp_key_mng[i].item.profile_id =
+					tcam_item->profile_id;
+
+				memcpy(tcam_pp_ad_mng[i].item.action, ad_item->action,
+				       sizeof(ad_item->action));
+
+				*index = i;
+				is_insert = true;
+				nbl_debug(common, NBL_DEBUG_FLOW,
+					  "tc flow hw tcam: insert pp%d index=%d\n",
+					  tcam_item->pp_type, *index);
+				break;
+			}
+		}
+	}
+
+	if (is_insert)
+		return ret;
+
+	return -ENODEV;
+}
+
+int nbl_delete_tcam_key_ad(struct nbl_common_info *common,
+			   struct nbl_flow_tcam_key_mng *tcam_pp_key_mng,
+			   struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng,
+			   u16 index, u8 key_mode, u8 pp_type)
+{
+	int ret = 0;
+
+	if (!tcam_pp_key_mng || !tcam_pp_ad_mng)
+		return -EINVAL;
+
+	if (key_mode == NBL_TC_KT_FULL_MODE) {
+		if (tcam_pp_key_mng[index].ref_cnt > 1) {
+			tcam_pp_key_mng[index].ref_cnt--;
+			tcam_pp_key_mng[index + 1].ref_cnt--;
+			nbl_debug(common, NBL_DEBUG_FLOW,
+				  "tc flow hw tcam: ref_cnt-- pp%d index=%d, ref_cnt=%d\n",
+				  pp_type, index, tcam_pp_key_mng[index].ref_cnt);
+			nbl_debug(common, NBL_DEBUG_FLOW,
+				  "tc flow hw tcam: ref_cnt-- pp%d index=%d, ref_cnt=%d\n",
+				  pp_type, index + 1,
+				  tcam_pp_key_mng[index + 1].ref_cnt);
+		} else {
+			memset(&tcam_pp_key_mng[index], 0,
+			       sizeof(tcam_pp_key_mng[index]));
+			memset(&tcam_pp_key_mng[index + 1], 0,
+			       sizeof(tcam_pp_key_mng[index + 1]));
+			memset(&tcam_pp_ad_mng[index], 0,
+			       sizeof(tcam_pp_ad_mng[index]));
+			nbl_debug(common, NBL_DEBUG_FLOW,
+				  "tc flow hw tcam: delete pp%d index=%d,%d\n",
+				  pp_type, index, index + 1);
+		}
+	} else {
+		if (tcam_pp_key_mng[index].ref_cnt > 1) {
+			tcam_pp_key_mng[index].ref_cnt--;
+			nbl_debug(common, NBL_DEBUG_FLOW,
+				  "tc flow hw tcam: ref_cnt-- pp%d index=%d, ref_cnt=%d\n",
+				  pp_type, index, tcam_pp_key_mng[index].ref_cnt);
+		} else {
+			memset(&tcam_pp_key_mng[index], 0,
+			       sizeof(tcam_pp_key_mng[index]));
+			memset(&tcam_pp_ad_mng[index], 0,
+			       sizeof(tcam_pp_ad_mng[index]));
+			nbl_debug(common, NBL_DEBUG_FLOW,
+				  "tc flow hw tcam: delete pp%d index=%d\n", pp_type, index);
+		}
+	}
+
+	return ret;
+}
+
+static int nbl_flow_mcc_init(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	nbl_tc_mcc_init(&tc_flow_mgt->tc_mcc_mgt, common);
+
+	return 0;
+}
+static void nbl_tc_flow_set_pedit_res(struct nbl_tc_pedit_res_info *pedit_res)
+{
+	pedit_res[NBL_FLOW_PED_UMAC_TYPE].pedit_num = NBL_FLOW_TC_PEDIT_MAC;
+	pedit_res[NBL_FLOW_PED_DMAC_TYPE].pedit_num = NBL_FLOW_TC_PEDIT_MAC;
+	pedit_res[NBL_FLOW_PED_UMAC_TYPE].pedit_base_id = NBL_FLOW_TC_PEDIT_MAC_BASE;
+	pedit_res[NBL_FLOW_PED_DMAC_TYPE].pedit_base_id = NBL_FLOW_TC_PEDIT_MAC_BASE;
+
+	pedit_res[NBL_FLOW_PED_UIP_TYPE].pedit_num = NBL_FLOW_TC_PEDIT_IP;
+	pedit_res[NBL_FLOW_PED_DIP_TYPE].pedit_num = NBL_FLOW_TC_PEDIT_IP;
+	pedit_res[NBL_FLOW_PED_UIP_TYPE].pedit_base_id = NBL_FLOW_TC_PEDIT_IP_BASE;
+	pedit_res[NBL_FLOW_PED_DIP_TYPE].pedit_base_id = NBL_FLOW_TC_PEDIT_IP_BASE;
+	/* special handling: leonis ipv6 needs 2 pedit addresses; v4 and v6 can share the same hw resource */
+	pedit_res[NBL_FLOW_PED_UIP_TYPE].pedit_num_h = NBL_FLOW_TC_PEDIT_IP6;
+	pedit_res[NBL_FLOW_PED_DIP_TYPE].pedit_num_h = NBL_FLOW_TC_PEDIT_IP6;
+}
+
+static int nbl_tc_flow_init_pedit(struct nbl_resource_mgt *res_mgt)
+{
+	int ret = 0;
+	struct nbl_tc_pedit_mgt *pedit_mgt;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+
+	/* set pedit cap */
+	memset(&tc_flow_mgt->pedit_mgt, 0, sizeof(tc_flow_mgt->pedit_mgt));
+	pedit_mgt = &tc_flow_mgt->pedit_mgt;
+	nbl_tc_flow_set_pedit_res(pedit_mgt->pedit_res);
+	mutex_init(&pedit_mgt->pedit_lock);
+	pedit_mgt->common = common;
+
+	/* set pedit hw resources */
+	ret = nbl_tc_pedit_init(pedit_mgt);
+	if (ret)
+		nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit init failed");
+	else
+		nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit init success");
+
+	return ret;
+}
+
+static void nbl_tc_flow_uninit_pedit(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	int ret = 0;
+
+	ret = nbl_tc_pedit_uninit(&tc_flow_mgt->pedit_mgt);
+	if (ret)
+		nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit uninit failed");
+	else
+		nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit uninit success");
+}
+
+static struct nbl_flow_info_init flow_info_init_list[] = {
+	{ nbl_flow_pp1_ht0_tbl_hash_init },
+	{ nbl_flow_pp1_ht1_tbl_hash_init },
+	{ nbl_flow_pp2_ht0_tbl_hash_init },
+	{ nbl_flow_pp2_ht1_tbl_hash_init },
+
+	{ nbl_flow_tcam_init },
+	{ nbl_flow_mcc_init },
+	{ nbl_tc_flow_init_pedit },
+};
+
+static struct nbl_flow_info_uninit flow_info_uninit_list[] = {
+	{ nbl_flow_pp1_ht0_tbl_hash_uninit },
+	{ nbl_flow_pp1_ht1_tbl_hash_uninit },
+	{ nbl_flow_pp2_ht0_tbl_hash_uninit },
+	{ nbl_flow_pp2_ht1_tbl_hash_uninit },
+
+	{ nbl_flow_tcam_uninit },
+	{ nbl_tc_flow_uninit_pedit },
+};
+
+static int nbl_flow_info_init_list(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	u32 idx = 0;
+	u8 profile_id = 0;
+	struct nbl_profile_msg *profile_msg = NULL;
+	struct nbl_flow_prf_data *prf_info = NULL;
+	u32 item_cnt = 0;
+	int ret = 0;
+
+	for (profile_id = NBL_FLOW_PROFILE_START; profile_id < NBL_ALL_PROFILE_NUM; profile_id++) {
+		profile_msg = &tc_flow_mgt->profile_msg[profile_id];
+		if (profile_msg->key_len != 0) {
+			ret = nbl_flow_tab_filter_init(res_mgt, profile_id);
+			if (ret)
+				return ret;
+		}
+
+		if (profile_msg->need_upcall && !profile_msg->pt_cmd &&
+		    profile_id < NBL_PP_STAGE_PROFILE_NUM) {
+			prf_info = &tc_flow_mgt->prf_info.prf_data[item_cnt];
+			prf_info->pp_id = profile_msg->pp_id;
+			prf_info->prf_id = profile_msg->profile_id;
+			++item_cnt;
+		}
+	}
+	tc_flow_mgt->prf_info.item_cnt = item_cnt;
+
+	for (; idx < ARRAY_SIZE(flow_info_init_list); idx++) {
+		ret = flow_info_init_list[idx].init_func(res_mgt);
+		if (ret)
+			return ret;
+	}
+
+	ret = nbl_flow_pp_at_tbl_init(res_mgt);
+	if (ret)
+		return ret;
+
+	ret = nbl_flow_tc_encap_tbl_init(res_mgt);
+
+	return ret;
+}
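+
+/*
+ * Worked example of the pedit address-table layout configured by
+ * nbl_tc_flow_set_pedit_res() above (entry counts come from the
+ * NBL_FLOW_TC_PEDIT_* defines; the numbers below are a software
+ * convention, not datasheet values): MAC rewrites use entries 0..1023
+ * and IP rewrites use entries 1024..2047. Because one IPv6 address
+ * needs two pedit entries, the IP region holds up to 1024 IPv4 rewrites
+ * or 512 IPv6 rewrites (pedit_num_h), with v4 and v6 sharing the same
+ * hardware resource.
+ */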
+void nbl_flow_info_uninit_list(struct nbl_resource_mgt *res_mgt)
+{
+	u32 idx;
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+
+	spin_lock(&tc_flow_mgt->flow_lock);
+	for (idx = 0; idx < ARRAY_SIZE(flow_info_uninit_list); idx++)
+		flow_info_uninit_list[idx].uninit_func(res_mgt);
+
+	nbl_flow_pp_at_tbl_uninit(res_mgt);
+	spin_unlock(&tc_flow_mgt->flow_lock);
+}
+
+static int nbl_tc_flow_resource_init(struct nbl_resource_mgt *res_mgt)
+{
+	int ret = 0;
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	spin_lock_init(&tc_flow_mgt->flow_lock);
+
+	ret = nbl_flow_info_init_list(res_mgt);
+	if (ret) {
+		nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw info init failed.\n");
+		goto flow_info_init_failed;
+	}
+
+	nbl_info(common, NBL_DEBUG_FLOW, "tc flow hw resource init success\n");
+
+	return ret;
+
+flow_info_init_failed:
+	nbl_flow_info_uninit_list(res_mgt);
+	return ret;
+}
+
+static int nbl_flow_resource_free(struct nbl_resource_mgt *res_mgt)
+{
+	nbl_flow_flush(res_mgt);
+
+	nbl_flow_info_uninit_list(res_mgt);
+
+	return 0;
+}
+
+/**
+ * @brief: initialize all flow table resources
+ *
+ * @param[in] res_mgt: resource management info
+ * @return int: 0 on success, other on failure
+ *
+ * the function does the following:
+ * 1. init the nbl_tc_flow list resource
+ * 2. init all kinds of key template resources
+ * 3. init the action resource
+ * 4. init the counter resource
+ */
+static int nbl_tc_flow_init(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	int ret = 0;
+
+	tc_flow_mgt->res_mgt = res_mgt;
+	nbl_flow_clean_create_destroy_cnt(tc_flow_mgt);
+
+	if (nbl_flow_is_resource_ready(tc_flow_mgt))
+		return ret;
+
+	tc_flow_mgt->profile_graph_count = g_profile_graph_count;
+	memcpy(tc_flow_mgt->profile_msg, g_prf_msg,
+	       sizeof(struct nbl_profile_msg) * NBL_ALL_PROFILE_NUM);
+	memcpy(tc_flow_mgt->profile_graph, g_prf_graph,
+	       sizeof(struct nbl_profile_assoc_graph) * NBL_ASSOC_PROFILE_GRAPH_NUM);
+
+	ret = nbl_tc_flow_resource_init(res_mgt);
+	if (ret)
+		return ret;
+
+	nbl_flow_set_resource_init_status(tc_flow_mgt, true);
+	nbl_flow_resource_available(tc_flow_mgt);
+
+	/* not available yet, still depends on mbx */
+	return ret;
+}
+
+/**
+ * @brief: uninitialize all flow table resources
+ *
+ * @param[in] res_mgt: resource management info
+ * @param[in] available: whether the flow resource is still available
+ * @return void
+ */
+static void nbl_flow_fini(struct nbl_resource_mgt *res_mgt, bool available)
+{
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+
+	if (!nbl_flow_is_resource_ready(tc_flow_mgt))
+		return;
+
+	if (!available)
+		return;
+
+	nbl_flow_resource_free(res_mgt);
+	nbl_flow_set_resource_init_status(tc_flow_mgt, false);
+}
+
+static void
+nbl_flow_wait_flows_free_done(struct nbl_tc_flow_mgt *tc_flow_mgt)
+{
+#define WAIT_CNT 100
+#define WAIT_TIME 10 /* ms */
+	u32 cnt = 0;
+
+	while (1) {
+		if (cnt > WAIT_CNT)
+			break;
+		cnt++;
+
+		if (!atomic64_read(&tc_flow_mgt->ref_cnt))
+			break;
+		mdelay(WAIT_TIME);
+	}
+}
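+
+/*
+ * Bounded-wait note: the loop above polls ref_cnt at most WAIT_CNT times
+ * with WAIT_TIME ms between polls, so teardown waits roughly
+ * 100 * 10 ms = 1 s at most. Illustrative example: if three flows are
+ * still referenced when the device is stopped, teardown continues after
+ * about one second even if ref_cnt never reaches zero.
+ */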
+static int nbl_tc_flow_add_tc_flow(void *priv, struct nbl_tc_flow_param *param)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	int ret = 0;
+	struct nbl_tc_flow *tc_flow_ptr = NULL;
+
+	if (!tc_flow_mgt) {
+		nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw add tc_flow_mgt is null.\n");
+		return -EINVAL;
+	}
+
+	if (!nbl_flow_is_available(tc_flow_mgt)) {
+		nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw resource unavailable.\n");
+		return -EINVAL;
+	}
+
+	if (param->in.type == NBL_TC_PORT_TYPE_VSI)
+		param->act.flag |= NBL_FLOW_ACTION_EGRESS;
+	else
+		param->act.flag |= NBL_FLOW_ACTION_INGRESS;
+
+	param->filter.input.dir = (param->act.flag & NBL_FLOW_ACTION_EGRESS);
+
+	tc_flow_ptr = nbl_tc_flow_insert_index(res_mgt, &param->key);
+	if (!tc_flow_ptr) {
+		nbl_err(common, NBL_DEBUG_FLOW,
+			"tc flow hw index=%llx add failed!\n", param->key.cookie);
+		ret = -EINVAL;
+		goto flow_idx_err;
+	}
+
+	tc_flow_ptr->flow_stat_id = nbl_fc_add_stats_leonis(priv, NBL_FC_COMMON_TYPE,
+							    param->key.cookie);
+	if (tc_flow_ptr->flow_stat_id < 0) {
+		nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw failed to add a counter.\n");
+		ret = -EINVAL;
+		goto stats_out;
+	}
+
+	param->act.counter_id = tc_flow_ptr->flow_stat_id;
+	param->act.flag |= NBL_FLOW_ACTION_COUNTER;
+
+	if (!nbl_flow_assoc_graph_lookup(res_mgt, &param->filter)) {
+		nbl_info(common, NBL_DEBUG_FLOW, "tc flow hw can not find graph, key_flag:0x%llx.\n",
+			 param->filter.key_flag);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = nbl_flow_tab_storage_entr(res_mgt, tc_flow_ptr,
+					&param->filter, &param->act);
+	if (ret)
+		goto out;
+
+	atomic64_inc(&tc_flow_mgt->create_num);
+	tc_flow_ptr->act_flags = param->act.flag;
+	if (param->act.flag & NBL_FLOW_ACTION_TUNNEL_ENCAP) {
+		tc_flow_ptr->encap_key = kzalloc(sizeof(*tc_flow_ptr->encap_key), GFP_KERNEL);
+		if (!tc_flow_ptr->encap_key) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		memcpy(tc_flow_ptr->encap_key, &param->act.encap_key, sizeof(param->act.encap_key));
+	}
+
+	if (NBL_TC_PEDIT_GET_NODE_RES_VAL(param->act.tc_pedit_info.pedit_node))
+		tc_flow_ptr->pedit_node = param->act.tc_pedit_info.pedit_node;
+
+	return ret;
+
+out:
+	nbl_fc_del_stats_leonis(priv, param->key.cookie);
+	if (NBL_TC_PEDIT_GET_NODE_RES_VAL(param->act.tc_pedit_info.pedit_node))
+		nbl_tc_pedit_del_node(&tc_flow_mgt->pedit_mgt,
+				      &param->act.tc_pedit_info.pedit_node);
+stats_out:
+	nbl_tc_flow_delete_index(res_mgt, &param->key);
+flow_idx_err:
+	return ret;
+}
+
+static int nbl_tc_flow_del_edit_act(struct nbl_resource_mgt *res_mgt,
+				    struct nbl_tc_flow *tc_flow_node)
+{
+	int ret = 0;
+	struct nbl_tc_pedit_node_res *pedit_node = &tc_flow_node->pedit_node;
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	if (tc_flow_node->act_flags & NBL_FLOW_ACTION_TUNNEL_ENCAP) {
+		ret = nbl_tc_tun_encap_del(res_mgt, tc_flow_node->encap_key);
+		kfree(tc_flow_node->encap_key);
+	}
+
+	if (NBL_TC_PEDIT_GET_NODE_RES_VAL(*pedit_node)) {
+		ret = nbl_tc_pedit_del_node(&tc_flow_mgt->pedit_mgt, pedit_node);
+		if (ret)
+			nbl_err(common, NBL_DEBUG_FLOW, "del tc_pedit node error");
+	}
+
+	return ret;
+}
+static void nbl_tc_flow_del_filter_tbl(struct nbl_resource_mgt *res_mgt,
+				       struct nbl_tc_flow *tc_flow_node)
+{
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	int ret = 0;
+	u8 i = 0;
+	bool last_stage = false;
+	struct nbl_flow_tab_filter *flow_tab_node;
+
+	for (i = 0; i < NBL_PP_PROFILE_STAGE_NUM; i++) {
+		flow_tab_node = tc_flow_node->profile_rule[i];
+		if (!flow_tab_node)
+			continue;
+
+		if (i && tc_flow_node->profile_id[i] == 0)
+			break;
+
+		if (tc_flow_mgt->profile_msg[tc_flow_node->profile_id[i]].key_flag == 0)
+			break;
+
+		if (i == (NBL_ASSOC_PROFILE_STAGE_NUM - 1) ||
+		    tc_flow_node->profile_id[i + 1] == 0)
+			last_stage = true;
+
+		if (tc_flow_mgt->profile_msg[tc_flow_node->profile_id[i]].g_profile_id <
+		    NBL_PP_STAGE_PROFILE_NUM) {
+			ret |= nbl_rmv_flow_tab_filter(res_mgt, &flow_tab_node->key,
+						       true, last_stage,
+						       tc_flow_node->profile_id[i]);
+		}
+
+		if (ret != 0 && ret != -ENOENT) {
+			nbl_err(common, NBL_DEBUG_FLOW,
+				"tc flow hw del failed ret %d.\n", ret);
+			return;
+		}
+	}
+
+	/* del actions */
+	ret = nbl_tc_flow_del_edit_act(res_mgt, tc_flow_node);
+	if (ret)
+		nbl_err(common, NBL_DEBUG_FLOW,
+			"tc flow del edit action failed ret %d.\n", ret);
+}
+
+static int nbl_tc_flow_del_tc_flow(void *priv, struct nbl_tc_flow_param *param)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	int ret = 0;
+	struct nbl_tc_flow *tc_flow_node = NULL;
+
+	if (!tc_flow_mgt) {
+		nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw del tc_flow_mgt is null.\n");
+		return -EINVAL;
+	}
+
+	if (!nbl_flow_is_available(tc_flow_mgt)) {
+		nbl_err(common, NBL_DEBUG_FLOW,
+			"tc flow hw resource unavailable.\n");
+		return -EINVAL;
+	}
+
+	nbl_fc_del_stats_leonis(priv, param->key.cookie);
+	tc_flow_node = nbl_tc_flow_index_lookup(res_mgt, &param->key);
+	if (!tc_flow_node) {
+		nbl_debug(common, NBL_DEBUG_FLOW,
+			  "tc flow hw cookie=%llx does not exist, nothing to del!\n",
+			  param->key.cookie);
+		return -ENOENT;
+	}
+
+	nbl_tc_flow_del_filter_tbl(res_mgt, tc_flow_node);
+	ret = nbl_tc_flow_delete_index(res_mgt, &param->key);
+	if (ret)
+		nbl_info(common, NBL_DEBUG_FLOW, "tc flow hw del tc-flow-list failed.\n");
+	else
+		atomic64_inc(&tc_flow_mgt->destroy_num);
+
+	return ret;
+}
+
+static int nbl_tc_flow_idx_lookup(void *priv, struct nbl_flow_index_key key)
+{
+	int ret = -ENOKEY;
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_tc_flow *tc_flow_ptr = NULL;
+
+	tc_flow_ptr = nbl_tc_flow_index_lookup(res_mgt, &key);
+	if (tc_flow_ptr)
+		ret = 0;
+
+	return ret;
+}
+
+static void nbl_tc_flow_node_del_action_func(void *priv, int index, void *data)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	struct nbl_tc_flow *flow_node;
+	struct nbl_flow_index_key *flow_key = (struct nbl_flow_index_key *)data;
+
+	flow_node = (struct nbl_tc_flow *)((u8 *)flow_key + sizeof(struct nbl_flow_index_key));
+	nbl_fc_del_stats_leonis(priv, flow_key->cookie);
+	nbl_tc_flow_del_filter_tbl(res_mgt, flow_node);
+	atomic64_inc(&tc_flow_mgt->destroy_num);
+}
+
+int nbl_tc_flow_flush_flow(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	struct nbl_index_tbl_scan_key scan_key;
+
+	NBL_INDEX_TBL_SCAN_KEY_INIT(&scan_key, true, res_mgt, &nbl_tc_flow_node_del_action_func);
+	nbl_common_scan_index_table(tc_flow_mgt->flow_idx_tbl, &scan_key);
+
+	return 0;
+}
+
+/* NBL_TC_FLOW_SET_OPS(ops_name, func)
+ *
+ * Use X Macros to reduce setup and remove codes.
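+ *
+ * For illustration, with NBL_TC_FLOW_SET_OPS defined as in
+ * nbl_tc_flow_setup_ops_leonis() below, NBL_TC_FLOW_OPS_TBL expands
+ * roughly to:
+ *
+ *   res_ops->NBL_NAME(add_tc_flow) = nbl_tc_flow_add_tc_flow;
+ *   res_ops->NBL_NAME(del_tc_flow) = nbl_tc_flow_del_tc_flow;
+ *   res_ops->NBL_NAME(flow_index_lookup) = nbl_tc_flow_idx_lookup;
+ *
+ * and the remove path redefines NBL_TC_FLOW_SET_OPS to reset the same
+ * pointers to NULL, so adding an op only needs one new table entry.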
+ */ +#define NBL_TC_FLOW_OPS_TBL \ +do { \ + NBL_TC_FLOW_SET_OPS(add_tc_flow, nbl_tc_flow_add_tc_flow); \ + NBL_TC_FLOW_SET_OPS(del_tc_flow, nbl_tc_flow_del_tc_flow); \ + NBL_TC_FLOW_SET_OPS(flow_index_lookup, nbl_tc_flow_idx_lookup); \ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_tc_flow_setup_mgt(struct device *dev, struct nbl_tc_flow_mgt **tc_flow_mgt) +{ + struct nbl_index_tbl_key flow_idx_tbl_key; + + *tc_flow_mgt = devm_kzalloc(dev, sizeof(struct nbl_tc_flow_mgt), GFP_KERNEL); + if (!*tc_flow_mgt) + return -ENOMEM; + + NBL_INDEX_TBL_KEY_INIT(&flow_idx_tbl_key, dev, 0, NBL_FLOW_INDEX_LEN, + sizeof(struct nbl_flow_index_key)); + (*tc_flow_mgt)->flow_idx_tbl = nbl_common_init_index_table(&flow_idx_tbl_key); + if (!(*tc_flow_mgt)->flow_idx_tbl) + return -ENOMEM; + + return 0; +} + +static void nbl_tc_flow_remove_mgt(struct device *dev, struct nbl_tc_flow_mgt **tc_flow_mgt) +{ + nbl_common_remove_index_table((*tc_flow_mgt)->flow_idx_tbl, NULL); + devm_kfree(dev, *tc_flow_mgt); + *tc_flow_mgt = NULL; +} + +int nbl_tc_flow_mgt_start_leonis(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt **tc_flow_mgt; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + int ret = 0; + + tc_flow_mgt = &NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + ret = nbl_tc_flow_setup_mgt(dev, tc_flow_mgt); + if (ret) + return ret; + ret = nbl_tc_flow_init(res_mgt); + + /* init sub-module hw-flow-stats */ + if (!ret) + return nbl_fc_mgt_start_leonis(res_mgt); + return ret; +} + +void nbl_tc_flow_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_tc_flow_mgt **tc_flow_mgt; + bool available; + + tc_flow_mgt = &NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + if (!(*tc_flow_mgt)) + return; + + mdelay(NBL_SAFE_THREADS_WAIT_TIME); + nbl_flow_wait_flows_free_done(*tc_flow_mgt); + + available = nbl_flow_is_available(*tc_flow_mgt); + nbl_flow_fini(res_mgt, available); + nbl_flow_resource_unavailable(*tc_flow_mgt); + nbl_fc_mgt_stop_leonis(res_mgt); + nbl_tc_flow_remove_mgt(dev, tc_flow_mgt); +} + +int nbl_tc_flow_setup_ops_leonis(struct nbl_resource_ops *res_ops) +{ + int ret = 0; +#define NBL_TC_FLOW_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_TC_FLOW_OPS_TBL; +#undef NBL_TC_FLOW_SET_OPS + + ret = nbl_fc_setup_ops_leonis(res_ops); + if (ret) + return ret; + ret = nbl_tc_tun_setup_ops(res_ops); + return ret; +} + +void nbl_tc_flow_remove_ops_leonis(struct nbl_resource_ops *res_ops) +{ +#define NBL_TC_FLOW_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_TC_FLOW_OPS_TBL; +#undef NBL_TC_FLOW_SET_OPS + + nbl_fc_remove_ops_leonis(res_ops); + nbl_tc_tun_remove_ops(res_ops); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.h new file mode 100644 index 0000000000000000000000000000000000000000..97b831e7ac7b65044eb03be202914f04eccf109e --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_TC_FLOW_LEONIS_H_ +#define _NBL_TC_FLOW_LEONIS_H_ + +#include "nbl_core.h" +#include "nbl_hw.h" +#include "nbl_resource.h" +#include "nbl_tc_mcc_leonis.h" + +#define NBL_FLOW_INGRESS 0 +#define NBL_FLOW_EGRESS 1 + +#define NBL_FLOW_INNER_PATTERN 0 +#define NBL_FLOW_OUTER_PATTERN 1 + +#define NBL_MAX_ETHPORTS 516 +#define NBL_FLOW_ETH_REP_0 2048 +#define NBL_FLOW_ETH_REP_1 2049 +#define NBL_FLOW_ETH_REP_2 2050 +#define NBL_FLOW_ETH_REP_3 2051 +#define NBL_FLOW_BOND_REP_PORT_ID 2052 +#define NBL_ETHER_TYPE_IPV4 4 +#define NBL_ETHER_TYPE_IPV6 6 +#define NBL_FLOW_MAX_REP_ID 0xFFFF + +#define NBL_FLOW_ICMP_REQ_TYPE 8 +#define NBL_FLOW_ICMP_REQ_CODE 0 +#define NBL_FLOW_ICMP_REP_TYPE 0 +#define NBL_FLOW_ICMP_REP_CODE 0 + +#define NBL_FLOW_ICMP6_REQ_TYPE 128 +#define NBL_FLOW_ICMP6_REQ_CODE 0 +#define NBL_FLOW_ICMP6_REP_TYPE 129 +#define NBL_FLOW_ICMP6_REP_CODE 0 + +#define NBL_HASH_CFT_MAX 4 +#define NBL_HASH_CFT_AVL 2 +#define NBL_HASH0 1 +#define NBL_HASH1 2 + +#define NBL_KEY_TYPE_160 0 +#define NBL_KEY_TYPE_320 1 + +#define NBL_FEM_KT_LEN 320 +#define NBL_FEM_KT_HALF_LEN 160 +#define NBL_FEM_AT_LEN 32 +#define NBL_FEM_AT_HALF_LEN 16 +#define NBL_AT_WIDTH 22 + +#define NBL_PP1_AT2_OFFSET (92 * 1024) +#define NBL_PP1_AT_OFFSET (80 * 1024) +#define NBL_PP2_AT2_OFFSET (64 * 1024) + +#define NBL_PP1_POWER 13 +#define NBL_PP2_POWER 14 + +#define NBL_FEM_AT_NO_ENTRY (0) +#define NBL_FEM_AT_ONE_ENTRY (1) +#define NBL_FEM_AT_TWO_ENTRY (2) + +#define NBL_HT0_HASH 1 +#define NBL_HT1_HASH 2 + +#define NBL_SAFE_THREADS_WAIT_TIME (200) + +#define NBL_MASK_16 0xffff + +#define NBL_PP_STAGE_PROFILE_NUM (48) +#define NBL_PP_PROFILE_STAGE_NUM (8) + +#define NBL_FLOW_TABLE_LEN (8 * 1024) +#define NBL_TABLE_KEY_VALUE_LEN (40) +#define NBL_TABLE_KEY_DATA_LEN (10) + +#define NBL_BITS_IN_NIBBLE (4) +#define NBL_BITS_IN_U8 (8) +#define NBL_BITS_IN_U16 (16) +#define NBL_BITS_IN_U32 (32) +#define NBL_BITS_IN_U64 (64) + +#define NBL_FLOW_PROFILE_START 16 +#define NBL_FLOW_LEN_INVALID (0xffffffff) + +#define NBL_FLOW_TAB_ONE_TIME 1 +#define NBL_FLOW_TAB_TWO_TIME 2 +#define NBL_INVALID_U32 0xFFFFFFFF +#define NBL_FLOW_TABLE_L4_PORT_DEFAULT_MASK 0xFFFF +#define NBL_FLOW_TABLE_FULL_MASK_AS_U32 0xFFFFFFFF +#define NBL_FLOW_TABLE_FULL_MASK_AS_U16 0xFFFF +#define NBL_FLOW_TABLE_FULL_MASK_AS_U8 0xFF + +#define NBL_GET_ARG_LEN(sz) ((sz) / sizeof(u32)) +#define NBL_GET_ARG_COPY_LEN(sz) ((sz) * sizeof(u32)) + +#define NBL_FLOW_TC_PEDIT_MAC 1024 +#define NBL_FLOW_TC_PEDIT_IP 1024 +#define NBL_FLOW_TC_PEDIT_IP6 512 + +#define NBL_FLOW_TC_PEDIT_MAC_BASE 0 +#define NBL_FLOW_TC_PEDIT_IP_BASE NBL_FLOW_TC_PEDIT_MAC + +/* at node's idx has two continuous idx, and the begin idx need to be even number */ +#define NBL_FLOW_AT_IDX_NUM 2 +#define NBL_FLOW_AT_IDX_MULTIPLE 2 + +struct nbl_tc_flow { + u8 acl_flag:1; + int flow_stat_id; + u64 act_flags; + u8 profile_id[NBL_ASSOC_PROFILE_STAGE_NUM]; + + struct { + void *profile_rule[NBL_ASSOC_PROFILE_STAGE_NUM]; + }; + struct nbl_encap_key *encap_key; + struct nbl_tc_pedit_node_res pedit_node; +}; + +struct nbl_tcam_item { + union nbl_tc_common_data_u kt_data; + u32 tcam_action[NBL_MAX_ACTION_NUM]; + bool tcam_flag; + u8 key_mode; + u8 pp_type; + u32 *pp_tcam_count; + u16 tcam_index; + u32 sw_hash_id; + u8 profile_id; +}; + +#define NBL_ACT_INGRESS 1 +#define NBL_ACT_ENGRESS 0 + +#define NBL_TC_KT_HALF_MODE 1 +#define NBL_TC_KT_FULL_MODE 2 + +struct nbl_edit_item { + struct list_head tc_mcc_list; + u32 encap_idx; + u16 smac_idx; + u16 dmac_idx; + u16 sip_idx; + u16 
dip_idx; + u16 mcc_idx; + bool is_mir; + u8 direct; +}; + +struct nbl_select_input { + struct nbl_flow_pp_ht_mng *pp_ht0_mng; + struct nbl_flow_pp_ht_mng *pp_ht1_mng; + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng; + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng; + unsigned long *pp_kt_bmp; + u32 kt_idx_offset; + u32 *pp_tcam_count; + u32 act_offset; + u32 act2_offset; + u32 pp_kt_num; + u8 pp_type; +}; + +/* flow tab hash-list struct */ +struct nbl_flow_tab_conf { + union { + u32 key_value[NBL_TABLE_KEY_DATA_LEN]; + u8 key_data[NBL_TABLE_KEY_VALUE_LEN]; + }; +}; + +struct nbl_flow_tab_filter { + struct nbl_flow_tab_conf key; + struct nbl_tc_ht_item ht_item; + struct nbl_edit_item edit_item; + struct nbl_act_collect act_collect; + u64 act_flags; + u32 assoc_tbl_id; + u32 tbl_id; + u32 sw_hash_id; + u32 ref_cnt; + u16 tcam_index; + u8 pp_type; + bool tcam_flag; +}; + +struct nbl_flow_idx_info { + u64 key_flag; + u32 flow_idx; + u16 tnl_mac_idx; + u16 pp_flag; + u8 outer_pattern_flag; + u8 profile_id; + bool last_stage; + bool pt_cmd; +}; + +struct nbl_profile_offload_msg { + u16 assoc_tbl_id; + u8 profile_id; + u8 profile_stage; + bool pt_cmd; + bool last_stage; +}; + +struct nbl_mt_input { + u32 tbl_id; + u16 depth; + u16 power; + u8 key[NBL_KT_BYTE_LEN]; + u8 key_full; + u8 at_num; + u8 kt_left_num; + u8 pp_type; +}; + +struct nbl_flow_info_init { + int (*init_func)(struct nbl_resource_mgt *res_mgt); +}; + +struct nbl_flow_info_uninit { + void (*uninit_func)(struct nbl_resource_mgt *res_mgt); +}; + +int nbl_tc_flow_alloc_bmp_id(unsigned long *bitmap_mng, u32 size, + u8 type, u32 *bitmap_id); +void nbl_tc_flow_free_bmp_id(unsigned long *bitmap_mng, u32 id, u8 type); +int nbl_flow_flush(struct nbl_resource_mgt *res_mgt); +void nbl_flow_info_uninit_list(struct nbl_resource_mgt *res_mgt); +void nbl_flow_resource_unavailable(struct nbl_tc_flow_mgt *tc_flow_mgt); +bool nbl_flow_is_available(struct nbl_tc_flow_mgt *tc_flow_mgt); +void nbl_flow_ref_inc(void); +void nbl_flow_ref_dec(void); + +struct nbl_flow_pp_ht_tbl * +nbl_pp_ht_lookup(struct nbl_flow_pp_ht_mng *pp_ht_mng, u16 hash_value, + struct nbl_flow_pp_ht_key *pp_ht_key); +int nbl_insert_pp_ht(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pp_ht_mng *pp_ht_mng, + u16 hash_value0, u16 hash_value1, u32 key_index); +int nbl_delete_pp_ht(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pp_ht_mng *pp_ht_mng, + struct nbl_flow_pp_ht_tbl *node, u16 hash_value0, + u16 hash_value1, u32 key_index); + +bool nbl_pp_ht0_ht1_search(struct nbl_flow_pp_ht_mng *pp_ht0_mng, u16 ht0_hash, + struct nbl_flow_pp_ht_mng *pp_ht1_mng, u16 ht1_hash); +int nbl_pp_at_lookup(struct nbl_resource_mgt *res_mgt, u8 pp_type, u8 at_type, + struct nbl_flow_pp_at_key *act_key, struct nbl_flow_at_tbl **act_node); + +int nbl_insert_pp_at(struct nbl_resource_mgt *res_mgt, u8 pp_type, u8 at_type, + struct nbl_flow_pp_at_key *act_key, struct nbl_flow_at_tbl **act_node); + +struct nbl_tc_flow * +nbl_tc_flow_index_lookup(struct nbl_resource_mgt *res_mgt, struct nbl_flow_index_key *key); +struct nbl_tc_flow * +nbl_tc_flow_insert_index(struct nbl_resource_mgt *res_mgt, struct nbl_flow_index_key *key); +int nbl_tc_flow_delete_index(struct nbl_resource_mgt *res_mgt, struct nbl_flow_index_key *key); + +int nbl_tcam_key_lookup(struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_tcam_item *tcam_item, u16 *index); +int nbl_insert_tcam_key_ad(struct nbl_common_info *common, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + 
struct nbl_tcam_item *tcam_item, + struct nbl_flow_tcam_ad_item *ad_item, + u16 *index); +int nbl_delete_tcam_key_ad(struct nbl_common_info *common, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + u16 index, u8 key_mode, u8 pp_type); + +int nbl_cmdq_flow_ht_clear_2hw(struct nbl_tc_ht_item *ht_item, + u8 pp_type, struct nbl_resource_mgt *res_mgt); + +void nbl_flow_remove_ops(struct nbl_resource_ops *res_ops); +int nbl_flow_setup_ops(struct nbl_resource_ops *res_ops); +void nbl_flow_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_flow_mgt_start(struct nbl_resource_mgt *res_mgt); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.c new file mode 100644 index 0000000000000000000000000000000000000000..19ab68addd5ee34ef18f8f629f63003f62537fd4 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ +#include "nbl_tc_mcc_leonis.h" + +static u16 nbl_tc_cfg_action_set_dport_mcc_eth(u8 eth, u8 port_type) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.down.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.down.port_type = SET_DPORT_TYPE_ETH_LAG; + set_dport.dport.down.next_stg_sel = NEXT_STG_SEL_EPRO; + if (port_type == NBL_TC_PORT_TYPE_ETH) { + set_dport.dport.down.eth_vld = 1; + set_dport.dport.down.eth_id = eth; + } else { + set_dport.dport.down.lag_vld = 1; + set_dport.dport.down.lag_id = eth; + } + + return set_dport.data; +} + +static u16 nbl_tc_cfg_action_set_dport_mcc_vsi(u16 vsi) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; + set_dport.dport.up.port_id = vsi; + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO; + + return set_dport.data; +} + +void nbl_tc_mcc_init(struct nbl_tc_mcc_mgt *tc_mcc_mgt, struct nbl_common_info *common) +{ + tc_mcc_mgt->common = common; + INIT_LIST_HEAD(&tc_mcc_mgt->mcc_list); +} + +int nbl_tc_mcc_add_leaf_node(struct nbl_tc_mcc_mgt *tc_mcc_mgt, u16 dport_id, u8 port_type) +{ + struct nbl_tc_mcc_info *mcc_node; + long idx; + + if (tc_mcc_mgt->mcc_offload_cnt >= NBL_TC_MCC_MAX_OFFLOAD_CNT) { + nbl_err(tc_mcc_mgt->common, NBL_DEBUG_FLOW, "tc mcc groups exceed max num\n"); + return -ENOBUFS; + } + + idx = find_first_zero_bit(tc_mcc_mgt->mcc_pool, NBL_TC_MCC_TBL_DEPTH); + /* idx won't exceed NBL_TC_MCC_TBL_DEPTH unless flow call error */ + if (idx >= NBL_TC_MCC_TBL_DEPTH) { + nbl_err(tc_mcc_mgt->common, NBL_DEBUG_FLOW, "tc mcc no available idx\n"); + return -ENOBUFS; + } + mcc_node = kzalloc(sizeof(*mcc_node), GFP_KERNEL); + if (!mcc_node) + return -ENOMEM; + + mcc_node->port_type = port_type; + mcc_node->dport_id = dport_id; + mcc_node->mcc_id = (u16)idx; + + set_bit(idx, tc_mcc_mgt->mcc_pool); + list_add(&mcc_node->node, &tc_mcc_mgt->mcc_list); + nbl_debug(tc_mcc_mgt->common, NBL_DEBUG_FLOW, "tc mcc group %d add member port type %d id %d\n", + (int)idx, port_type, dport_id); + + return idx; +} + +void nbl_tc_mcc_get_list(struct nbl_tc_mcc_mgt *tc_mcc_mgt, struct list_head *tc_mcc_list) +{ + list_replace_init(&tc_mcc_mgt->mcc_list, tc_mcc_list); +} + +void nbl_tc_mcc_free_list(struct nbl_tc_mcc_mgt *tc_mcc_mgt) +{ + struct nbl_tc_mcc_info *mcc_node = NULL; + struct 
nbl_tc_mcc_info *safe_node = NULL; + + list_for_each_entry_safe(mcc_node, safe_node, &tc_mcc_mgt->mcc_list, node) { + list_del(&mcc_node->node); + clear_bit(mcc_node->mcc_id, tc_mcc_mgt->mcc_pool); + nbl_debug(tc_mcc_mgt->common, NBL_DEBUG_FLOW, + "tc mcc group %d free member port type %d id %d\n", + mcc_node->mcc_id, mcc_node->port_type, mcc_node->dport_id); + kfree(mcc_node); + } +} + +void nbl_tc_mcc_add_hw_tbl(struct nbl_resource_mgt *res_mgt, struct nbl_tc_mcc_mgt *tc_mcc_mgt) +{ + struct nbl_tc_mcc_info *mcc_node = NULL; + struct nbl_phy_ops *phy_ops; + u16 prev_mcc_id, mcc_action; + bool mcc_add_succ = false; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + list_for_each_entry(mcc_node, &tc_mcc_mgt->mcc_list, node) { + if (mcc_node->port_type == NBL_TC_PORT_TYPE_VSI) + mcc_action = nbl_tc_cfg_action_set_dport_mcc_vsi(mcc_node->dport_id); + else + mcc_action = nbl_tc_cfg_action_set_dport_mcc_eth((u8)mcc_node->dport_id, + mcc_node->port_type); + + if (nbl_list_is_first(&mcc_node->node, &tc_mcc_mgt->mcc_list)) + prev_mcc_id = NBL_MCC_ID_INVALID; + else + prev_mcc_id = list_prev_entry(mcc_node, node)->mcc_id; + phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id, + prev_mcc_id, NBL_MCC_ID_INVALID, mcc_action); + mcc_add_succ = true; + } + if (mcc_add_succ) + ++tc_mcc_mgt->mcc_offload_cnt; +} + +void nbl_tc_mcc_free_hw_tbl(struct nbl_resource_mgt *res_mgt, struct nbl_tc_mcc_mgt *tc_mcc_mgt, + struct list_head *tc_mcc_list) +{ + struct nbl_tc_mcc_info *mcc_node = NULL; + struct nbl_tc_mcc_info *safe_node = NULL; + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + list_for_each_entry_safe(mcc_node, safe_node, tc_mcc_list, node) { + phy_ops->del_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id, + NBL_MCC_ID_INVALID, NBL_MCC_ID_INVALID); + list_del(&mcc_node->node); + clear_bit(mcc_node->mcc_id, tc_mcc_mgt->mcc_pool); + nbl_debug(tc_mcc_mgt->common, NBL_DEBUG_FLOW, + "tc mcc group %d free member port type %d id %d\n", + mcc_node->mcc_id, mcc_node->port_type, mcc_node->dport_id); + kfree(mcc_node); + } + --tc_mcc_mgt->mcc_offload_cnt; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.h new file mode 100644 index 0000000000000000000000000000000000000000..2f3a7f89514bc1d57703106af92ffa99e7e7ee8f --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_TC_MCC_LEONIS_H_ +#define _NBL_TC_MCC_LEONIS_H_ + +#include "nbl_core.h" +#include "nbl_hw.h" +#include "nbl_resource.h" + +#define NBL_TC_MCC_MAX_OFFLOAD_CNT (8) + +struct nbl_tc_mcc_info { + struct list_head node; + u16 dport_id; + u16 mcc_id; + u8 port_type; +}; + +void nbl_tc_mcc_init(struct nbl_tc_mcc_mgt *tc_mcc_mgt, struct nbl_common_info *common); +int nbl_tc_mcc_add_leaf_node(struct nbl_tc_mcc_mgt *tc_mcc_mgt, u16 dport_id, u8 port_type); +void nbl_tc_mcc_get_list(struct nbl_tc_mcc_mgt *tc_mcc_mgt, struct list_head *tc_mcc_list); +void nbl_tc_mcc_add_hw_tbl(struct nbl_resource_mgt *res_mgt, struct nbl_tc_mcc_mgt *tc_mcc_mgt); +void nbl_tc_mcc_free_hw_tbl(struct nbl_resource_mgt *res_mgt, struct nbl_tc_mcc_mgt *tc_mcc_mgt, + struct list_head *tc_mcc_list); +void nbl_tc_mcc_free_list(struct nbl_tc_mcc_mgt *tc_mcc_mgt); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.c new file mode 100644 index 0000000000000000000000000000000000000000..ec118068555bec3e6a332914991476198d085029 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ +#include "nbl_resource.h" +#include "nbl_tc_tun_leonis.h" + +static bool nbl_tc_tun_encap_lookup(void *priv, + struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param) +{ + bool encap_find = false; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_encap_entry *encap_node = NULL; + + mutex_lock(&tc_flow_mgt->encap_tbl_lock); + if (!tc_flow_mgt->encap_tbl.flow_tab_hash) { + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + nbl_err(common, NBL_DEBUG_FLOW, "encap hash tbl is null.\n"); + encap_find = false; + goto end; + } + + encap_node = nbl_common_get_hash_node(tc_flow_mgt->encap_tbl.flow_tab_hash, + &rule_act->encap_key); + if (encap_node) { + encap_node->ref_cnt++; + rule_act->encap_idx = encap_node->encap_idx; + rule_act->vni = encap_node->vni; + rule_act->tc_tun_encap_out_dev = encap_node->out_dev; + nbl_debug(common, NBL_DEBUG_FLOW, "encap is exist, vni %d, encap_idx %d", + rule_act->vni, rule_act->encap_idx); + encap_find = true; + } + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); +end: + return encap_find; +} + +int nbl_tc_tun_encap_del(void *priv, struct nbl_encap_key *key) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(tc_flow_mgt->res_mgt); + struct nbl_encap_entry *e = NULL; + const struct nbl_phy_ops *phy_ops; + bool del_hw_encap_tbl = false; + u16 encap_idx = 0; + + res_mgt = tc_flow_mgt->res_mgt; + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (!key) { + nbl_err(common, NBL_DEBUG_FLOW, "encap_key is null"); + return -EINVAL; + } + + mutex_lock(&tc_flow_mgt->encap_tbl_lock); + + e = nbl_common_get_hash_node(tc_flow_mgt->encap_tbl.flow_tab_hash, key); + if (e) { + if (e->ref_cnt > 1) { + e->ref_cnt--; + } else { + /* remove encap from hw */ + del_hw_encap_tbl = true; + encap_idx = e->encap_idx; + /* free soft encap hash node */ + clear_bit(e->encap_idx, 
tc_flow_mgt->encap_tbl_bmp); + nbl_common_free_hash_node(tc_flow_mgt->encap_tbl.flow_tab_hash, key); + tc_flow_mgt->encap_tbl.tab_cnt--; + } + } + + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + + if (del_hw_encap_tbl) + phy_ops->del_tnl_encap(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), encap_idx); + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl tc del encap_idx: %u, encap_node:%p, " + "del_hw:%d", encap_idx, e, del_hw_encap_tbl); + + return 0; +} + +static int nbl_tc_tun_encap_add(void *priv, struct nbl_rule_action *action) +{ + u16 encap_idx; + int encap_cnt; + int ret = 0; + struct nbl_encap_entry e; + struct nbl_encap_entry *encap_node; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(tc_flow_mgt->res_mgt); + const struct nbl_phy_ops *phy_ops; + + res_mgt = tc_flow_mgt->res_mgt; + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + mutex_lock(&tc_flow_mgt->encap_tbl_lock); + + encap_idx = (u16)find_first_zero_bit(tc_flow_mgt->encap_tbl_bmp, + NBL_TC_ENCAP_TBL_DEPTH); + if (encap_idx == NBL_TC_ENCAP_TBL_DEPTH) { + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + ret = -ENOSPC; + nbl_info(common, NBL_DEBUG_FLOW, "encap tbl is full, cnt:%u", encap_idx); + goto err; + } + + set_bit(encap_idx, tc_flow_mgt->encap_tbl_bmp); + action->encap_idx = encap_idx; + memset(&e, 0, sizeof(e)); + e.ref_cnt = 1; + e.out_dev = action->tc_tun_encap_out_dev; + memcpy(e.encap_buf, action->encap_buf, NBL_FLOW_ACTION_ENCAP_TOTAL_LEN); + e.encap_size = action->encap_size; + e.encap_idx = action->encap_idx; + e.vni = action->vni; + memcpy(&e.key, &action->encap_key, sizeof(action->encap_key)); + + /* insert encap_node */ + ret = nbl_common_alloc_hash_node(tc_flow_mgt->encap_tbl.flow_tab_hash, + &action->encap_key, &e, (void **)&encap_node); + if (ret) { + clear_bit(encap_idx, tc_flow_mgt->encap_tbl_bmp); + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + nbl_info(common, NBL_DEBUG_FLOW, "alloc encap node failed, ret %d!", ret); + goto err; + } + + tc_flow_mgt->encap_tbl.tab_cnt++; + encap_cnt = tc_flow_mgt->encap_tbl.tab_cnt; + + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + + /* fill act_buf and send to hw */ + phy_ops->add_tnl_encap(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), action->encap_buf, + action->encap_idx, action->encap_idx_info); + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl tc add new encap, encap_idx %u, encap_cnt %d " + "vni %u, encap_size %u, out_dev %s", + encap_idx, encap_cnt, e.vni, e.encap_size, netdev_name(e.out_dev)); + +err: + return ret; +} + +/* NBL_TC_TUN_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. 
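+ *
+ * This mirrors the NBL_TC_FLOW_OPS_TBL pattern in nbl_tc_flow_leonis.c:
+ * setup wires tc_tun_encap_lookup/tc_tun_encap_add/tc_tun_encap_del into
+ * res_ops, and remove resets the same three pointers to NULL.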
+ */ +#define NBL_TC_TUN_OPS_TBL \ +do { \ + NBL_TC_TUN_SET_OPS(tc_tun_encap_lookup, nbl_tc_tun_encap_lookup); \ + NBL_TC_TUN_SET_OPS(tc_tun_encap_del, nbl_tc_tun_encap_del); \ + NBL_TC_TUN_SET_OPS(tc_tun_encap_add, nbl_tc_tun_encap_add); \ +} while (0) + +int nbl_tc_tun_setup_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_TC_TUN_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_TC_TUN_OPS_TBL; +#undef NBL_TC_TUN_SET_OPS + + return 0; +} + +void nbl_tc_tun_remove_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_TC_TUN_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_TC_TUN_OPS_TBL; +#undef NBL_TC_TUN_SET_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.h new file mode 100644 index 0000000000000000000000000000000000000000..381b8050329a8c393c7ddd6b8d9abc4aa5a2e49c --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef __NBL_TC_TUN_LEONIS_H__ +#define __NBL_TC_TUN_LEONIS_H__ + +#include +#include "nbl_include.h" +#include "nbl_core.h" +#include "nbl_resource.h" + +int nbl_tc_tun_setup_ops(struct nbl_resource_ops *res_ops); +void nbl_tc_tun_remove_ops(struct nbl_resource_ops *res_ops); + +int nbl_tc_tun_encap_del(void *priv, struct nbl_encap_key *key); + +#endif /* end of __NBL_TC_TUN_H__ */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c index 6445fc548383ac67594d0714b4ff91b90321636a..e312b7fea1377ff21e4505afc55c55d509d6df69 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c @@ -113,6 +113,7 @@ static int nbl_res_intr_configure_msix_map(void *priv, u16 func_id, u16 num_net_ intr_mgt->func_intr_res[func_id].interrupts = interrupts; intr_mgt->func_intr_res[func_id].num_interrupts = requested; + intr_mgt->func_intr_res[func_id].num_net_interrupts = num_net_msix; for (i = 0; i < num_net_msix; i++) { intr_index = find_first_zero_bit(intr_mgt->interrupt_net_bitmap, @@ -156,7 +157,7 @@ static int nbl_res_intr_configure_msix_map(void *priv, u16 func_id, u16 num_net_ /* use ctrl dev bdf */ phy_ops->configure_msix_map(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, true, - msix_map_table->dma, common->bus, common->devid, + msix_map_table->dma, common->hw_bus, common->devid, NBL_COMMON_TO_PCI_FUNC_ID(common)); return 0; @@ -217,17 +218,6 @@ static int nbl_res_intr_enable_abnormal_irq(void *priv, u16 vector_id, bool enab return 0; } -static int nbl_res_intr_enable_msix_irq(void *priv, u16 global_vector_id) -{ - struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_phy_ops *phy_ops; - - phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - - phy_ops->enable_msix_irq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_vector_id); - return 0; -} - static u8 *nbl_res_get_msix_irq_enable_info(void *priv, u16 global_vector_id, u32 *irq_data) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -254,7 +244,7 @@ static u16 nbl_res_intr_get_msix_entry_id(void *priv, u16 vsi_id, u16 local_vect } static void nbl_res_intr_get_coalesce(void *priv, u16 func_id, u16 vector_id, - struct ethtool_coalesce *ec) + struct nbl_chan_param_get_coalesce *ec) { 
struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); @@ -266,10 +256,7 @@ static void nbl_res_intr_get_coalesce(void *priv, u16 func_id, u16 vector_id, global_vector_id = intr_mgt->func_intr_res[func_id].interrupts[vector_id]; phy_ops->get_coalesce(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_vector_id, &pnum, &rate); /* tx and rx using the same interrupt */ - ec->tx_coalesce_usecs = rate; - ec->tx_max_coalesced_frames = pnum; - ec->rx_coalesce_usecs = rate; - ec->rx_max_coalesced_frames = pnum; + NBL_SET_INTR_COALESCE(ec, rate, pnum, rate, pnum); } static void nbl_res_intr_set_coalesce(void *priv, u16 func_id, u16 vector_id, @@ -319,7 +306,7 @@ static int nbl_res_intr_get_abnormal_irq_num(void *priv) return 1; } -static u16 nbl_res_intr_get_suppress_level(void *priv, u64 rates, u16 last_level) +u16 nbl_res_intr_get_suppress_level(void *priv, u64 rates, u16 last_level) { switch (last_level) { case NBL_INTR_SUPPRESS_LEVEL0: @@ -328,17 +315,24 @@ static u16 nbl_res_intr_get_suppress_level(void *priv, u64 rates, u16 last_level else return NBL_INTR_SUPPRESS_LEVEL0; case NBL_INTR_SUPPRESS_LEVEL1: - if (rates > NBL_INTR_SUPPRESS_LEVEL1_DOWNGRADE_THRESHOLD) + if (rates > NBL_INTR_SUPPRESS_LEVEL2_THRESHOLD) + return NBL_INTR_SUPPRESS_LEVEL2; + else if (rates > NBL_INTR_SUPPRESS_LEVEL1_DOWNGRADE_THRESHOLD) return NBL_INTR_SUPPRESS_LEVEL1; else return NBL_INTR_SUPPRESS_LEVEL0; + case NBL_INTR_SUPPRESS_LEVEL2: + if (rates > NBL_INTR_SUPPRESS_LEVEL2_DOWNGRADE_THRESHOLD) + return NBL_INTR_SUPPRESS_LEVEL2; + else + return NBL_INTR_SUPPRESS_LEVEL1; default: return NBL_INTR_SUPPRESS_LEVEL0; } } -static void nbl_res_intr_set_intr_suppress_level(void *priv, u16 func_id, u16 vector_id, - u16 num_net_msix, u16 level) +void nbl_res_intr_set_intr_suppress_level(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 level) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); @@ -357,11 +351,24 @@ static void nbl_res_intr_set_intr_suppress_level(void *priv, u16 func_id, u16 ve rate = NBL_INTR_SUPPRESS_LEVEL1_25G_RATE; } break; + case NBL_INTR_SUPPRESS_LEVEL2: + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { + pnum = NBL_INTR_SUPPRESS_LEVEL2_100G_PNUM; + rate = NBL_INTR_SUPPRESS_LEVEL2_100G_RATE; + } else { + pnum = NBL_INTR_SUPPRESS_LEVEL1_25G_PNUM; + rate = NBL_INTR_SUPPRESS_LEVEL1_25G_RATE; + } + break; default: pnum = NBL_INTR_SUPPRESS_LEVEL0_PNUM; rate = NBL_INTR_SUPPRESS_LEVEL0_RATE; break; } + + if (num_net_msix == U16_MAX) + num_net_msix = intr_mgt->func_intr_res[func_id].num_net_interrupts; + for (i = 0; i < num_net_msix; i++) { global_vector_id = intr_mgt->func_intr_res[func_id].interrupts[vector_id + i]; phy_ops->set_coalesce(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), @@ -412,7 +419,6 @@ do { \ NBL_INTR_SET_OPS(enable_mailbox_irq, nbl_res_intr_enable_mailbox_irq); \ NBL_INTR_SET_OPS(enable_abnormal_irq, nbl_res_intr_enable_abnormal_irq); \ NBL_INTR_SET_OPS(enable_adminq_irq, nbl_res_intr_enable_adminq_irq); \ - NBL_INTR_SET_OPS(enable_msix_irq, nbl_res_intr_enable_msix_irq); \ NBL_INTR_SET_OPS(get_msix_irq_enable_info, nbl_res_get_msix_irq_enable_info); \ NBL_INTR_SET_OPS(get_global_vector, nbl_res_intr_get_global_vector); \ NBL_INTR_SET_OPS(get_msix_entry_id, nbl_res_intr_get_msix_entry_id); \ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h index 30ca7aec72bcd051d1e8fa9b5d916e2dd55d6536..c16218ba4df5a1f3110338ec963cbb5d9a4c82fe 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h @@ -12,15 +12,20 @@ #define NBL_MSIX_MAP_TABLE_MAX_ENTRIES (1024) #define NBL_INTR_SUPPRESS_LEVEL1_THRESHOLD (100000) /* 100k pps */ +#define NBL_INTR_SUPPRESS_LEVEL2_THRESHOLD (4000000) /* 4M pps */ #define NBL_INTR_SUPPRESS_LEVEL1_DOWNGRADE_THRESHOLD (60000) /* 60k pps */ +#define NBL_INTR_SUPPRESS_LEVEL2_DOWNGRADE_THRESHOLD (2400000) /* 2.4M pps */ #define NBL_INTR_SUPPRESS_LEVEL0 (0) #define NBL_INTR_SUPPRESS_LEVEL1 (1) +#define NBL_INTR_SUPPRESS_LEVEL2 (2) #define NBL_INTR_SUPPRESS_LEVEL0_PNUM (0) #define NBL_INTR_SUPPRESS_LEVEL1_25G_PNUM (8) -#define NBL_INTR_SUPPRESS_LEVEL1_100G_PNUM (16) +#define NBL_INTR_SUPPRESS_LEVEL1_100G_PNUM (8) +#define NBL_INTR_SUPPRESS_LEVEL2_100G_PNUM (24) #define NBL_INTR_SUPPRESS_LEVEL0_RATE (0) #define NBL_INTR_SUPPRESS_LEVEL1_25G_RATE (1) -#define NBL_INTR_SUPPRESS_LEVEL1_100G_RATE (2) +#define NBL_INTR_SUPPRESS_LEVEL1_100G_RATE (1) +#define NBL_INTR_SUPPRESS_LEVEL2_100G_RATE (3) #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h index e15bc2b174f5cf63d8bd4833ad3a7bf905a736bc..675650037362c3da6d2fb59baa2dcc68399a9a7a 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h @@ -24,8 +24,18 @@ struct nbl_phy_mgt { u32 hw_size; spinlock_t reg_lock; /* Protect reg access */ bool should_lock; + u8 resv[3]; + enum nbl_hw_status hw_status; }; +struct nbl_phy_ped_tbl { + u64 addr:56; + u64 addr_len:8; +}; + +#define NBL_DELAY_MIN_TIME_FOR_REGS 400 /* 200us for palladium, 3us for s2c */ +#define NBL_DELAY_MAX_TIME_FOR_REGS 500 /* 300us for palladium, 5us for s2c */ + static inline __maybe_unused u32 rd32(u8 __iomem *addr, u64 reg) { return readl(addr + (reg)); @@ -45,6 +55,12 @@ static inline __maybe_unused void nbl_hw_read_regs(struct nbl_phy_mgt *phy_mgt, if (len % 4) return; + if (phy_mgt->hw_status) { + for (i = 0; i < size; i++) + *(u32 *)(data + i * sizeof(u32)) = U32_MAX; + return; + } + if (size > 1 && phy_mgt->should_lock) spin_lock(&phy_mgt->reg_lock); @@ -64,6 +79,9 @@ static inline __maybe_unused void nbl_hw_write_regs(struct nbl_phy_mgt *phy_mgt, if (len % 4) return; + if (phy_mgt->hw_status) + return; + if (size > 1 && phy_mgt->should_lock) spin_lock(&phy_mgt->reg_lock); @@ -76,14 +94,44 @@ static inline __maybe_unused void nbl_hw_write_regs(struct nbl_phy_mgt *phy_mgt, spin_unlock(&phy_mgt->reg_lock); } +static inline __maybe_unused void nbl_hw_write_be_regs(struct nbl_phy_mgt *phy_mgt, + u64 reg, const u8 *data, u32 len) +{ + u32 size = len / 4; + u32 i = 0; + u32 data_le; + + if (len % 4) + return; + + if (size > 1 && phy_mgt->should_lock) + spin_lock(&phy_mgt->reg_lock); + + for (i = 0; i < size; i++) { + data_le = swab32(*(u32 *)(data + i * sizeof(u32))); + /* Used for emu, make sure that we won't write too frequently */ + wr32_barrier(phy_mgt->hw_addr, reg + i * sizeof(u32), data_le); + } + + data_le = rd32(phy_mgt->hw_addr, 0x0); /* read back to flush the posted writes */ + if (size > 1 && phy_mgt->should_lock) + spin_unlock(&phy_mgt->reg_lock); +} + static __maybe_unused void nbl_hw_wr32(struct nbl_phy_mgt *phy_mgt, u64 reg, u32 value) { + if (phy_mgt->hw_status) + return; + /* Used for emu, make sure that we won't write too frequently */
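The hw_status checks added to the accessors above give every register access a fail-fast path once the device is marked broken: writes are dropped and reads return all-ones, which is also what a PCIe read returns after a surprise removal, so callers need only one test for both cases. A caller-side sketch building on nbl_hw_rd32() above (demo_read_checked() is an illustrative helper; since U32_MAX can also be a legitimate register value, real callers should apply the test only to registers that never read as all-ones):

static bool demo_read_checked(struct nbl_phy_mgt *phy_mgt, u64 reg, u32 *val)
{
	*val = nbl_hw_rd32(phy_mgt, reg);

	/* all-ones: hw_status is set, or the device fell off the bus */
	return *val != U32_MAX;
}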
wr32_barrier(phy_mgt->hw_addr, reg, value); } static __maybe_unused u32 nbl_hw_rd32(struct nbl_phy_mgt *phy_mgt, u64 reg) { + if (phy_mgt->hw_status) + return U32_MAX; + return rd32(phy_mgt->hw_addr, reg); } @@ -91,6 +139,9 @@ static __maybe_unused void nbl_mbx_wr32(void *priv, u64 reg, u32 value) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + if (phy_mgt->hw_status) + return; + writel((value), ((phy_mgt)->mailbox_bar_hw_addr + (reg))); } @@ -98,6 +149,9 @@ static __maybe_unused u32 nbl_mbx_rd32(void *priv, u64 reg) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + if (phy_mgt->hw_status) + return U32_MAX; + return readl((phy_mgt)->mailbox_bar_hw_addr + (reg)); } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c index 57e21862f4d02aaa0b965c4e71d676113b729133..4201c232a83ec7b47215b9a5dae525ea7e623af8 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c @@ -36,7 +36,6 @@ static u16 func_id_to_vsi_id(void *p, u16 func_id, u16 type) int vfid = U32_MAX; nbl_res_func_id_to_pfvfid(p, func_id, &pfid, &vfid); - return nbl_res_pfvfid_to_vsi_id(p, pfid, vfid, type); } @@ -66,7 +65,7 @@ static u16 vsi_id_to_func_id(void *p, u16 vsi_id) if (vsi_find) { /* if pf_id < eth_num */ - if (j >= NBL_VSI_SERV_PF_DATA_TYPE && j <= NBL_VSI_SERV_PF_USER_TYPE) + if (j >= NBL_VSI_SERV_PF_DATA_TYPE && j <= NBL_VSI_SERV_PF_XDP_TYPE) func_id = i + NBL_COMMON_TO_MGT_PF(common); /* if vf */ else if (j == NBL_VSI_SERV_VF_DATA_TYPE) { @@ -80,6 +79,9 @@ static u16 vsi_id_to_func_id(void *p, u16 vsi_id) } } + if (func_id == U16_MAX) + pr_err("convert vsi_id %d to func_id failed!\n", vsi_id); + return func_id; } @@ -253,6 +255,29 @@ static u8 eth_id_to_pf_id(void *p, u8 eth_id) return pf_id_offset + NBL_COMMON_TO_MGT_PF(common); } +static u8 eth_id_to_lag_id(void *p, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt); + int i, j; + + for (i = 0; i < NBL_LAG_MAX_NUM; i++) + for (j = 0; j < eth_bond_info->entry[i].lag_num && + NBL_ETH_BOND_VALID_PORT(j); j++) + if (eth_bond_info->entry[i].eth_id[j] == eth_id) + return i; + + return -1; +} + +static bool check_func_active_by_queue(void *p, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + + return queue_mgt->queue_info[func_id].txrx_queues ? 
true : false; +} + int nbl_res_func_id_to_pfvfid(struct nbl_resource_mgt *res_mgt, u16 func_id, int *pfid, int *vfid) { if (!res_mgt->common_ops.func_id_to_pfvfid) @@ -339,6 +364,22 @@ u8 nbl_res_eth_id_to_pf_id(struct nbl_resource_mgt *res_mgt, u8 eth_id) return res_mgt->common_ops.eth_id_to_pf_id(res_mgt, eth_id); } +u8 nbl_res_eth_id_to_lag_id(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + if (!res_mgt->common_ops.eth_id_to_lag_id) + return eth_id_to_lag_id(res_mgt, eth_id); + + return res_mgt->common_ops.eth_id_to_lag_id(res_mgt, eth_id); +} + +bool nbl_res_check_func_active_by_queue(struct nbl_resource_mgt *res_mgt, u16 func_id) +{ + if (!res_mgt->common_ops.check_func_active_by_queue) + return check_func_active_by_queue(res_mgt, func_id); + + return res_mgt->common_ops.check_func_active_by_queue(res_mgt, func_id); +} + bool nbl_res_get_flex_capability(void *priv, enum nbl_flex_cap_type cap_type) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -371,6 +412,18 @@ void nbl_res_pf_dev_vsi_type_to_hw_vsi_type(u16 src_type, enum nbl_vsi_serv_type *dst_type = NBL_VSI_SERV_PF_USER_TYPE; else if (src_type == NBL_VSI_CTRL) *dst_type = NBL_VSI_SERV_PF_CTLR_TYPE; + else if (src_type == NBL_VSI_XDP) + *dst_type = NBL_VSI_SERV_PF_XDP_TYPE; +} + +int nbl_res_get_rep_idx(struct nbl_eswitch_info *eswitch_info, u16 rep_vsi_id) +{ + u32 rep_idx = U32_MAX; + + if (rep_vsi_id >= eswitch_info->vf_base_vsi_id) + rep_idx = rep_vsi_id - eswitch_info->vf_base_vsi_id; + + return rep_idx; } bool nbl_res_vf_is_active(void *priv, u16 func_id) @@ -380,3 +433,26 @@ bool nbl_res_vf_is_active(void *priv, u16 func_id) return test_bit(func_id, resource_info->func_bitmap); } + +void nbl_res_set_hw_status(void *priv, enum nbl_hw_status hw_status) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->set_hw_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), hw_status); +} + +int nbl_res_get_pf_vf_num(void *priv, u16 pf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_sriov_info *sriov_info; + + if (pf_id >= NBL_RES_MGT_TO_PF_NUM(res_mgt)) + return -1; + + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + pf_id; + if (!sriov_info->num_vfs) + return -1; + + return sriov_info->num_vfs; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h index 868ca7af412e6fad897041524075dcaa774879a2..8a83c758d537bacb85206e6f9918cdabe8b36e2c 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h @@ -16,18 +16,27 @@ #define NBL_RES_MGT_TO_DMA_DEV(res_mgt) \ NBL_COMMON_TO_DMA_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)) #define NBL_RES_MGT_TO_INTR_MGT(res_mgt) ((res_mgt)->intr_mgt) +#define NBL_RES_MGT_TO_ACCEL_MGT(res_mgt) ((res_mgt)->accel_mgt) #define NBL_RES_MGT_TO_QUEUE_MGT(res_mgt) ((res_mgt)->queue_mgt) #define NBL_RES_MGT_TO_TXRX_MGT(res_mgt) ((res_mgt)->txrx_mgt) #define NBL_RES_MGT_TO_FLOW_MGT(res_mgt) ((res_mgt)->flow_mgt) +#define NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt) ((res_mgt)->tc_flow_mgt) +#define NBL_RES_MGT_TO_COUNTER_MGT(res_mgt) (((res_mgt)->tc_flow_mgt)->fc_mgt) #define NBL_RES_MGT_TO_VSI_MGT(res_mgt) ((res_mgt)->vsi_mgt) #define NBL_RES_MGT_TO_PORT_MGT(res_mgt) ((res_mgt)->port_mgt) #define NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt) ((res_mgt)->adminq_mgt) -#define NBL_RES_MGT_TO_RES_INFO(res_mgt) 
((res_mgt)->resource_info) +#define NBL_RES_MGT_TO_INTR_MGT(res_mgt) ((res_mgt)->intr_mgt) +#define NBL_RES_MGT_TO_FD_MGT(res_mgt) ((res_mgt)->fd_mgt) #define NBL_RES_MGT_TO_PROD_OPS(res_mgt) ((res_mgt)->product_ops) +#define NBL_RES_MGT_TO_RES_INFO(res_mgt) ((res_mgt)->resource_info) #define NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->sriov_info) +#define NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->eswitch_info) #define NBL_RES_MGT_TO_ETH_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->eth_info) #define NBL_RES_MGT_TO_VSI_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->vsi_info) +#define NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->eth_bond_info) #define NBL_RES_MGT_TO_PF_NUM(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->max_pf) +#define NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->vdpa.vf_stats) +#define NBL_RES_MGT_TO_USTORE_STATS(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->ustore_stats) #define NBL_RES_MGT_TO_PHY_OPS_TBL(res_mgt) ((res_mgt)->phy_ops_tbl) #define NBL_RES_MGT_TO_PHY_OPS(res_mgt) (NBL_RES_MGT_TO_PHY_OPS_TBL(res_mgt)->ops) @@ -45,14 +54,13 @@ #define NBL_RES_BASE_QID(res_mgt) NBL_RES_MGT_TO_RES_INFO(res_mgt)->base_qid #define NBL_RES_NOFITY_QID(res_mgt, local_qid) (NBL_RES_BASE_QID(res_mgt) * 2 + (local_qid)) -#define NBL_MAX_FUNC (520) +#define NBL_MAX_NET_ID NBL_MAX_FUNC #define NBL_MAX_JUMBO_FRAME_SIZE (9600) #define NBL_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) -/* temperature threshold1 */ -#define NBL_LEONIS_TEMP_MAX (100) -/* temperature threshold2 */ -#define NBL_LEONIS_TEMP_CRIT (115) +#define NBL_TPID_PORT_NUM (1031) +#define NBL_VLAN_TPYE (0) +#define NBL_QINQ_TPYE (1) /* --------- QUEUE ---------- */ #define NBL_MAX_TXRX_QUEUE (2048) @@ -63,11 +71,15 @@ #define NBL_DEFAULT_PF_HW_QUEUE_NUM (16) #define NBL_DEFAULT_USER_HW_QUEUE_NUM (16) #define NBL_DEFAULT_VF_HW_QUEUE_NUM (2) +#define NBL_VSI_PF_LEGACY_QUEUE_NUM_MAX (NBL_MAX_TXRX_QUEUE_PER_FUNC - \ + NBL_DEFAULT_REP_HW_QUEUE_NUM) #define NBL_SPECIFIC_VSI_NET_ID_OFFSET (4) #define NBL_MAX_CACHE_SIZE (256) #define NBL_MAX_BATCH_DESC (64) +#define NBL_VDPA_ITR_BATCH_CNT (64) + enum nbl_qid_map_table_type { NBL_MASTER_QID_MAP_TABLE, NBL_SLAVE_QID_MAP_TABLE, @@ -93,12 +105,13 @@ struct nbl_queue_info { u64 notify_addr; u32 qid_map_index; u16 num_txrx_queues; + u16 rss_ret_base; u16 *txrx_queues; u16 *queues_context; - u16 rss_ret_base; + u32 *uvn_stat_pkt_drop; u16 rss_entry_size; - u32 curr_qps; u16 split; + u32 curr_qps; u16 queue_size; }; @@ -113,6 +126,7 @@ struct nbl_queue_mgt { DECLARE_BITMAP(rss_ret_bitmap, NBL_EPRO_RSS_RET_TBL_DEPTH); struct nbl_qid_map_table qid_map_table[NBL_QID_MAP_TABLE_ENTRIES]; struct nbl_queue_info queue_info[NBL_MAX_FUNC]; + u16 net_id_ref_vsinum[NBL_MAX_NET_ID]; u32 total_qid_map_entries; int qid_map_select; bool qid_map_ready; @@ -138,6 +152,7 @@ struct nbl_msix_map_table { struct nbl_func_interrupt_resource_mng { u16 num_interrupts; + u16 num_net_interrupts; u16 msix_base; u16 msix_max; u16 *interrupts; @@ -154,6 +169,11 @@ struct nbl_port_mgt { }; /* --------- TXRX ---------- */ +struct nbl_txrx_vsi_info { + u16 ring_offset; + u16 ring_num; +}; + struct nbl_ring_desc { /* buffer address */ __le64 addr; @@ -167,7 +187,11 @@ struct nbl_ring_desc { struct nbl_tx_buffer { struct nbl_ring_desc *next_to_watch; - struct sk_buff *skb; + union nbl_tx_extend_head *tls_pkthdr; + union { + struct sk_buff *skb; + void *raw_buff; /* for xdp */ + }; dma_addr_t dma; u32 len; 
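The nbl_tx_buffer change above turns the skb pointer into a union so a single completion-ring slot can describe either a regular skb or a raw XDP buffer; which member is live follows from the kind of ring the slot belongs to. A sketch of the completion-side cleanup this implies (demo_free_tx_buffer() and its is_xdp flag are illustrative; the real driver may key off the ring type differently):

static void demo_free_tx_buffer(struct device *dma_dev, bool is_xdp,
				struct nbl_tx_buffer *buf)
{
	dma_unmap_single(dma_dev, buf->dma, buf->len, DMA_TO_DEVICE);

	if (is_xdp)
		page_frag_free(buf->raw_buff);	/* XDP ring: raw page fragment */
	else
		dev_kfree_skb_any(buf->skb);	/* regular ring: socket buffer */

	buf->raw_buff = NULL;			/* both union members share storage */
}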
@@ -180,6 +204,7 @@ struct nbl_tx_buffer { struct nbl_dma_info { dma_addr_t addr; struct page *page; + u32 size; }; struct nbl_page_cache { @@ -190,14 +215,17 @@ struct nbl_page_cache { struct nbl_rx_buffer { struct nbl_dma_info *di; - u32 offset; + u16 offset; + u16 rx_pad; + u16 size; bool last_in_page; }; struct nbl_res_vector { - struct napi_struct napi; + struct nbl_napi_struct nbl_napi; struct nbl_res_tx_ring *tx_ring; struct nbl_res_rx_ring *rx_ring; + struct nbl_res_tx_ring *xdp_ring; u8 *irq_enable_base; u32 irq_data; bool started; @@ -211,6 +239,9 @@ struct nbl_res_tx_ring { struct device *dma_dev; struct net_device *netdev; u8 __iomem *notify_addr; + struct nbl_queue_stats stats; + struct u64_stats_sync syncp; + struct nbl_tx_queue_stats tx_stats; enum nbl_product_type product_type; u16 queue_index; @@ -223,19 +254,21 @@ struct nbl_res_tx_ring { u16 next_to_clean; u16 tail_ptr; u16 mode; + u16 vlan_tci; + u16 vlan_proto; u8 eth_id; u8 extheader_tx_len; - struct nbl_queue_stats stats; - struct u64_stats_sync syncp; - struct nbl_tx_queue_stats tx_stats; - /* control path */ // dma for desc[] dma_addr_t dma; // size for desc[] unsigned int size; bool valid; + + struct nbl_txrx_vsi_info *vsi_info; + void *xdp_prog; + spinlock_t xmit_lock; /* for XDP TX: the number of XDP queues may be less than the number of cores */ } ____cacheline_internodealigned_in_smp; struct nbl_res_rx_ring { @@ -255,36 +288,47 @@ struct nbl_res_rx_ring { u32 buf_len; u16 avail_used_flags; bool used_wrap_counter; + u8 nid; u16 next_to_use; u16 next_to_clean; u16 tail_ptr; u16 mode; u16 desc_num; u16 queue_index; + u16 vlan_tci; + u16 vlan_proto; /* control path */ struct nbl_common_info *common; void *txrx_mgt; + void *xdp_prog; + struct xdp_rxq_info xdp_rxq; // dma for desc[] dma_addr_t dma; // size for desc[] unsigned int size; bool valid; u16 notify_qid; + + u16 frags_num_per_page; } ____cacheline_internodealigned_in_smp; -struct nbl_txrx_vsi_info { - u16 ring_offset; - u16 ring_num; +struct nbl_txrx_bond_info { + u16 eth_id[NBL_LAG_MAX_PORTS]; + u16 lag_id; + bool bond_enable; }; struct nbl_txrx_mgt { struct nbl_res_vector **vectors; struct nbl_res_tx_ring **tx_rings; struct nbl_res_rx_ring **rx_rings; + struct nbl_txrx_bond_info bond_info; struct nbl_txrx_vsi_info vsi_info[NBL_VSI_MAX]; u16 tx_ring_num; u16 rx_ring_num; + u16 xdp_ring_offset; + u16 xdp_ring_num; }; struct nbl_vsi_mgt { @@ -315,8 +359,8 @@ struct nbl_adminq_mgt { }; /* --------- FLOW ---------- */ -#define NBL_FEM_HT_PP0_LEN (1 * 1024) -#define NBL_MACVLAN_TABLE_LEN (4096) +#define NBL_FEM_HT_PP0_LEN (2 * 1024) +#define NBL_MACVLAN_TABLE_LEN (4096 * 2) enum nbl_next_stg_id_e { NBL_NEXT_STG_PA = 1, @@ -339,12 +383,19 @@ enum { NBL_FLOW_UP, NBL_FLOW_DOWN, NBL_FLOW_MACVLAN_MAX, - NBL_FLOW_L2_UP = NBL_FLOW_MACVLAN_MAX, - NBL_FLOW_L2_DOWN, - NBL_FLOW_L3_UP, - NBL_FLOW_L3_DOWN, - NBL_FLOW_TYPE_MAX, - NBL_FLOW_LLDP_LACP_UP, + NBL_FLOW_LLDP_LACP_UP = NBL_FLOW_MACVLAN_MAX, + NBL_FLOW_PMD_ND_UPCALL, + NBL_FLOW_L2_UP_MULTI_MCAST, + NBL_FLOW_L3_UP_MULTI_MCAST, + NBL_FLOW_UP_MULTI_MCAST_END, + NBL_FLOW_L2_DOWN_MULTI_MCAST = NBL_FLOW_UP_MULTI_MCAST_END, + NBL_FLOW_L3_DOWN_MULTI_MCAST, + NBL_FLOW_DOWN_MULTI_MCAST_END, + NBL_FLOW_ACCEL_BEGIN = NBL_FLOW_DOWN_MULTI_MCAST_END, + NBL_FLOW_TLS_UP = NBL_FLOW_ACCEL_BEGIN, + NBL_FLOW_IPSEC_DOWN, + NBL_FLOW_ACCEL_END, + NBL_FLOW_TYPE_MAX = NBL_FLOW_ACCEL_END, }; struct nbl_flow_ht_key { @@ -376,17 +427,44 @@ struct nbl_flow_fem_entry { struct nbl_flow_mcc_node { struct list_head node; + u16 data; u16 mcc_id; + u16
mcc_action; + bool mcc_head; + u8 type; }; -struct nbl_flow_multi_group { - struct list_head mcc_list; - struct nbl_flow_fem_entry entry[NBL_FLOW_TYPE_MAX - NBL_FLOW_MACVLAN_MAX]; - u8 ether_id; - u16 mcc_id; +struct nbl_flow_mcc_group { + struct list_head group_node; + /* list_head for mcc_node_list */ + struct list_head mcc_node; + struct list_head mcc_head; + unsigned long *vsi_bitmap; + u32 nbits; + u32 vsi_base; + u32 vsi_num; + u32 ref_cnt; + u16 up_mcc_id; + u16 down_mcc_id; + bool multi; +}; + +struct nbl_flow_switch_res { + void *mac_hash_tbl; + unsigned long *vf_bitmap; + struct list_head allmulti_head; + struct list_head allmulti_list; + struct list_head mcc_group_head; + struct nbl_flow_fem_entry allmulti_up[2]; + struct nbl_flow_fem_entry allmulti_down[2]; + u16 vld; u16 network_status; u16 pfc_mode; u16 bp_mode; + u16 allmulti_first_mcc; + u16 num_vfs; + u16 active_vfs; + u8 ether_id; }; struct nbl_flow_lacp_rule { @@ -401,8 +479,8 @@ struct nbl_flow_lldp_rule { u16 vsi; }; -struct nbl_flow_UL4S_rule { - struct nbl_flow_fem_entry UL4S_entry; +struct nbl_flow_ul4s_rule { + struct nbl_flow_fem_entry ul4s_entry; struct list_head node; u16 vsi; u32 index; @@ -424,18 +502,29 @@ struct nbl_flow_nd_upcall_rule { struct list_head node; }; +struct nbl_event_mirror_outputport_data { + u16 func_id; + bool opcode; /* true: add; false: del */ +}; + struct nbl_flow_mgt { - DECLARE_BITMAP(flow_id, NBL_MACVLAN_TABLE_LEN); + unsigned long *flow_id_bitmap; + unsigned long *mcc_id_bitmap; DECLARE_BITMAP(tcam_id, NBL_TCAM_TABLE_LEN); - u32 pp_tcam_count; - u32 unicast_mac_threshold; struct nbl_flow_ht_mng pp0_ht0_mng; struct nbl_flow_ht_mng pp0_ht1_mng; - struct nbl_flow_multi_group multi_flow[NBL_MAX_ETHERNET]; - void *mac_hash_tbl[NBL_MAX_ETHERNET]; + struct nbl_flow_switch_res switch_res[NBL_MAX_ETHERNET]; struct list_head lldp_list; struct list_head lacp_list; - void *mcc_tbl_priv; + struct list_head ul4s_head; + struct list_head dprbac_head; + struct list_head nd_upcall_list; // note: works only for offload network + u32 pp_tcam_count; + u32 accel_flow_count; + u32 flow_id_cnt; + u16 vsi_max_per_switch; +#define NBL_MIRROR_OUTPUTPORT_MAX_FUNC 8 + u16 mirror_outputport_func[NBL_MIRROR_OUTPUTPORT_MAX_FUNC]; }; #define NBL_FLOW_INIT_BIT BIT(1) @@ -463,20 +552,22 @@ enum nbl_flow_key_type { }; #define NBL_PP0_KT_NUM (0) -#define NBL_PP1_KT_NUM (12 * 1024) -#define NBL_PP2_KT_NUM (112 * 1024) -#define NBL_PP0_KT_OFFSET (124 * 1024) -#define NBL_PP1_KT_OFFSET (112 * 1024) -#define NBL_FEM_HT_PP0_LEN (1 * 1024) -#define NBL_FEM_HT_PP1_LEN (3 * 1024) +#define NBL_PP1_KT_NUM (24 * 1024) +#define NBL_PP2_KT_NUM (96 * 1024) +#define NBL_PP0_KT_OFFSET (120 * 1024) +#define NBL_PP1_KT_OFFSET (96 * 1024) +#define NBL_FEM_HT_PP0_LEN (2 * 1024) +#define NBL_FEM_HT_PP1_LEN (6 * 1024) #define NBL_FEM_HT_PP2_LEN (16 * 1024) -#define NBL_FEM_HT_PP0_DEPTH (1 * 1024) -#define NBL_FEM_HT_PP1_DEPTH (3 * 1024) -#define NBL_FEM_HT_PP2_DEPTH (0) -#define NBL_FEM_AT_PP1_LEN (6 * 1024) -#define NBL_FEM_AT2_PP1_LEN (2 * 1024) -#define NBL_FEM_AT_PP2_LEN (72 * 1024) +#define NBL_FEM_HT_PP0_DEPTH (2 * 1024) +#define NBL_FEM_HT_PP1_DEPTH (6 * 1024) +#define NBL_FEM_HT_PP2_DEPTH (0) /* 16K, treat as zero */ +#define NBL_FEM_AT_PP1_LEN (12 * 1024) +#define NBL_FEM_AT2_PP1_LEN (4 * 1024) +#define NBL_FEM_AT_PP2_LEN (64 * 1024) #define NBL_FEM_AT2_PP2_LEN (16 * 1024) +#define NBL_TC_MCC_TBL_DEPTH (4096) +#define NBL_TC_ENCAP_TBL_DEPTH (4 * 1024) struct nbl_flow_key_info { bool valid; @@ -518,15 +609,10 @@ struct 
nbl_profile_msg { }; struct nbl_flow_tab_hash_info { - struct hlist_head *flow_tab_head; + void *flow_tab_hash; s32 tab_cnt; }; -struct nbl_flow_index_mng { - struct hlist_head flow_index_head[NBL_FLOW_INDEX_LEN]; - DECLARE_BITMAP(flow_index_bmp, NBL_FLOW_INDEX_LEN); -}; - struct nbl_profile_assoc_graph { u64 key_flag; u8 profile_count; @@ -558,12 +644,257 @@ struct nbl_flow_pp_at_key { }; struct nbl_flow_at_tbl { - struct hlist_node node; - struct nbl_flow_pp_at_key key; - u32 at_bitmap_index; u32 ref_cnt; }; +struct nbl_flow_at_mng { + void *at_tbl[NBL_PP_TYPE_MAX][NBL_AT_TYPE_MAX]; +}; + +struct nbl_tc_ht_item { + u16 ht_entry; + u16 ht0_hash; + u16 ht1_hash; + u16 hash_bucket; + u32 tbl_id; +}; + +union nbl_tc_common_data_u { + struct nbl_tc_common_data { + u32 rsv[10]; + } __packed info; +#define NBL_TC_COMMON_DATA_TAB_WIDTH (sizeof(struct nbl_tc_common_data) \ + / sizeof(u32)) + u32 data[NBL_TC_COMMON_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_tc_common_data)]; +}; + +struct nbl_tc_kt_item { + union nbl_tc_common_data_u kt_data; + u8 pp_type; + u8 key_type; +}; + +struct nbl_act_collect { + u32 act_vld; + u32 act2_vld; + u32 act_offset; + u32 act2_offset; + u32 act_hw_index; + u32 act2_hw_index; + struct nbl_flow_pp_at_key act_key[2]; +}; + +struct nbl_tc_at_item { + u32 act_buf[NBL_AT_MAX_NUM]; + u32 act_num; + u32 act1_buf[NBL_AT_MAX_NUM]; + u32 act1_num; + u32 act2_buf[NBL_AT_MAX_NUM]; + u32 act2_num; + struct nbl_act_collect act_collect; +}; + +struct nbl_flow_tcam_key_item { + u8 key[NBL_KT_BYTE_HALF_LEN]; + u8 key_mode; + struct nbl_tc_ht_item ht_item; + struct nbl_tc_kt_item kt_item; + struct nbl_tc_at_item at_item; + u32 sw_hash_id; + u8 profile_id; +}; + +struct nbl_flow_tcam_key_mng { + struct nbl_flow_tcam_key_item item; + u32 ref_cnt; +}; + +struct nbl_flow_tcam_ad_item { + u32 action[NBL_MAX_ACTION_NUM]; +}; + +struct nbl_flow_tcam_ad_mng { + struct nbl_flow_tcam_ad_item item; +}; + +struct nbl_count_mng { + u32 pp1_tcam_count; + u32 pp2_tcam_count; +}; + +/* --------- tc flow stats ---------- */ +struct nbl_flow_counter_cache { + u64 packets; + u64 bytes; +}; + +struct nbl_flow_counter { + struct list_head entries; + u64 lastpackets; + u64 lastbytes; + u64 lastuse; + struct nbl_flow_counter_cache cache; + unsigned long cookie; + u32 counter_id; +}; + +struct nbl_flow_update_counter { + u32 counter_id; + unsigned long cookie; +}; + +struct nbl_flow_query_counter { + u32 counter_id[NBL_FLOW_COUNT_NUM]; + unsigned long cookie[NBL_FLOW_COUNT_NUM]; +}; + +struct nbl_fc_mgt; +struct nbl_fc_product_ops { + void (*get_spec_stat_sz)(u16 *hit_sz, u16 *bytes_sz); + void (*get_flow_stat_sz)(u16 *hit_sz, u16 *bytes_sz); + void (*get_spec_stats)(struct nbl_flow_counter *counter, u64 *pkts, u64 *bytes); + void (*get_flow_stats)(struct nbl_flow_counter *counter, u64 *pkts, u64 *bytes); + int (*update_stats)(struct nbl_fc_mgt *mgt, struct nbl_flow_query_counter *counter_array, + u32 flow_num, u32 clear, enum nbl_pp_fc_type fc_type); +}; + +struct nbl_fc_mgt { + spinlock_t counter_lock; /* protect the counter */ + void *cls_cookie_tbl[NBL_FC_TYPE_MAX]; + struct workqueue_struct *counter_wq; + struct nbl_common_info *common; + struct delayed_work counter_work; + struct list_head counter_hash_list; + struct list_head counter_stat_hash_list; + struct nbl_flow_update_counter *counter_update_list; + struct nbl_cmd_hdr cmd_hdr[NBL_CMDQ_MAX_OP_CODE]; + unsigned long query_interval; + unsigned long next_query; + struct nbl_fc_product_ops fc_ops; + enum nbl_product_type type; +}; + +struct 
nbl_tc_mcc_mgt { + DECLARE_BITMAP(mcc_pool, NBL_TC_MCC_TBL_DEPTH); + struct nbl_common_info *common; + struct list_head mcc_list; + u16 mcc_offload_cnt; +}; + +struct nbl_tc_pedit_res_info { +#define NBL_TC_MAX_PED_IDX 2048 + /* common pedit resource */ + DECLARE_BITMAP(pedit_pool, NBL_TC_MAX_PED_IDX); + void *pedit_tbl; + u32 pedit_num:16; + u32 pedit_cnt:16; + + /* special use for leonis-ipv6, ipv6 need 2 addrs */ + DECLARE_BITMAP(pedit_pool_h, NBL_TC_MAX_PED_H_IDX); + void *pedit_tbl_h; + /* normal could store in _h */ + u32 pedit_num_h:16; + u32 pedit_cnt_h:16; + + u32 pedit_base_id; +}; + +struct nbl_tc_pedit_mgt { + struct nbl_tc_pedit_res_info pedit_res[NBL_FLOW_PED_RES_MAX]; + struct nbl_common_info *common; + struct mutex pedit_lock; /* protect the pedit */ +}; + +struct nbl_tc_flow_mgt { + spinlock_t flow_lock; /* used to lock flow resource */ + struct nbl_flow_prf_upcall_info prf_info; + u8 profile_graph_count; + struct nbl_profile_msg profile_msg[NBL_ALL_PROFILE_NUM]; + struct nbl_profile_assoc_graph profile_graph[NBL_ASSOC_PROFILE_GRAPH_NUM]; + void *flow_idx_tbl; + + struct nbl_flow_tab_hash_info flow_tab_hash[NBL_ALL_PROFILE_NUM]; + + DECLARE_BITMAP(assoc_table_bmp, NBL_FLOW_TABLE_NUM); + DECLARE_BITMAP(pp1_kt_bmp, NBL_PP1_KT_NUM); + DECLARE_BITMAP(pp2_kt_bmp, NBL_PP2_KT_NUM); + + u8 init_status; + atomic64_t destroy_num; + atomic64_t create_num; + atomic64_t ref_cnt; + + struct nbl_flow_pp_ht_mng pp0_ht0_mng; + struct nbl_flow_pp_ht_mng pp0_ht1_mng; + struct nbl_flow_pp_ht_mng pp1_ht0_mng; + struct nbl_flow_pp_ht_mng pp1_ht1_mng; + struct nbl_flow_pp_ht_mng pp2_ht0_mng; + struct nbl_flow_pp_ht_mng pp2_ht1_mng; + struct nbl_flow_at_mng at_mng; + + struct nbl_flow_tcam_key_mng tcam_pp0_key_mng[NBL_FEM_TCAM_MAX_NUM]; + struct nbl_flow_tcam_ad_mng tcam_pp0_ad_mng[NBL_FEM_TCAM_MAX_NUM]; + struct nbl_flow_tcam_key_mng tcam_pp1_key_mng[NBL_FEM_TCAM_MAX_NUM]; + struct nbl_flow_tcam_ad_mng tcam_pp1_ad_mng[NBL_FEM_TCAM_MAX_NUM]; + struct nbl_flow_tcam_key_mng tcam_pp2_key_mng[NBL_FEM_TCAM_MAX_NUM]; + struct nbl_flow_tcam_ad_mng tcam_pp2_ad_mng[NBL_FEM_TCAM_MAX_NUM]; + + struct nbl_count_mng count_mng; + struct nbl_fc_mgt *fc_mgt; + struct nbl_resource_mgt *res_mgt; + u8 pf_set_tc_count; + struct nbl_tc_mcc_mgt tc_mcc_mgt; + u16 port_tpid_type[NBL_TPID_PORT_NUM]; + + /* encap and decap info */ + struct mutex encap_tbl_lock; /* used to lock encap resource */ + struct nbl_flow_tab_hash_info encap_tbl; + DECLARE_BITMAP(encap_tbl_bmp, NBL_TC_ENCAP_TBL_DEPTH); + + /* pedit info */ + struct nbl_tc_pedit_mgt pedit_mgt; +}; + +/* --------- ACCEL ---------- */ +#define NBL_MAX_KTLS_SESSION (1024) +#define NBL_MAX_IPSEC_SESSION (2048) +#define NBL_MAX_IPSEC_TCAM (32) +#define NBL_IPSEC_HT_LEN (1 * 512) + +struct nbl_ipsec_ht_mng { + struct nbl_flow_ht_tbl *hash_map[NBL_IPSEC_HT_LEN]; +}; + +struct nbl_accel_uipsec_rule { + struct nbl_flow_fem_entry uipsec_entry; + struct list_head node; + u16 vsi; + u32 index; +}; + +struct nbl_tls_cfg_info { + u16 vld; + u16 vsi; +}; + +struct nbl_accel_mgt { + DECLARE_BITMAP(tx_ktls_bitmap, NBL_MAX_KTLS_SESSION); + DECLARE_BITMAP(rx_ktls_bitmap, NBL_MAX_KTLS_SESSION); + struct nbl_tls_cfg_info dtls_cfg_info[NBL_MAX_KTLS_SESSION]; + struct nbl_tls_cfg_info utls_cfg_info[NBL_MAX_KTLS_SESSION]; + + DECLARE_BITMAP(tx_ipsec_bitmap, NBL_MAX_IPSEC_SESSION); + DECLARE_BITMAP(rx_ipsec_bitmap, NBL_MAX_IPSEC_SESSION); + struct nbl_ipsec_cfg_info tx_cfg_info[NBL_MAX_IPSEC_SESSION]; + struct nbl_ipsec_cfg_info rx_cfg_info[NBL_MAX_IPSEC_SESSION]; + + 
DECLARE_BITMAP(ipsec_tcam_id, NBL_MAX_IPSEC_TCAM); + struct nbl_ipsec_ht_mng ipsec_ht0_mng; + struct nbl_ipsec_ht_mng ipsec_ht1_mng; + struct list_head uprbac_head; +}; + /* --------- INFO ---------- */ #define NBL_RES_RDMA_MAX (63) #define NBL_RES_RDMA_INTR_NUM (3) @@ -581,6 +912,13 @@ struct nbl_sriov_info { u64 pf_bar_start; }; +struct nbl_eswitch_info { + struct nbl_rep_data *rep_data; + int num_vfs; + u16 mode; + u16 vf_base_vsi_id; +}; + struct nbl_eth_info { DECLARE_BITMAP(eth_bitmap, NBL_MAX_ETHERNET); u64 port_caps[NBL_MAX_ETHERNET]; @@ -593,18 +931,21 @@ struct nbl_eth_info { u8 module_inplace[NBL_MAX_ETHERNET]; u8 port_type[NBL_MAX_ETHERNET]; /* enum nbl_port_type */ u8 port_max_rate[NBL_MAX_ETHERNET]; /* enum nbl_port_max_rate */ + u8 module_repluged[NBL_MAX_ETHERNET]; u8 pf_bitmap[NBL_MAX_ETHERNET]; u8 eth_num; u8 resv[3]; u8 eth_id[NBL_MAX_PF]; u8 logic_eth_id[NBL_MAX_PF]; + u64 link_down_count[NBL_MAX_ETHERNET]; }; enum nbl_vsi_serv_type { NBL_VSI_SERV_PF_DATA_TYPE, NBL_VSI_SERV_PF_CTLR_TYPE, NBL_VSI_SERV_PF_USER_TYPE, + NBL_VSI_SERV_PF_XDP_TYPE, NBL_VSI_SERV_VF_DATA_TYPE, /* use for pf_num > eth_num, the extra pf belong pf0's switch */ NBL_VSI_SERV_PF_EXTRA_TYPE, @@ -616,9 +957,38 @@ struct nbl_vsi_serv_info { u16 num; }; +struct nbl_vsi_mac_info { + u16 vlan_proto; + u16 vlan_tci; + int rate; + u8 mac[ETH_ALEN]; + bool trusted; +}; + struct nbl_vsi_info { u16 num; struct nbl_vsi_serv_info serv_info[NBL_MAX_ETHERNET][NBL_VSI_SERV_MAX_TYPE]; + struct nbl_vsi_mac_info mac_info[NBL_MAX_FUNC]; +}; + +#define NBL_RDMA_BOND_KEY_MAGIC 0x1000 +struct nbl_rdma_info { + DECLARE_BITMAP(func_cap, NBL_MAX_FUNC); + u16 rdma_id[NBL_MAX_FUNC]; + u32 mem_type; + /* TODO: merge draco code, and delete this */ + u16 rdma_vacant; +}; + +#define NBL_ETH_BOND_VALID_PORT(x) ((x) < NBL_LAG_MAX_PORTS) +struct nbl_eth_bond_entry { + u8 eth_id[NBL_LAG_MAX_PORTS]; + u16 lag_id; + u16 lag_num; +}; + +struct nbl_eth_bond_info { + struct nbl_eth_bond_entry entry[NBL_LAG_MAX_NUM]; }; struct nbl_net_ring_num_info { @@ -627,14 +997,42 @@ struct nbl_net_ring_num_info { u16 net_max_qp_num[NBL_MAX_FUNC]; }; +struct nbl_rdma_cap_info { + u32 valid; + u8 rdma_func_bitmaps[65]; + u8 rsv[7]; +}; + +struct nbl_rdma_mem_type_info { + u32 mem_type; +}; + +struct nbl_vdpa_status { + struct nbl_vf_stats init_stats; + struct nbl_vf_stats prev_stats; + unsigned long timestamp; + u16 itr_level; +}; + +struct nbl_vdpa_info { + DECLARE_BITMAP(vdpa_func_bitmap, NBL_MAX_FUNC); + struct nbl_vdpa_status *vf_stats[NBL_MAX_FUNC]; + u32 start; +}; + struct nbl_resource_info { /* ctrl-dev owned pfs */ DECLARE_BITMAP(func_bitmap, NBL_MAX_FUNC); + struct nbl_vdpa_info vdpa; struct nbl_sriov_info *sriov_info; + struct nbl_eswitch_info *eswitch_info; struct nbl_eth_info *eth_info; struct nbl_vsi_info *vsi_info; + struct nbl_eth_bond_info *eth_bond_info; u32 base_qid; + u32 max_vf_num; + struct nbl_rdma_info rdma_info; struct nbl_net_ring_num_info net_ring_num_info; /* for af use */ @@ -644,8 +1042,72 @@ struct nbl_resource_info { u8 max_pf; u16 nd_upcall_refnt; struct nbl_board_port_info board_info; + /* store all pf names for vf/rep device name use */ + char pf_name_list[NBL_MAX_PF][IFNAMSIZ]; + + u8 link_forced_info[NBL_MAX_FUNC]; + struct nbl_mtu_entry mtu_list[NBL_MAX_MTU]; + + struct nbl_ustore_stats *ustore_stats; +}; + +enum { + NBL_FD_MODE_DEFAULT = 0,/* Support src_mac & dst_mac, ipv4 + other in total 512 rules */ + NBL_FD_MODE_FULL, /* Unsupport src_mac & dst_mac, ipv4 + other each 512 rules */ + NBL_FD_MODE_LITE, /* Only 
support ipv4, 1536 rules */ + NBL_FD_MODE_MAX, +}; + +union nbl_fd_compo_info { + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + u16 ethertype; + u32 src_ipv4; + u32 dst_ipv4; + u32 src_ipv6[NBL_IPV6_U32LEN]; + u32 dst_ipv6[NBL_IPV6_U32LEN]; + u8 ipproto; + u16 l4_sport; + u16 l4_dport; + struct nbl_fd_compo_udf { + u32 offset; + u32 data; + } udf; }; +struct nbl_flow_direct_entry { + struct list_head node; + u8 pid; + bool udf; + u16 action_index; + u16 depth_index; + struct nbl_chan_param_fdir_replace param; +}; + +struct nbl_flow_direct_info { + struct list_head list[NBL_CHAN_FDIR_RULE_MAX]; + u16 state[NBL_CHAN_FDIR_RULE_MAX]; + u16 cnt[NBL_CHAN_FDIR_RULE_MAX]; +}; + +struct nbl_fd_component_ops { + int (*validate)(struct ethtool_rx_flow_spec *fs); + int (*form)(struct nbl_flow_direct_entry *entry, struct ethtool_rx_flow_spec *fs); + u16 layer; +}; + +struct nbl_flow_direct_mgt { + struct nbl_flow_direct_info info[NBL_MAX_PF]; + u32 max_spec; + u32 udf_offset; + u32 udf_cnt; + u16 udf_layer; + u16 cnt[NBL_FD_PROFILE_MAX]; + u8 mode; + u8 state; +}; + +/* --------- PMD status ---------- */ struct nbl_upcall_port_info { bool upcall_port_active; u16 func_id; @@ -661,9 +1123,15 @@ struct nbl_rep_offload_status { unsigned long timestamp; }; +struct nbl_pmd_status { + struct nbl_upcall_port_info upcall_port_info; + struct nbl_rep_offload_status rep_status; +}; + struct nbl_resource_common_ops { u16 (*vsi_id_to_func_id)(void *res_mgt, u16 vsi_id); int (*vsi_id_to_pf_id)(void *res_mgt, u16 vsi_id); + u16 (*vsi_id_to_vf_id)(void *res_mgt, u16 vsi_id); u16 (*pfvfid_to_func_id)(void *res_mgt, int pfid, int vfid); u16 (*pfvfid_to_vsi_id)(void *res_mgt, int pfid, int vfid, u16 type); u16 (*func_id_to_vsi_id)(void *res_mgt, u16 func_id, u16 type); @@ -673,6 +1141,8 @@ struct nbl_resource_common_ops { u16 (*get_particular_queue_id)(void *res_mgt, u16 vsi_id); u8 (*vsi_id_to_eth_id)(void *res_mgt, u16 vsi_id); u8 (*eth_id_to_pf_id)(void *res_mgt, u8 eth_id); + u8 (*eth_id_to_lag_id)(void *res_mgt, u8 eth_id); + bool (*check_func_active_by_queue)(void *res_mgt, u16 func_id); }; struct nbl_res_product_ops { @@ -697,10 +1167,12 @@ struct nbl_resource_mgt { struct nbl_interrupt_mgt *intr_mgt; struct nbl_txrx_mgt *txrx_mgt; struct nbl_flow_mgt *flow_mgt; + struct nbl_tc_flow_mgt *tc_flow_mgt; struct nbl_vsi_mgt *vsi_mgt; struct nbl_adminq_mgt *adminq_mgt; struct nbl_accel_mgt *accel_mgt; struct nbl_port_mgt *port_mgt; + struct nbl_flow_direct_mgt *fd_mgt; struct nbl_res_product_ops *product_ops; DECLARE_BITMAP(flex_capability, NBL_FLEX_CAP_NBITS); DECLARE_BITMAP(fix_capability, NBL_FIX_CAP_NBITS); @@ -712,12 +1184,21 @@ struct nbl_resource_mgt { */ struct nbl_resource_mgt_leonis { struct nbl_resource_mgt res_mgt; + struct nbl_pmd_status pmd_status; +}; + +struct nbl_resource_mgt_bootis { + struct nbl_resource_mgt res_mgt; +}; + +struct nbl_resource_mgt_virtio { + struct nbl_resource_mgt res_mgt; }; #define NBL_RES_FW_CMD_FILTER_MAX 8 struct nbl_res_fw_cmd_filter { - int (*in)(struct nbl_resource_mgt *res_mgt, void *data, int len); - int (*out)(struct nbl_resource_mgt *res_mgt, void *data, int len); + int (*in)(struct nbl_resource_mgt *res_mgt, void *in, u16 in_len); + int (*out)(struct nbl_resource_mgt *res_mgt, void *in, u16 in_len, void *out, u16 out_len); }; u16 nbl_res_vsi_id_to_func_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id); @@ -727,11 +1208,13 @@ u16 nbl_res_pfvfid_to_vsi_id(struct nbl_resource_mgt *res_mgt, int pfid, int vfi u16 nbl_res_func_id_to_vsi_id(struct nbl_resource_mgt *res_mgt, u16 
func_id, u16 type); int nbl_res_func_id_to_pfvfid(struct nbl_resource_mgt *res_mgt, u16 func_id, int *pfid, int *vfid); u8 nbl_res_eth_id_to_pf_id(struct nbl_resource_mgt *res_mgt, u8 eth_id); +u8 nbl_res_eth_id_to_lag_id(struct nbl_resource_mgt *res_mgt, u8 eth_id); int nbl_res_func_id_to_bdf(struct nbl_resource_mgt *res_mgt, u16 func_id, u8 *bus, u8 *dev, u8 *function); u64 nbl_res_get_func_bar_base_addr(struct nbl_resource_mgt *res_mgt, u16 func_id); u16 nbl_res_get_particular_queue_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id); u8 nbl_res_vsi_id_to_eth_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id); +bool nbl_res_check_func_active_by_queue(struct nbl_resource_mgt *res_mgt, u16 func_id); int nbl_adminq_mgt_start(struct nbl_resource_mgt *res_mgt); void nbl_adminq_mgt_stop(struct nbl_resource_mgt *res_mgt); @@ -756,6 +1239,15 @@ void nbl_vsi_mgt_stop(struct nbl_resource_mgt *res_mgt); int nbl_vsi_setup_ops(struct nbl_resource_ops *resource_ops); void nbl_vsi_remove_ops(struct nbl_resource_ops *resource_ops); +int nbl_accel_setup_ops(struct nbl_resource_ops *res_ops); +int nbl_accel_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_accel_mgt_stop(struct nbl_resource_mgt *res_mgt); + +int nbl_fd_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_fd_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_fd_setup_ops(struct nbl_resource_ops *resource_ops); +void nbl_fd_remove_ops(struct nbl_resource_ops *resource_ops); + bool nbl_res_get_flex_capability(void *priv, enum nbl_flex_cap_type cap_type); bool nbl_res_get_fix_capability(void *priv, enum nbl_fix_cap_type cap_type); void nbl_res_set_flex_capability(struct nbl_resource_mgt *res_mgt, enum nbl_flex_cap_type cap_type); @@ -764,6 +1256,12 @@ void nbl_res_set_fix_capability(struct nbl_resource_mgt *res_mgt, enum nbl_fix_c int nbl_res_open_sfp(struct nbl_resource_mgt *res_mgt, u8 eth_id); int nbl_res_get_eth_mac(struct nbl_resource_mgt *res_mgt, u8 *mac, u8 eth_id); void nbl_res_pf_dev_vsi_type_to_hw_vsi_type(u16 src_type, enum nbl_vsi_serv_type *dst_type); +int nbl_res_get_rep_idx(struct nbl_eswitch_info *eswitch_info, u16 rep_vsi_id); bool nbl_res_vf_is_active(void *priv, u16 func_id); +void nbl_res_set_hw_status(void *priv, enum nbl_hw_status hw_status); +int nbl_res_get_pf_vf_num(void *priv, u16 pf_id); +u16 nbl_res_intr_get_suppress_level(void *priv, u64 rates, u16 last_level); +void nbl_res_intr_set_intr_suppress_level(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 level); #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.c new file mode 100644 index 0000000000000000000000000000000000000000..9c47a1cca2680b74d85db2a53a70aa223d6bcd15 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.c @@ -0,0 +1,340 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_tc_pedit.h" +#include "nbl_p4_actions.h" + +static int nbl_tc_pedit_get_h_idx(struct nbl_tc_pedit_res_info *pedit_res, + struct nbl_common_info *common, struct nbl_tc_pedit_entry *e) +{ + u32 ped_idx = 0; + int idx = 0; + bool h_idx_vld = false; + int ret = -ENOMEM; + + if (pedit_res->pedit_cnt_h >= pedit_res->pedit_num_h) { + nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit over-hlimit (%u-%u)", + pedit_res->pedit_num_h, pedit_res->pedit_cnt_h); + return -ENOBUFS; + } + + ped_idx = find_first_zero_bit(pedit_res->pedit_pool_h, pedit_res->pedit_num_h); + WARN_ON(ped_idx >= pedit_res->pedit_num_h); + for (idx = ped_idx; idx < pedit_res->pedit_num_h; ++idx) { + /* don't overlap the pool */ + if (idx >= pedit_res->pedit_num) + break; + + /* only when idx in pool and h_pool are both available, then idx is valid */ + if (!test_bit(idx, pedit_res->pedit_pool_h) && + !test_bit(idx, pedit_res->pedit_pool)) { + h_idx_vld = true; + break; + } + } + + if (h_idx_vld) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc_pedit find a vld idx(%u)-(%u-%u)", + idx, pedit_res->pedit_num_h, pedit_res->pedit_cnt_h); + ret = 0; + /* now set bit in both pool and h_pool */ + set_bit(idx, pedit_res->pedit_pool); + set_bit(idx, pedit_res->pedit_pool_h); + + /* h_idx occupy 2 bits actually */ + ++pedit_res->pedit_cnt; + ++pedit_res->pedit_cnt_h; + NBL_TC_PEDIT_SET_NODE_H(e); + NBL_TC_PEDIT_SET_NODE_IDX(e, idx); + } else { + nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit no valid hidx in hpool-(%u-%u)", + pedit_res->pedit_num_h, pedit_res->pedit_cnt_h); + } + + return ret; +} + +static int nbl_tc_pedit_get_normal_idx(struct nbl_tc_pedit_res_info *pedit_res, + struct nbl_common_info *common, + struct nbl_tc_pedit_entry *e) +{ + u32 ped_idx = 0; + + ped_idx = find_first_zero_bit(pedit_res->pedit_pool, pedit_res->pedit_num); + /* normal ped_idx used up, try get from pedit_h if we got */ + if (ped_idx >= pedit_res->pedit_num && pedit_res->pedit_num_h) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc_pedit try to get idx from h_pool"); + + if (pedit_res->pedit_cnt_h >= pedit_res->pedit_num_h) { + nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit over-hlimit for normal (%u-%u)", + pedit_res->pedit_num_h, pedit_res->pedit_cnt_h); + return -ENOBUFS; + } + ped_idx = find_first_zero_bit(pedit_res->pedit_pool_h, pedit_res->pedit_num_h); + WARN_ON(ped_idx >= pedit_res->pedit_num_h); + nbl_debug(common, NBL_DEBUG_FLOW, "tc_pedit get h-idx(%u) success(%u-%u)", + ped_idx, pedit_res->pedit_num_h, pedit_res->pedit_cnt_h); + NBL_TC_PEDIT_SET_NORMAL_IN_H(e); + ++pedit_res->pedit_cnt_h; + set_bit(ped_idx, pedit_res->pedit_pool_h); + } else if (ped_idx >= pedit_res->pedit_num) { + nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit get no available idx(%u-%u)", + pedit_res->pedit_num, pedit_res->pedit_cnt); + return -ENOMEM; + } + /* get a normal idx */ + nbl_debug(common, NBL_DEBUG_FLOW, "tc_pedit get idx(%u) success(%u-%u)", + ped_idx, pedit_res->pedit_num, pedit_res->pedit_cnt); + set_bit(ped_idx, pedit_res->pedit_pool); + + NBL_TC_PEDIT_SET_NODE_IDX(e, ped_idx); + return 0; +} + +static int nbl_tc_pedit_get_idx(struct nbl_tc_pedit_res_info *pedit_res, + struct nbl_common_info *common, struct nbl_tc_pedit_entry *e) +{ + int ret = 0; + + if (pedit_res->pedit_cnt >= pedit_res->pedit_num) { + nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit over-limit (%u-%u)", + pedit_res->pedit_num, pedit_res->pedit_cnt); + return -ENOBUFS; + } + + if (NBL_TC_PEDIT_GET_NODE_H(e)) + ret = nbl_tc_pedit_get_h_idx(pedit_res, common, e); + + else + ret = 
nbl_tc_pedit_get_normal_idx(pedit_res, common, e); + + if (ret) + return ret; + ++pedit_res->pedit_cnt; + NBL_TC_PEDIT_SET_NODE_VAL(e); + NBL_TC_PEDIT_SET_NODE_BASE_ID(e, pedit_res->pedit_base_id); + NBL_TC_PEDIT_INC_NODE_REF(e); + return 0; +} + +static int nbl_tc_pedit_put_idx(struct nbl_tc_pedit_res_info *pedit_res, + struct nbl_common_info *common, struct nbl_tc_pedit_entry *e) +{ + void *pool_addr; + bool idx_h = false; + + if (NBL_TC_PEDIT_GET_NODE_H(e)) { + WARN_ON(NBL_TC_PEDIT_GET_NODE_IDX(e) >= pedit_res->pedit_num_h); + pool_addr = pedit_res->pedit_pool_h; + idx_h = true; + clear_bit(NBL_TC_PEDIT_GET_NODE_IDX(e), pedit_res->pedit_pool); + pedit_res->pedit_cnt_h--; + pedit_res->pedit_cnt--; + } else if (NBL_TC_PEDIT_GET_NORMAL_IN_H(e)) { + WARN_ON(NBL_TC_PEDIT_GET_NODE_IDX(e) >= pedit_res->pedit_num_h); + pool_addr = pedit_res->pedit_pool_h; + idx_h = true; + pedit_res->pedit_cnt_h--; + } else { + WARN_ON(NBL_TC_PEDIT_GET_NODE_IDX(e) >= pedit_res->pedit_num); + pool_addr = pedit_res->pedit_pool; + } + + if (!test_bit(NBL_TC_PEDIT_GET_NODE_IDX(e), pool_addr)) + nbl_err(common, NBL_DEBUG_FLOW, "tc_pedit clear a null bit %u in h(%d)", + NBL_TC_PEDIT_GET_NODE_IDX(e), idx_h ? 1 : 0); + + pedit_res->pedit_cnt--; + nbl_debug(common, NBL_DEBUG_FLOW, "tc_pedit put idx(%u) success normal(%u-%u)-high(%u-%u)", + NBL_TC_PEDIT_GET_NODE_IDX(e), pedit_res->pedit_num, pedit_res->pedit_cnt, + pedit_res->pedit_num_h, pedit_res->pedit_cnt_h); + clear_bit(NBL_TC_PEDIT_GET_NODE_IDX(e), pool_addr); + NBL_TC_PEDIT_DEC_NODE_REF(e); + NBL_TC_PEDIT_SET_NODE_INVAL(e); + return 0; +} + +static enum nbl_flow_ped_type nbl_tc_pedit_get_ped_type(enum nbl_flow_ped_type ped_type) +{ + /* default ped_type: return directly */ + if (NBL_TC_PEDIT_IS_DEFAULT_TYPE(ped_type)) + return ped_type; + + NBL_TC_PEDIT_UNSET_D_TYPE(ped_type); + /* we need the ped_type as the hardware recognizes it */ + return ped_type; +} + +u16 nbl_tc_pedit_get_hw_id(struct nbl_tc_pedit_entry *ped_node) +{ + return (ped_node->hnode.node_idx + ped_node->hnode.node_base); +} + +int nbl_tc_pedit_del_node(struct nbl_tc_pedit_mgt *pedit_mgt, + struct nbl_tc_pedit_node_res *pedit_node) +{ + struct nbl_common_info *common = pedit_mgt->common; + struct nbl_tc_pedit_res_info *pedit_res = pedit_mgt->pedit_res; + int idx = 0; + struct nbl_tc_pedit_entry *l_e; + void *h_e; + u32 e_ref = 0; + int ret = -EINVAL; + void *pedit_tbl; + enum nbl_flow_ped_type ped_type; + + if (!NBL_TC_PEDIT_GET_NODE_RES_VAL(*pedit_node)) { + return -EINVAL; + } + + mutex_lock(&pedit_mgt->pedit_lock); + for (idx = 0; idx < NBL_FLOW_PED_RES_MAX; ++idx) { + l_e = NBL_TC_PEDIT_GET_NODE_RES_ENTRY(*pedit_node, idx); + if (!l_e) + continue; + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_tc_pedit(%u):del %d-%u-%u-(%u-%u)", + NBL_TC_PEDIT_GET_NODE_REF(l_e), + idx, NBL_TC_PEDIT_GET_NODE_IDX(l_e), + nbl_tc_pedit_get_hw_id(l_e), + NBL_TC_PEDIT_GET_NORMAL_IN_H(l_e), + NBL_TC_PEDIT_GET_NODE_H(l_e)); + + /* get the hw ped_type, since resources are stored hw-style */ + ped_type = nbl_tc_pedit_get_ped_type(idx); + WARN_ON(!NBL_TC_PEDIT_GET_NODE_VAL(l_e)); + if (NBL_TC_PEDIT_GET_NODE_H(l_e)) + pedit_tbl = pedit_res[ped_type].pedit_tbl_h; + else + pedit_tbl = pedit_res[ped_type].pedit_tbl; + + h_e = nbl_common_get_hash_node(pedit_tbl, NBL_TC_PEDIT_GET_KEY(l_e)); + WARN_ON(l_e != h_e); + e_ref = NBL_TC_PEDIT_GET_NODE_REF(l_e); + if (e_ref > 1) { + NBL_TC_PEDIT_DEC_NODE_REF(l_e); + } else { + NBL_TC_PEDIT_DEC_NODE_REF(l_e); + nbl_tc_pedit_put_idx(&pedit_res[ped_type], common, l_e); + 
nbl_common_free_hash_node(pedit_tbl, NBL_TC_PEDIT_GET_KEY(l_e)); + } + ret = 0; + } + mutex_unlock(&pedit_mgt->pedit_lock); + + return ret; +} + +int nbl_tc_pedit_add_node(struct nbl_tc_pedit_mgt *pedit_mgt, + struct nbl_tc_pedit_entry *e, + void **e_out, enum nbl_flow_ped_type pedit_type) +{ + struct nbl_tc_pedit_res_info *pedit_res = &pedit_mgt->pedit_res[pedit_type]; + struct nbl_common_info *common = pedit_mgt->common; + struct nbl_tc_pedit_entry *h_e; + void *new_e; + int ret = 0; + void *pedit_tbl = pedit_res->pedit_tbl; + + if (NBL_TC_PEDIT_GET_NODE_H(e)) + pedit_tbl = pedit_res->pedit_tbl_h; + + if (!pedit_tbl) { + nbl_err(common, NBL_DEBUG_FLOW, "nbl_tc_pedit add failed: not init type %d", + pedit_type); + return -EINVAL; + } + + mutex_lock(&pedit_mgt->pedit_lock); + h_e = nbl_common_get_hash_node(pedit_tbl, NBL_TC_PEDIT_GET_KEY(e)); + if (h_e) { + NBL_TC_PEDIT_INC_NODE_REF(h_e); + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:type(%d) exist in %u-%u(%u)", + pedit_type, NBL_TC_PEDIT_GET_NODE_IDX(h_e), + nbl_tc_pedit_get_hw_id(h_e), NBL_TC_PEDIT_GET_NODE_REF(h_e)); + *e_out = h_e; + NBL_TC_PEDIT_COPY_NODE(h_e, e); + goto pedit_add_fin; + } + + ret = nbl_tc_pedit_get_idx(pedit_res, common, e); + if (ret) + goto pedit_add_fin; + + ret = nbl_common_alloc_hash_node(pedit_tbl, NBL_TC_PEDIT_GET_KEY(e), e, &new_e); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:type(%d) add hash failed", + pedit_type); + nbl_tc_pedit_put_idx(pedit_res, common, e); + goto pedit_add_fin; + } + + *e_out = new_e; + NBL_TC_PEDIT_SET_NODE_ENTRY(e, new_e); + /* tell caller this is the first node added in hash */ + NBL_TC_PEDIT_SET_NODE_INVAL(e); +pedit_add_fin: + mutex_unlock(&pedit_mgt->pedit_lock); + return ret; +} + +int nbl_tc_pedit_init(struct nbl_tc_pedit_mgt *pedit_mgt) +{ + int ret = 0; + int idx = 0; + struct nbl_common_info *common = pedit_mgt->common; + struct nbl_tc_pedit_res_info *pedit_res = pedit_mgt->pedit_res; + struct nbl_hash_tbl_key tbl_key = {0}; + + for (idx = 0; idx < NBL_FLOW_PED_RES_MAX; ++idx) { + if (!pedit_res[idx].pedit_num) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:pedit(%d) skip init", + idx); + continue; + } + NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), NBL_TC_PEDIT_KEY_LEN, + sizeof(struct nbl_tc_pedit_entry), + pedit_res[idx].pedit_num, false); + pedit_res[idx].pedit_tbl = nbl_common_init_hash_table(&tbl_key); + if (!pedit_res[idx].pedit_tbl) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:pedit(%d) init failed", + idx); + return -ENOMEM; + } + + /* init pedit_h if needed */ + if (pedit_res[idx].pedit_num_h) { + NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), + NBL_TC_PEDIT_KEY_LEN, + sizeof(struct nbl_tc_pedit_entry), + pedit_res[idx].pedit_num_h, false); + pedit_res[idx].pedit_tbl_h = nbl_common_init_hash_table(&tbl_key); + if (!pedit_res[idx].pedit_tbl_h) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:pedit(%d) init failed", + idx); + return -ENOMEM; + } + } + } + + return ret; +} + +int nbl_tc_pedit_uninit(struct nbl_tc_pedit_mgt *pedit_mgt) +{ + int idx = 0; + struct nbl_tc_pedit_res_info *pedit_res = pedit_mgt->pedit_res; + + if (!pedit_mgt) + return -EINVAL; + + for (idx = 0; idx < NBL_FLOW_PED_RES_MAX; ++idx) { + nbl_common_remove_hash_table(pedit_res[idx].pedit_tbl, NULL); + nbl_common_remove_hash_table(pedit_res[idx].pedit_tbl_h, NULL); + } + + return 0; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.h new file mode 100644 
index 0000000000000000000000000000000000000000..245a6ac8dde67be81d4fdd5e2b86bbe718d7c9f2 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef __NBL_TC_PEDIT_H__ +#define __NBL_TC_PEDIT_H__ + +#include "nbl_include.h" +#include "nbl_core.h" +#include "nbl_resource.h" + +#define NBL_TC_PEDIT_MAC_LEN 6 +#define NBL_TC_PEDIT_IP6_LEN 16 +#define NBL_TC_PEDIT_KEY_LEN 16 +#define NBL_TC_PEDIT_TAB_LEN 8 + +#define NBL_TC_PEDIT_HW_END_PED_TYPE NBL_FLOW_PED_UMAC_D_TYPE +#define NBL_TC_PEDIT_IS_DEFAULT_TYPE(p_type) ((p_type) < NBL_TC_PEDIT_HW_END_PED_TYPE) +#define NBL_TC_PEDIT_SET_D_TYPE(p_type) ((p_type) += NBL_TC_PEDIT_HW_END_PED_TYPE) +#define NBL_TC_PEDIT_UNSET_D_TYPE(p_type) ((p_type) -= NBL_TC_PEDIT_HW_END_PED_TYPE) + +#define NBL_TC_PEDIT_IP6_PHY_TYPE_GAP (NBL_FLOW_PED_UIP6_TYPE - NBL_FLOW_PED_UIP_TYPE) +#define NBL_TC_PEDIT_GET_IP6_PHY_TYPE(p_type) ((p_type) + NBL_TC_PEDIT_IP6_PHY_TYPE_GAP) + +struct nbl_tc_pedit_node { + u32 ref_cnt:31; + u32 normal_in_h:1; + u32 node_idx:15; + u32 node_base:15; + u32 node_h:1; + u32 node_val:1; + void *entry; + u8 key[]; +}; + +struct nbl_tc_pedit_entry { + struct nbl_tc_pedit_node hnode; + union { + u8 mac[NBL_TC_PEDIT_MAC_LEN]; + u32 ip[2]; + u8 ip6[NBL_TC_PEDIT_IP6_LEN]; + u8 key[NBL_TC_PEDIT_KEY_LEN]; + }; +}; + +#define NBL_TC_PEDIT_SET_NODE_RES_VAL(node) ((node).pedit_val = 1) +#define NBL_TC_PEDIT_SET_NODE_RES_ENTRY(node, idx, e) ((node).pedit_node[idx] = e) + +#define NBL_TC_PEDIT_GET_NODE_RES_VAL(node) ((node).pedit_val) +#define NBL_TC_PEDIT_GET_NODE_RES_ENTRY(node, idx) \ + ((struct nbl_tc_pedit_entry *)(node).pedit_node[idx]) + +#define NBL_TC_PEDIT_GET_KEY(ped_node) ((ped_node)->hnode.key) +#define NBL_TC_PEDIT_GET_NODE_REF(ped_node) ((ped_node)->hnode.ref_cnt) +#define NBL_TC_PEDIT_GET_NODE_H(ped_node) ((ped_node)->hnode.node_h) +#define NBL_TC_PEDIT_GET_NORMAL_IN_H(ped_node) ((ped_node)->hnode.normal_in_h) +#define NBL_TC_PEDIT_GET_NODE_IDX(ped_node) ((ped_node)->hnode.node_idx) +#define NBL_TC_PEDIT_GET_NODE_VAL(ped_node) ((ped_node)->hnode.node_val) + +#define NBL_TC_PEDIT_INC_NODE_REF(ped_node) ((ped_node)->hnode.ref_cnt++) +#define NBL_TC_PEDIT_DEC_NODE_REF(ped_node) ((ped_node)->hnode.ref_cnt--) + +#define NBL_TC_PEDIT_SET_NODE_IDX(ped_node, idx) ((ped_node)->hnode.node_idx = idx) +#define NBL_TC_PEDIT_SET_NODE_BASE_ID(ped_node, idx) ((ped_node)->hnode.node_base = idx) +#define NBL_TC_PEDIT_SET_NODE_VAL(ped_node) ((ped_node)->hnode.node_val = 1) +#define NBL_TC_PEDIT_SET_NODE_INVAL(ped_node) ((ped_node)->hnode.node_val = 0) +#define NBL_TC_PEDIT_SET_NORMAL_IN_H(ped_node) ((ped_node)->hnode.normal_in_h = 1) +#define NBL_TC_PEDIT_SET_NODE_H(ped_node) ((ped_node)->hnode.node_h = 1) +#define NBL_TC_PEDIT_SET_NODE_ENTRY(ped_node, e) ((ped_node)->hnode.entry = e) + +#define NBL_TC_PEDIT_COPY_NODE(src_node, dst_node) ((dst_node)->hnode = (src_node)->hnode) + +u16 nbl_tc_pedit_get_hw_id(struct nbl_tc_pedit_entry *ped_node); +int nbl_tc_pedit_init(struct nbl_tc_pedit_mgt *pedit_mgt); +int nbl_tc_pedit_uninit(struct nbl_tc_pedit_mgt *pedit_mgt); +int nbl_tc_pedit_del_node(struct nbl_tc_pedit_mgt *pedit_mgt, + struct nbl_tc_pedit_node_res *ped_node); +int nbl_tc_pedit_add_node(struct nbl_tc_pedit_mgt *pedit_mgt, + struct nbl_tc_pedit_entry *e, + void **e_out, enum nbl_flow_ped_type pedit_type); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c index f1b9f92ac057d70310ac547940cea29c1bb52a77..b24aedd8efdb081578cb3d86387d8328d4fa56ed 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c @@ -4,22 +4,68 @@ * Author: */ +#include "nbl_txrx.h" #include #include #include #include +#include #include -#include "nbl_txrx.h" +#include + +DEFINE_STATIC_KEY_FALSE(nbl_xdp_locking_key); + +static bool nbl_txrx_within_vsi(struct nbl_txrx_vsi_info *vsi_info, u16 ring_index) +{ + return ring_index >= vsi_info->ring_offset && + ring_index < vsi_info->ring_offset + vsi_info->ring_num; +} + +static struct netdev_queue *txring_txq(const struct nbl_res_tx_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} -int nbl_alloc_tx_rings(struct nbl_resource_mgt *res_mgt, struct net_device *netdev, - u16 tx_num, u16 desc_num) +static struct nbl_res_tx_ring * +nbl_alloc_tx_ring(struct nbl_resource_mgt *res_mgt, struct net_device *netdev, u16 ring_index, + u16 desc_num) { struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); struct nbl_res_tx_ring *ring; + + ring = devm_kzalloc(dev, sizeof(struct nbl_res_tx_ring), GFP_KERNEL); + if (!ring) + return NULL; + + ring->vsi_info = txrx_mgt->vsi_info; + ring->dma_dev = common->dma_dev; + ring->product_type = common->product_type; + ring->eth_id = common->eth_id; + ring->queue_index = ring_index; + ring->notify_addr = phy_ops->get_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + ring->notify_qid = NBL_RES_NOFITY_QID(res_mgt, ring_index * 2 + 1); + ring->netdev = netdev; + ring->desc_num = desc_num; + ring->used_wrap_counter = 1; + ring->avail_used_flags |= BIT(NBL_PACKED_DESC_F_AVAIL); + + if (res_mgt->resource_info->eswitch_info) + ring->mode = res_mgt->resource_info->eswitch_info->mode; + + return ring; +} + +static int nbl_alloc_tx_rings(struct nbl_resource_mgt *res_mgt, struct net_device *netdev, + u16 tx_num, u16 desc_num) +{ + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_res_tx_ring *ring; u32 ring_index; if (txrx_mgt->tx_rings) { @@ -32,26 +78,19 @@ int nbl_alloc_tx_rings(struct nbl_resource_mgt *res_mgt, struct net_device *netd txrx_mgt->tx_rings = devm_kcalloc(dev, tx_num, sizeof(struct nbl_res_tx_ring *), GFP_KERNEL); - if (!txrx_mgt->tx_rings) + if (!txrx_mgt->tx_rings) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "Allocate the tx rings array failed\n"); return -ENOMEM; + } for (ring_index = 0; ring_index < tx_num; ring_index++) { ring = txrx_mgt->tx_rings[ring_index]; WARN_ON(ring); - ring = devm_kzalloc(dev, sizeof(struct nbl_res_tx_ring), GFP_KERNEL); + ring = nbl_alloc_tx_ring(res_mgt, netdev, ring_index, desc_num); if (!ring) goto alloc_tx_ring_failed; - ring->dma_dev = common->dma_dev; - ring->product_type = common->product_type; - ring->eth_id = common->eth_id; - ring->queue_index = ring_index; - ring->notify_addr = phy_ops->get_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); - ring->notify_qid = NBL_RES_NOFITY_QID(res_mgt, ring_index * 2 + 1); - ring->netdev = netdev; - ring->desc_num = desc_num; - ring->used_wrap_counter = 1; - ring->avail_used_flags |= BIT(NBL_PACKED_DESC_F_AVAIL); WRITE_ONCE(txrx_mgt->tx_rings[ring_index], ring); } @@ 
-118,8 +157,10 @@ static int nbl_alloc_rx_rings(struct nbl_resource_mgt *res_mgt, struct net_devic ring->notify_qid = NBL_RES_NOFITY_QID(res_mgt, ring_index * 2); ring->netdev = netdev; ring->desc_num = desc_num; - /* TODO: maybe TX buffer length should be determined by other factors */ - ring->buf_len = NBL_RX_BUFSZ - NBL_RX_PAD; + /* The RX buffer length is determined by the MTU; + * buf_len is set according to the MTU when the netdev is brought up. + */ + ring->buf_len = PAGE_SIZE / 2 - NBL_RX_PAD; ring->used_wrap_counter = 1; ring->avail_used_flags |= BIT(NBL_PACKED_DESC_F_AVAIL); @@ -153,13 +194,14 @@ static void nbl_free_rx_rings(struct nbl_resource_mgt *res_mgt) txrx_mgt->rx_rings = NULL; } -static int nbl_alloc_vectors(struct nbl_resource_mgt *res_mgt, u16 num) +static int nbl_alloc_vectors(struct nbl_resource_mgt *res_mgt, u16 total_num, u16 xdp_ring_offset) { struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); struct nbl_res_vector *vector; u32 index; + u16 xdp_ring_num; if (txrx_mgt->vectors) { nbl_err(common, NBL_DEBUG_RESOURCE, @@ -167,11 +209,12 @@ static int nbl_alloc_vectors(struct nbl_resource_mgt *res_mgt, u16 num) return -EINVAL; } - txrx_mgt->vectors = devm_kcalloc(dev, num, sizeof(struct nbl_res_vector *), GFP_KERNEL); + txrx_mgt->vectors = devm_kcalloc(dev, xdp_ring_offset, sizeof(struct nbl_res_vector *), + GFP_KERNEL); if (!txrx_mgt->vectors) return -ENOMEM; - for (index = 0; index < num; index++) { + for (index = 0; index < xdp_ring_offset; index++) { vector = txrx_mgt->vectors[index]; WARN_ON(vector); vector = devm_kzalloc(dev, sizeof(struct nbl_res_vector), GFP_KERNEL); @@ -183,6 +226,12 @@ static int nbl_alloc_vectors(struct nbl_resource_mgt *res_mgt, u16 num) WRITE_ONCE(txrx_mgt->vectors[index], vector); } + xdp_ring_num = total_num - xdp_ring_offset; + for (index = 0; index < xdp_ring_num; index++) { + vector = txrx_mgt->vectors[index]; + vector->xdp_ring = txrx_mgt->tx_rings[index + xdp_ring_offset]; + } + return 0; alloc_vector_failed: @@ -200,7 +249,7 @@ static void nbl_free_vectors(struct nbl_resource_mgt *res_mgt) struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); u16 count, index; - count = txrx_mgt->rx_ring_num; + count = txrx_mgt->xdp_ring_offset; for (index = 0; index < count; index++) { vector = txrx_mgt->vectors[index]; devm_kfree(dev, vector); @@ -209,27 +258,34 @@ static void nbl_free_vectors(struct nbl_resource_mgt *res_mgt) txrx_mgt->vectors = NULL; } -static int nbl_res_txrx_alloc_rings(void *priv, struct net_device *netdev, u16 tx_num, - u16 rx_num, u16 tx_desc_num, u16 rx_desc_num) +static int nbl_res_txrx_alloc_rings(void *priv, struct net_device *netdev, + struct nbl_ring_param *param) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; int err = 0; - err = nbl_alloc_tx_rings(res_mgt, netdev, tx_num, tx_desc_num); + err = nbl_alloc_tx_rings(res_mgt, netdev, param->tx_ring_num, param->queue_size); if (err) return err; - err = nbl_alloc_rx_rings(res_mgt, netdev, rx_num, rx_desc_num); + err = nbl_alloc_rx_rings(res_mgt, netdev, param->rx_ring_num, param->queue_size); if (err) goto alloc_rx_rings_err; - err = nbl_alloc_vectors(res_mgt, rx_num); + err = nbl_alloc_vectors(res_mgt, param->tx_ring_num, param->xdp_ring_offset); if (err) goto alloc_vectors_err; + txrx_mgt->xdp_ring_offset = param->xdp_ring_offset; + txrx_mgt->xdp_ring_num = param->tx_ring_num - param->xdp_ring_offset; + +
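+	/* With more online CPUs than XDP rings, one XDP ring may be shared + * by several CPUs, so enable the static key that gates the per-ring + * xmit spinlock in the XDP transmit paths. + */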
if (txrx_mgt->xdp_ring_num && num_online_cpus() > txrx_mgt->xdp_ring_num) + static_branch_inc(&nbl_xdp_locking_key); + nbl_info(res_mgt->common, NBL_DEBUG_RESOURCE, - "Alloc rings for %d tx, %d rx, %d tx_desc %d rx_desc\n", - tx_num, rx_num, tx_desc_num, rx_desc_num); + "Alloc rings for %d tx, %d rx, %d xdp_offset, %d desc\n", + param->tx_ring_num, param->rx_ring_num, param->xdp_ring_offset, param->queue_size); return 0; alloc_vectors_err: @@ -242,6 +298,11 @@ static int nbl_res_txrx_alloc_rings(void *priv, struct net_device *netdev, u16 t static void nbl_res_txrx_remove_rings(void *priv) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + + if (txrx_mgt->xdp_ring_num && num_online_cpus() > txrx_mgt->xdp_ring_num && + static_key_enabled(&nbl_xdp_locking_key)) + static_branch_dec(&nbl_xdp_locking_key); nbl_free_vectors(res_mgt); nbl_free_tx_rings(res_mgt); @@ -308,7 +369,8 @@ static inline bool nbl_rx_cache_get(struct nbl_res_rx_ring *rx_ring, struct nbl_ cache->head = (cache->head + 1) & (NBL_MAX_CACHE_SIZE - 1); stats->rx_cache_reuse++; - dma_sync_single_for_device(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, DMA_FROM_DEVICE); + dma_sync_single_for_device(rx_ring->dma_dev, dma_info->addr, + dma_info->size, DMA_FROM_DEVICE); return true; } @@ -339,7 +401,7 @@ static inline int nbl_get_rx_frag(struct nbl_res_rx_ring *rx_ring, struct nbl_rx int err = 0; /* first buffer alloc page */ - if (buffer->offset == NBL_RX_PAD) + if (buffer->offset == buffer->rx_pad) err = nbl_page_alloc_pool(rx_ring, buffer->di); return err; @@ -379,7 +441,7 @@ static inline bool nbl_alloc_rx_bufs(struct nbl_res_rx_ring *rx_ring, u16 count) if (nbl_get_rx_frag(rx_ring, rx_buf)) break; - for (i = 0; i < NBL_RX_PAGE_PER_FRAGS; i++, rx_desc++, rx_buf++) { + for (i = 0; i < rx_ring->frags_num_per_page; i++, rx_desc++, rx_buf++) { rx_desc->addr = cpu_to_le64(rx_buf->di->addr + rx_buf->offset); rx_desc->len = cpu_to_le32(buf_len); rx_desc->id = cpu_to_le16(next_to_use); @@ -392,9 +454,9 @@ static inline bool nbl_alloc_rx_bufs(struct nbl_res_rx_ring *rx_ring, u16 count) NBL_PACKED_DESC_F_WRITE); } - next_to_use += NBL_RX_PAGE_PER_FRAGS; - rx_ring->tail_ptr += NBL_RX_PAGE_PER_FRAGS; - count -= NBL_RX_PAGE_PER_FRAGS; + next_to_use += rx_ring->frags_num_per_page; + rx_ring->tail_ptr += rx_ring->frags_num_per_page; + count -= rx_ring->frags_num_per_page; if (next_to_use == rx_ring->desc_num) { next_to_use = 0; rx_desc = NBL_RX_DESC(rx_ring, next_to_use); @@ -415,115 +477,23 @@ static inline bool nbl_alloc_rx_bufs(struct nbl_res_rx_ring *rx_ring, u16 count) return !!count; } -static dma_addr_t nbl_res_txrx_start_rx_ring(void *priv, u8 ring_index, bool use_napi) -{ - struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); - struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); - struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); - struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); - struct nbl_res_vector *vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); - struct page_pool_params pp_params = {0}; - int i, j; - - if (rx_ring->rx_bufs) { - nbl_err(common, NBL_DEBUG_RESOURCE, - "Try to setup a RX ring with buffer management array already allocated\n"); - return (dma_addr_t)NULL; - } - - pp_params.order = 0; - pp_params.flags = 0; - pp_params.pool_size = rx_ring->desc_num; - pp_params.nid = dev_to_node(dev); - pp_params.dev = dev; - pp_params.dma_dir 
= DMA_FROM_DEVICE; - - rx_ring->page_pool = page_pool_create(&pp_params); - if (IS_ERR(rx_ring->page_pool)) { - nbl_err(common, NBL_DEBUG_RESOURCE, "Page_pool Allocate %u Failed failed\n", - rx_ring->queue_index); - return (dma_addr_t)NULL; - } - - rx_ring->di = kvzalloc_node(array_size(rx_ring->desc_num / NBL_RX_PAGE_PER_FRAGS, - sizeof(struct nbl_dma_info)), - GFP_KERNEL, dev_to_node(dev)); - if (!rx_ring->di) { - nbl_err(common, NBL_DEBUG_RESOURCE, "Dma info Allocate %u Failed failed\n", - rx_ring->queue_index); - goto alloc_di_err; - } - - rx_ring->rx_bufs = devm_kcalloc(dev, rx_ring->desc_num, sizeof(*rx_ring->rx_bufs), - GFP_KERNEL); - if (!rx_ring->rx_bufs) - goto alloc_buffers_err; - - /* Alloc twice memory, and second half is used to back up the desc for desc checking */ - rx_ring->size = ALIGN(rx_ring->desc_num * sizeof(struct nbl_ring_desc), PAGE_SIZE); - rx_ring->desc = dmam_alloc_coherent(dma_dev, rx_ring->size, &rx_ring->dma, - GFP_KERNEL | __GFP_ZERO); - if (!rx_ring->desc) - goto alloc_dma_err; - - rx_ring->next_to_use = 0; - rx_ring->next_to_clean = 0; - rx_ring->tail_ptr = 0; - - j = 0; - for (i = 0; i < rx_ring->desc_num / NBL_RX_PAGE_PER_FRAGS; i++) { - struct nbl_dma_info *di = &rx_ring->di[i]; - struct nbl_rx_buffer *buffer; - int f; - - for (f = 0; f < NBL_RX_PAGE_PER_FRAGS; f++, j++) { - buffer = &rx_ring->rx_bufs[j]; - buffer->di = di; - buffer->offset = NBL_RX_PAD + f * NBL_RX_BUFSZ; - buffer->last_in_page = false; - } - - buffer->last_in_page = true; - } - - if (nbl_alloc_rx_bufs(rx_ring, rx_ring->desc_num - NBL_MAX_BATCH_DESC)) - goto alloc_rx_bufs_err; - - rx_ring->valid = true; - if (use_napi) - vector->started = true; - - nbl_debug(common, NBL_DEBUG_RESOURCE, "Start rx ring %d", ring_index); - return rx_ring->dma; - -alloc_rx_bufs_err: - dmam_free_coherent(dma_dev, rx_ring->size, rx_ring->desc, rx_ring->dma); - rx_ring->desc = NULL; - rx_ring->dma = (dma_addr_t)NULL; -alloc_dma_err: - devm_kfree(dev, rx_ring->rx_bufs); - rx_ring->rx_bufs = NULL; -alloc_buffers_err: - kvfree(rx_ring->di); -alloc_di_err: - page_pool_destroy(rx_ring->page_pool); - rx_ring->size = 0; - return (dma_addr_t)NULL; -} - static void nbl_unmap_and_free_tx_resource(struct nbl_res_tx_ring *ring, struct nbl_tx_buffer *tx_buffer, - bool free_skb, bool in_napi) + bool free, bool in_napi) { struct device *dma_dev = NBL_RING_TO_DMA_DEV(ring); if (tx_buffer->skb) { - if (likely(free_skb)) { - if (in_napi) - napi_consume_skb(tx_buffer->skb, NBL_TX_POLL_WEIGHT); - else - dev_kfree_skb_any(tx_buffer->skb); + if (likely(!nbl_res_txrx_is_xdp_ring(ring))) { + if (likely(free)) { + if (in_napi) + napi_consume_skb(tx_buffer->skb, NBL_TX_POLL_WEIGHT); + else + dev_kfree_skb_any(tx_buffer->skb); + } + } else { + if (likely(free)) + page_frag_free(tx_buffer->raw_buff); } if (dma_unmap_len(tx_buffer, len)) @@ -540,9 +510,13 @@ static void nbl_unmap_and_free_tx_resource(struct nbl_res_tx_ring *ring, DMA_TO_DEVICE); } + kfree(tx_buffer->tls_pkthdr); + tx_buffer->tls_pkthdr = NULL; tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; tx_buffer->page = 0; + tx_buffer->bytecount = 0; + tx_buffer->gso_segs = 0; dma_unmap_len_set(tx_buffer, len, 0); } @@ -578,13 +552,18 @@ static void nbl_res_txrx_stop_tx_ring(void *priv, u8 ring_index) struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index); - struct nbl_res_vector *vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + struct 
nbl_res_vector *vector = NULL; - vector->started = false; - /* Flush napi task, to ensue the sched napi finish. So napi will no to access the - * ring memory(wild point), bacause the vector->started has set false. - */ - napi_synchronize(&vector->napi); + if (!nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_XDP], ring_index)) + vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + + if (vector) { + vector->started = false; + /* Flush the napi task to ensure the scheduled napi has finished; napi + * will not access the ring memory (dangling pointer) once + * vector->started is set to false. + */ + napi_synchronize(&vector->nbl_napi.napi); + } tx_ring->valid = false; @@ -599,10 +578,18 @@ static void nbl_res_txrx_stop_tx_ring(void *priv, u8 ring_index) tx_ring->dma = (dma_addr_t)NULL; tx_ring->size = 0; + if (nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_DATA], tx_ring->queue_index)) + netdev_tx_reset_queue(txring_txq(tx_ring)); + nbl_debug(res_mgt->common, NBL_DEBUG_RESOURCE, "Stop tx ring %d", ring_index); } -static inline bool nbl_rx_cache_put(struct nbl_res_rx_ring *rx_ring, struct nbl_dma_info *dma_info) +static inline bool nbl_dev_page_is_reusable(struct page *page, u8 nid) +{ + return likely(page_to_nid(page) == nid && !page_is_pfmemalloc(page)); +} + +static inline int nbl_rx_cache_put(struct nbl_res_rx_ring *rx_ring, struct nbl_dma_info *dma_info) { struct nbl_page_cache *cache = &rx_ring->page_cache; u32 tail_next = (cache->tail + 1) & (NBL_MAX_CACHE_SIZE - 1); @@ -610,34 +597,42 @@ static inline bool nbl_rx_cache_put(struct nbl_res_rx_ring *rx_ring, struct nbl_ if (tail_next == cache->head) { stats->rx_cache_full++; - return false; + return 0; } - if (!dev_page_is_reusable(dma_info->page)) { + if (!nbl_dev_page_is_reusable(dma_info->page, rx_ring->nid)) { stats->rx_cache_waive++; - return false; + return 1; } cache->page_cache[cache->tail] = *dma_info; cache->tail = tail_next; - return true; + return 2; } static inline void nbl_page_release_dynamic(struct nbl_res_rx_ring *rx_ring, struct nbl_dma_info *dma_info, bool recycle) { + u32 ret; + if (likely(recycle)) { - if (nbl_rx_cache_put(rx_ring, dma_info)) + ret = nbl_rx_cache_put(rx_ring, dma_info); + if (ret == 2) return; - dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, + if (ret == 1) + goto free_page; + dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, dma_info->size, DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); page_pool_recycle_direct(rx_ring->page_pool, dma_info->page); - } else { - dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, - DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); - page_pool_put_page(rx_ring->page_pool, dma_info->page, PAGE_SIZE, true); + + return; } + +free_page: + + dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, dma_info->size, + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); } static inline void nbl_put_rx_frag(struct nbl_res_rx_ring *rx_ring, @@ -682,6 +677,150 @@ static void nbl_free_rx_ring_bufs(struct nbl_res_rx_ring *rx_ring) memset(rx_ring->desc, 0, rx_ring->size); } +static dma_addr_t nbl_res_txrx_start_rx_ring(void *priv, u8 ring_index, bool use_napi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); + struct nbl_res_vector *vector = NULL; + struct
page_pool_params pp_params = {0}; + int pkt_len_shift = 0; + int pkt_len = 0, order = 0; + int dma_size = 0, buf_size = 0; + int i, j; + u16 rx_pad, tailroom; + + if (rx_ring->rx_bufs) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "Try to setup a RX ring with buffer management array already allocated\n"); + return (dma_addr_t)NULL; + } + + if (!nbl_txrx_within_vsi(&txrx_mgt->vsi_info[NBL_VSI_XDP], ring_index)) + vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + + rx_pad = NBL_RX_PAD; + tailroom = 0; + if (!!adaptive_rxbuf_len_disable && !rx_ring->xdp_prog) { + buf_size = NBL_RX_BUFSZ; + pkt_len_shift = PAGE_SHIFT - 1; + } else { + pkt_len = rx_pad + ETH_HLEN + (VLAN_HLEN * 2) + rx_ring->netdev->mtu + + tailroom + NBL_BUFFER_HDR_LEN; + pkt_len_shift = ilog2((pkt_len) - 1) + 1; + pkt_len_shift = max(pkt_len_shift, NBL_RXBUF_MIN_ORDER); + buf_size = 1UL << pkt_len_shift; + } + + if (pkt_len_shift >= PAGE_SHIFT) { + order = pkt_len_shift - PAGE_SHIFT; + rx_ring->frags_num_per_page = 1; + } else { + order = 0; + rx_ring->frags_num_per_page = PAGE_SIZE / buf_size; + WARN_ON(rx_ring->frags_num_per_page > NBL_MAX_BATCH_DESC); + } + dma_size = PAGE_SIZE << order; + + rx_ring->buf_len = buf_size - rx_pad - tailroom; + + pp_params.order = order; + pp_params.flags = 0; + pp_params.pool_size = rx_ring->desc_num; + pp_params.nid = dev_to_node(dev); + pp_params.dev = dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + + if (dev_to_node(dev) == NUMA_NO_NODE) + rx_ring->nid = 0; + else + rx_ring->nid = dev_to_node(dev); + + rx_ring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx_ring->page_pool)) { + nbl_err(common, NBL_DEBUG_RESOURCE, "page_pool allocation for RX ring %u failed\n", + rx_ring->queue_index); + return (dma_addr_t)NULL; + } + + rx_ring->di = kvzalloc_node(array_size(rx_ring->desc_num / rx_ring->frags_num_per_page, + sizeof(struct nbl_dma_info)), + GFP_KERNEL, dev_to_node(dev)); + if (!rx_ring->di) { + nbl_err(common, NBL_DEBUG_RESOURCE, "DMA info allocation for RX ring %u failed\n", + rx_ring->queue_index); + goto alloc_di_err; + } + + rx_ring->rx_bufs = devm_kcalloc(dev, rx_ring->desc_num, sizeof(*rx_ring->rx_bufs), + GFP_KERNEL); + if (!rx_ring->rx_bufs) { + goto alloc_buffers_err; + } + + /* Allocate twice the memory; the second half backs up the descriptors for desc checking */ + rx_ring->size = ALIGN(rx_ring->desc_num * sizeof(struct nbl_ring_desc), PAGE_SIZE); + rx_ring->desc = dmam_alloc_coherent(dma_dev, rx_ring->size, &rx_ring->dma, + GFP_KERNEL | __GFP_ZERO); + if (!rx_ring->desc) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "Allocate %u bytes descriptor DMA memory for RX queue %u failed\n", + rx_ring->size, rx_ring->queue_index); + goto alloc_dma_err; + } + + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + rx_ring->tail_ptr = 0; + + j = 0; + for (i = 0; i < rx_ring->desc_num / rx_ring->frags_num_per_page; i++) { + struct nbl_dma_info *di = &rx_ring->di[i]; + struct nbl_rx_buffer *buffer = &rx_ring->rx_bufs[j]; + int f; + + di->size = dma_size; + for (f = 0; f < rx_ring->frags_num_per_page; f++, j++) { + buffer = &rx_ring->rx_bufs[j]; + buffer->di = di; + buffer->size = buf_size; + buffer->offset = rx_pad + f * buf_size; + buffer->rx_pad = rx_pad; + buffer->last_in_page = false; + } + + buffer->last_in_page = true; + } + + if (nbl_alloc_rx_bufs(rx_ring, rx_ring->desc_num - NBL_MAX_BATCH_DESC)) + goto alloc_rx_bufs_err; + + rx_ring->valid = true; + if (use_napi && vector) + vector->started = true; + + nbl_debug(common, NBL_DEBUG_RESOURCE, "Start rx ring %d", ring_index); + return
rx_ring->dma; + +alloc_rx_bufs_err: + nbl_free_rx_ring_bufs(rx_ring); + dmam_free_coherent(dma_dev, rx_ring->size, rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + rx_ring->dma = (dma_addr_t)NULL; +alloc_dma_err: + devm_kfree(dev, rx_ring->rx_bufs); + rx_ring->rx_bufs = NULL; +alloc_buffers_err: + kvfree(rx_ring->di); +alloc_di_err: + page_pool_destroy(rx_ring->page_pool); + rx_ring->size = 0; + return (dma_addr_t)NULL; +} + static void nbl_res_txrx_stop_rx_ring(void *priv, u8 ring_index) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -721,11 +860,86 @@ static inline bool nbl_ring_desc_used(struct nbl_ring_desc *ring_desc, bool used return avail == used && used == used_wrap_counter; } -static int nbl_res_txrx_clean_tx_irq(struct nbl_res_tx_ring *tx_ring) +static inline void nbl_rep_update_tx_stats(struct net_device *netdev, struct nbl_tx_buffer *buffer) { - struct nbl_tx_buffer *tx_buffer; - struct nbl_ring_desc *tx_desc; - unsigned int i = tx_ring->next_to_clean; + struct nbl_resource_mgt *res_mgt = + NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev)); + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + u16 rep_data_index = 0, rep_vsi_id; + + if (!eswitch_info || eswitch_info->mode != NBL_ESWITCH_OFFLOADS) + return; + + if (!buffer->skb) + return; + + rep_vsi_id = *(u16 *)&buffer->skb->cb[NBL_SKB_FILL_VSI_ID_OFF]; + rep_data_index = nbl_res_get_rep_idx(eswitch_info, rep_vsi_id); + if (rep_data_index >= eswitch_info->num_vfs) + return; + + if (eswitch_info->rep_data[rep_data_index].rep_vsi_id == rep_vsi_id) { + u64_stats_update_begin(&eswitch_info->rep_data[rep_data_index].rep_syncp); + eswitch_info->rep_data[rep_data_index].tx_packets += buffer->gso_segs; + eswitch_info->rep_data[rep_data_index].tx_bytes += buffer->bytecount; + u64_stats_update_end(&eswitch_info->rep_data[rep_data_index].rep_syncp); + } +} + +static struct net_device *nbl_get_rep_netdev(struct nbl_resource_mgt *res_mgt, u16 rep_vsi_id) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + u16 rep_data_index = 0; + + rep_data_index = nbl_res_get_rep_idx(eswitch_info, rep_vsi_id); + if (rep_data_index >= eswitch_info->num_vfs) + return NULL; + if (eswitch_info->rep_data[rep_data_index].rep_vsi_id == rep_vsi_id) + return eswitch_info->rep_data[rep_data_index].netdev; + nbl_info(common, NBL_DEBUG_RESOURCE, "failed to get rep netdev, rep_vsi_id: %d\n", rep_vsi_id); + return NULL; +} + +static inline void nbl_rep_update_rx_stats(struct net_device *netdev, + struct sk_buff *skb, u16 sport_id) +{ + struct nbl_resource_mgt *res_mgt = + NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev)); + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct net_device *rep_netdev = NULL; + u16 rep_data_index = 0; + + if (!eswitch_info || eswitch_info->mode != NBL_ESWITCH_OFFLOADS) + return; + + rep_data_index = nbl_res_get_rep_idx(eswitch_info, sport_id); + if (rep_data_index >= eswitch_info->num_vfs) + return; + + rep_netdev = nbl_get_rep_netdev(res_mgt, sport_id); + if (!rep_netdev) { + /* this is expected while switchdev mode is being enabled */ + nbl_info(common, NBL_DEBUG_RESOURCE, + "failed to get rep netdev, sport_id: %d\n", sport_id); + return; + } + skb->dev = rep_netdev; + + if (eswitch_info->rep_data[rep_data_index].rep_vsi_id == sport_id) { + u64_stats_update_begin(&eswitch_info->rep_data[rep_data_index].rep_syncp); + eswitch_info->rep_data[rep_data_index].rx_packets += 1; + eswitch_info->rep_data[rep_data_index].rx_bytes += skb->len; + u64_stats_update_end(&eswitch_info->rep_data[rep_data_index].rep_syncp); + } +} + +static int nbl_res_txrx_clean_tx_irq(struct nbl_res_tx_ring *tx_ring) +{ + struct nbl_tx_buffer *tx_buffer; + struct nbl_ring_desc *tx_desc; + unsigned int i = tx_ring->next_to_clean; unsigned int total_tx_pkts = 0; unsigned int total_tx_bytes = 0; unsigned int total_tx_descs = 0; @@ -750,6 +964,9 @@ static int nbl_res_txrx_clean_tx_irq(struct nbl_res_tx_ring *tx_ring) total_tx_pkts += tx_buffer->gso_segs; total_tx_bytes += tx_buffer->bytecount; + if (nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_CTRL], tx_ring->queue_index)) + nbl_rep_update_tx_stats(tx_ring->netdev, tx_buffer); + while (true) { total_tx_descs++; nbl_unmap_and_free_tx_resource(tx_ring, tx_buffer, true, true); @@ -792,7 +1009,7 @@ static int nbl_res_txrx_clean_tx_irq(struct nbl_res_tx_ring *tx_ring) #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(total_tx_pkts && netif_carrier_ok(tx_ring->netdev) && - tx_ring->queue_index < NBL_DEFAULT_PF_HW_QUEUE_NUM && + nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_DATA], tx_ring->queue_index) && (nbl_unused_tx_desc_count(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. @@ -835,7 +1052,100 @@ static inline void nbl_add_rx_frag(struct nbl_rx_buffer *rx_buffer, { page_ref_inc(rx_buffer->di->page); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->di->page, - rx_buffer->offset, size, NBL_RX_BUFSZ); + rx_buffer->offset, size, rx_buffer->size); +} + +static void nbl_resync_update_sn(struct net_device *netdev, struct sk_buff *skb, u16 offset) +{ + struct ethhdr *eth = (struct ethhdr *)(skb->data); + struct net *net = dev_net(netdev); + struct sock *sk; + struct tls_context *tls_ctx; + struct nbl_ktls_offload_context_rx **ctx; + struct nbl_ktls_offload_context_rx *priv; + struct iphdr *iph; + struct tcphdr *th; + int depth = 0; + __be32 seq; + + skb->mac_len = ETH_HLEN; + (void)__vlan_get_protocol(skb, eth->h_proto, &depth); + iph = (struct iphdr *)(skb->data + depth); + + if (iph->version == 4) { + depth += iph->ihl * 4; + th = (void *)iph + iph->ihl * 4; + + sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, + iph->saddr, th->source, iph->daddr, + th->dest, netdev->ifindex); + } else { + struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph; + + depth += sizeof(struct ipv6hdr); + th = (void *)ipv6h + sizeof(struct ipv6hdr); + + sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, + &ipv6h->saddr, th->source, + &ipv6h->daddr, ntohs(th->dest), + netdev->ifindex, 0); + } + + depth += th->doff * 4; + if (unlikely(!sk)) + return; + + if (unlikely(sk->sk_state == TCP_TIME_WAIT)) + goto unref; + seq = th->seq; + seq = htonl(ntohl(seq) + offset - depth - 1); + tls_offload_rx_resync_request(sk, seq); + tls_ctx = tls_get_ctx(sk); + ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); + priv = *ctx; + priv->tcp_seq = ntohl(th->seq); + +unref: + sock_gen_put(sk); +} + +static int nbl_ktls_rx_handle_skb(struct nbl_res_rx_ring *rx_ring, struct sk_buff *skb, + struct nbl_rx_extend_head *hdr) +{ + if (!hdr->l4s_hdl_ind) + return 0; + + if (hdr->l4s_dec_ind) { +
skb->decrypted = 1; + rx_ring->rx_stats.tls_decrypted_packets++; + } else if (hdr->l4s_resync_ind) { + rx_ring->rx_stats.tls_resync_req_num++; + nbl_resync_update_sn(rx_ring->netdev, skb, hdr->l4s_tcp_offset); + dev_dbg(NBL_RING_TO_DEV(rx_ring), "ingress ktls %u resync sn\n", hdr->l4s_sid); + } else if (!hdr->l4s_check_ind) { + dev_dbg(NBL_RING_TO_DEV(rx_ring), "ingress ktls %u auth fail\n", hdr->l4s_sid); + } else { + dev_err(NBL_RING_TO_DEV(rx_ring), "ingress ktls %u unknown error\n", hdr->l4s_sid); + } + + return 0; +} + +static inline int nbl_rx_vlan_pop(struct nbl_res_rx_ring *rx_ring, struct sk_buff *skb) +{ + struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; + + if (!rx_ring->vlan_proto) + return 0; + + if (rx_ring->vlan_proto != ntohs(veth->h_vlan_proto) || + (rx_ring->vlan_tci & VLAN_VID_MASK) != (ntohs(veth->h_vlan_TCI) & VLAN_VID_MASK)) + return 1; + + memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); + __skb_pull(skb, VLAN_HLEN); + + return 0; } static void nbl_txrx_register_vsi_ring(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num) @@ -847,35 +1157,59 @@ static void nbl_txrx_register_vsi_ring(void *priv, u16 vsi_index, u16 ring_offse txrx_mgt->vsi_info[vsi_index].ring_num = ring_num; } +static void nbl_res_txrx_cfg_txrx_vlan(void *priv, u16 vlan_tci, u16 vlan_proto, u8 vsi_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_txrx_vsi_info *vsi_info = &txrx_mgt->vsi_info[vsi_index]; + struct nbl_res_tx_ring *tx_ring; + struct nbl_res_rx_ring *rx_ring; + u16 i; + + if (!txrx_mgt->tx_rings || !txrx_mgt->rx_rings) + return; + + for (i = vsi_info->ring_offset; i < vsi_info->ring_offset + vsi_info->ring_num; i++) { + tx_ring = txrx_mgt->tx_rings[i]; + rx_ring = txrx_mgt->rx_rings[i]; + + if (tx_ring) { + tx_ring->vlan_tci = vlan_tci; + tx_ring->vlan_proto = vlan_proto; + } + + if (rx_ring) { + rx_ring->vlan_tci = vlan_tci; + rx_ring->vlan_proto = vlan_proto; + } + } +} + /** * Current version support merging multiple descriptor for one packet. 
*/ static struct sk_buff *nbl_construct_skb(struct nbl_res_rx_ring *rx_ring, struct napi_struct *napi, - struct nbl_rx_buffer *rx_buf, unsigned int size) + struct nbl_rx_buffer *rx_buf, struct xdp_buff *xdp) { struct sk_buff *skb; - char *p, *buf; int tailroom, shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - unsigned int truesize = NBL_RX_BUFSZ; + unsigned int truesize = rx_buf->size; unsigned int headlen; + unsigned int size = xdp->data_end - xdp->data; + u8 metasize = xdp->data - xdp->data_meta; - /* p point dma buff start, buf point whole buffer start*/ - p = page_address(rx_buf->di->page) + rx_buf->offset; - buf = p - NBL_RX_PAD; - /* p point pkt start */ - p += NBL_BUFFER_HDR_LEN; - tailroom = truesize - size - NBL_RX_PAD; - size -= NBL_BUFFER_HDR_LEN; + tailroom = truesize - size - rx_buf->rx_pad - NBL_BUFFER_HDR_LEN; if (size > NBL_RX_HDR_SIZE && tailroom >= shinfo_size) { - skb = build_skb(buf, truesize); + skb = build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; page_ref_inc(rx_buf->di->page); - skb_reserve(skb, p - buf); - skb_put(skb, size); + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + skb_metadata_set(skb, metasize); goto ok; } @@ -885,9 +1219,9 @@ static struct sk_buff *nbl_construct_skb(struct nbl_res_rx_ring *rx_ring, struct headlen = size; if (headlen > NBL_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, p, NBL_RX_HDR_SIZE); + headlen = eth_get_headlen(skb->dev, xdp->data, NBL_RX_HDR_SIZE); - memcpy(__skb_put(skb, headlen), p, ALIGN(headlen, sizeof(long))); + memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); size -= headlen; if (size) { page_ref_inc(rx_buf->di->page); @@ -930,24 +1264,341 @@ static inline void nbl_put_rx_buf(struct nbl_res_rx_ring *rx_ring, struct nbl_rx nbl_put_rx_frag(rx_ring, rx_buf, true); } +static inline int nbl_maybe_stop_tx(struct nbl_res_tx_ring *tx_ring, unsigned int size) +{ + if (likely(nbl_unused_tx_desc_count(tx_ring) >= size)) + return 0; + + if (!nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_DATA], tx_ring->queue_index)) + return -EBUSY; + + dev_dbg(NBL_RING_TO_DEV(tx_ring), "unused_desc_count:%u, size:%u, stop queue %u\n", + nbl_unused_tx_desc_count(tx_ring), size, tx_ring->queue_index); + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* order the queue stop against the free-descriptor recheck below */ + smp_mb(); + + if (likely(nbl_unused_tx_desc_count(tx_ring) < size)) + return -EBUSY; + + dev_dbg(NBL_RING_TO_DEV(tx_ring), "unused_desc_count:%u, size:%u, start queue %u\n", + nbl_unused_tx_desc_count(tx_ring), size, tx_ring->queue_index); + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + + return 0; +} + +static int nbl_res_txrx_xmit_xdp_ring(struct nbl_res_tx_ring *xdp_ring, struct xdp_frame *xdpf) +{ + u16 index = xdp_ring->next_to_use; + u16 avail_used_flags = xdp_ring->avail_used_flags; + unsigned int size; + dma_addr_t dma; + union nbl_tx_extend_head *hdr; + struct device *dma_dev = NBL_RING_TO_DMA_DEV(xdp_ring); + struct nbl_tx_buffer *tx_buffer = NBL_TX_BUF(xdp_ring, index); + struct nbl_ring_desc *tx_desc = NBL_TX_DESC(xdp_ring, index); + const struct ethhdr *eth; + + if (xdpf->headroom < sizeof(union nbl_tx_extend_head)) + return -EOVERFLOW; + + if (unlikely(nbl_maybe_stop_tx(xdp_ring, 1))) { + xdp_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + size = xdpf->len; + eth = (struct ethhdr *)xdpf->data; + xdpf->headroom -= sizeof(union nbl_tx_extend_head); + xdpf->data -= sizeof(union nbl_tx_extend_head); + hdr = xdpf->data;
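+	/* hdr points at the extend header carved out of the frame's headroom + * above; clear and fill it so header and payload are DMA-mapped as + * one contiguous buffer + */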
+ memset(hdr, 0, sizeof(union nbl_tx_extend_head)); + hdr->fwd = NBL_TX_FWD_TYPE_NORMAL; + xdpf->len += sizeof(union nbl_tx_extend_head); + dma = dma_map_single(dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, dma)) { + xdp_ring->tx_stats.tx_dma_busy++; + return NETDEV_TX_BUSY; + } + + dma_unmap_addr_set(tx_buffer, dma, dma); + dma_unmap_len_set(tx_buffer, len, xdpf->len); + tx_buffer->raw_buff = xdpf->data; + tx_buffer->gso_segs = 1; + tx_buffer->bytecount = size; + tx_desc->addr = cpu_to_le64(dma); + tx_desc->len = xdpf->len; + tx_desc->id = 0; + index++; + if (index == xdp_ring->desc_num) { + index = 0; + xdp_ring->avail_used_flags ^= + 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + } + + /* TODO: account for XDP multicast packets */ + xdp_ring->tx_stats.tx_unicast_packets++; + tx_buffer->next_to_watch = tx_desc; + + /* write all descriptor fields before publishing the flags */ + wmb(); + + xdp_ring->next_to_use = index; + tx_desc->flags = cpu_to_le16(avail_used_flags); + + return NETDEV_TX_OK; +} + +static int nbl_res_txrx_xmit_xdp_buff(struct nbl_res_rx_ring *rx_ring, struct xdp_buff *xdp_buff) +{ + int ret; + struct nbl_res_tx_ring *xdp_ring; + struct xdp_frame *xdpf; + struct nbl_txrx_mgt *txrx_mgt = rx_ring->txrx_mgt; + + xdpf = xdp_convert_buff_to_frame(xdp_buff); + if (unlikely(!xdpf)) + goto buff_to_frame_failed; + + xdp_ring = nbl_res_txrx_select_xdp_ring(txrx_mgt); + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_lock(&xdp_ring->xmit_lock); + + ret = nbl_res_txrx_xmit_xdp_ring(xdp_ring, xdpf); + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_unlock(&xdp_ring->xmit_lock); + + return ret; +buff_to_frame_failed: + return -1; +} + +static int +nbl_res_txrx_run_xdp(struct nbl_res_rx_ring *rx_ring, struct nbl_rx_buffer *rx_buf, + struct nbl_xdp_output *xdp_output, struct xdp_buff *xdp_buff) +{ + struct nbl_rx_extend_head *hdr; + struct nbl_ring_desc *rx_desc; + const struct ethhdr *eth; + int i; + int err; + enum xdp_action act; + int nbl_act; + u16 num_buffers = 0; + + hdr = xdp_buff->data - NBL_BUFFER_HDR_LEN; + net_prefetch(hdr); + num_buffers = le16_to_cpu(hdr->num_buffers); + + /* the RX XDP path only supports one descriptor per packet */ + if (num_buffers > 1) + goto drop_big_packet; + + xdp_output->bytes = xdp_buff->data_end - xdp_buff->data; + eth = (struct ethhdr *)(hdr + 1); + if (unlikely(is_multicast_ether_addr(eth->h_dest))) + xdp_output->flags |= NBL_XDP_FLAG_MULTICAST; + + xdp_output->desc_done_num++; + xdp_init_buff(xdp_buff, rx_buf->size, &rx_ring->xdp_rxq); + act = bpf_prog_run_xdp(rx_ring->xdp_prog, xdp_buff); + switch (act) { + case XDP_PASS: + nbl_act = 0; + break; + case XDP_TX: + nbl_act = 1; + page_ref_inc(rx_buf->di->page); + err = nbl_res_txrx_xmit_xdp_buff(rx_ring, xdp_buff); + if (unlikely(err)) { + page_ref_dec(rx_buf->di->page); + goto xdp_aborted; + } + + xdp_output->flags |= NBL_XDP_FLAG_TX; + break; + case XDP_REDIRECT: + nbl_act = 1; + page_ref_inc(rx_buf->di->page); + err = xdp_do_redirect(rx_ring->netdev, xdp_buff, rx_ring->xdp_prog); + if (unlikely(err)) { + page_ref_dec(rx_buf->di->page); + goto xdp_aborted; + } + + xdp_output->flags |= NBL_XDP_FLAG_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(rx_ring->netdev, rx_ring->xdp_prog, act); + fallthrough; + case XDP_ABORTED: +xdp_aborted: + trace_xdp_exception(rx_ring->netdev, rx_ring->xdp_prog, act); + fallthrough; + case XDP_DROP: + xdp_output->flags |= NBL_XDP_FLAG_DROP; + nbl_act = 1; + break; + } + + if (nbl_act) + nbl_put_rx_buf(rx_ring, rx_buf); + + return nbl_act; +
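+/* the XDP path handles only single-descriptor packets: an oversized + * packet is dropped and all of its remaining descriptors are consumed + */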
+drop_big_packet: + nbl_put_rx_buf(rx_ring, rx_buf); + xdp_output->desc_done_num++; + xdp_output->flags |= NBL_XDP_FLAG_OVERSIZE; + for (i = 1; i < num_buffers; i++) { + rx_desc = NBL_RX_DESC(rx_ring, rx_ring->next_to_clean); + if (!nbl_ring_desc_used(rx_desc, rx_ring->used_wrap_counter)) + break; + + dma_rmb(); + xdp_output->bytes += le32_to_cpu(rx_desc->len); + xdp_output->desc_done_num++; + rx_buf = nbl_get_rx_buf(rx_ring); + nbl_put_rx_buf(rx_ring, rx_buf); + } + + return 1; +} + +static int +nbl_res_txrx_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **frame, u32 flags) +{ + int ret; + int i; + int nxmit = 0; + struct nbl_res_tx_ring *xdp_ring; + struct nbl_resource_mgt *res_mgt = + NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev)); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + xdp_ring = nbl_res_txrx_select_xdp_ring(txrx_mgt); + if (unlikely(!xdp_ring)) + return -ENXIO; + + if (unlikely(!xdp_ring->valid)) + return -ENETDOWN; + + if (unlikely(!nbl_res_txrx_is_xdp_ring(xdp_ring))) + return -ENXIO; + + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_lock(&xdp_ring->xmit_lock); + + for (i = 0; i < n; i++) { + ret = nbl_res_txrx_xmit_xdp_ring(xdp_ring, frame[i]); + if (ret) + break; + + nxmit++; + } + + if (unlikely(flags & XDP_XMIT_FLUSH && nxmit)) + writel(xdp_ring->notify_qid, xdp_ring->notify_addr); + + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_unlock(&xdp_ring->xmit_lock); + + return nxmit; +} + +static void +nbl_res_txrx_update_xdp_tail_locked(struct nbl_res_rx_ring *rx_ring) +{ + struct nbl_res_tx_ring *xdp_ring; + struct nbl_txrx_mgt *txrx_mgt = rx_ring->txrx_mgt; + + xdp_ring = nbl_res_txrx_select_xdp_ring(txrx_mgt); + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_lock(&xdp_ring->xmit_lock); + + writel(xdp_ring->notify_qid, xdp_ring->notify_addr); + + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_unlock(&xdp_ring->xmit_lock); +} + +static int nbl_res_txrx_register_xdp_rxq(void *priv, u8 ring_index) +{ + int err; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_res_vector *vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); + + err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->queue_index, + vector->nbl_napi.napi.napi_id); + if (err < 0) { + nbl_err(common, NBL_DEBUG_RESOURCE, "Register xdp rxq err\n"); + return -1; + } + + err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); + if (err < 0) { + nbl_err(common, NBL_DEBUG_RESOURCE, "Register xdp rxq mem model err\n"); + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + return -1; + } + + return 0; +} + +static void nbl_res_txrx_unregister_xdp_rxq(void *priv, u8 ring_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); + + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); +} + +static inline void nbl_res_txrx_build_xdp_buff(struct nbl_rx_buffer *rx_buf, + struct nbl_ring_desc *rx_desc, + struct xdp_buff *xdp) +{ + char *p, *buf; + u32 size; + + p = page_address(rx_buf->di->page) + rx_buf->offset; + buf = p - rx_buf->rx_pad; + size = rx_desc->len - NBL_BUFFER_HDR_LEN; + xdp_prepare_buff(xdp, buf, rx_buf->rx_pad + NBL_BUFFER_HDR_LEN, size, true); +} static int 
nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, struct napi_struct *napi, int budget) { + struct nbl_xdp_output xdp_output; + struct xdp_buff xdp; struct nbl_ring_desc *rx_desc; struct nbl_rx_buffer *rx_buf; struct nbl_rx_extend_head *hdr; struct sk_buff *skb = NULL; unsigned int total_rx_pkts = 0; unsigned int total_rx_bytes = 0; + unsigned int xdp_tx_pkts = 0; + unsigned int xdp_redirect_pkts = 0; + unsigned int xdp_oversize = 0; + unsigned int xdp_drop = 0; unsigned int size; - u16 desc_count = 0; - u16 num_buffers = 0; + int nbl_act; u32 rx_multicast_packets = 0; u32 rx_unicast_packets = 0; + u16 desc_count = 0; + u16 num_buffers = 0; u16 cleaned_count = nbl_unused_rx_desc_count(rx_ring); - u16 sport_id; + u16 sport_id, sport_type; bool failure = 0; + bool drop = false; while (likely(total_rx_pkts < budget)) { rx_desc = NBL_RX_DESC(rx_ring, rx_ring->next_to_clean); @@ -960,6 +1611,29 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, size = le32_to_cpu(rx_desc->len); rx_buf = nbl_get_rx_buf(rx_ring); + nbl_res_txrx_build_xdp_buff(rx_buf, rx_desc, &xdp); + + if (READ_ONCE(rx_ring->xdp_prog)) { + memset(&xdp_output, 0, sizeof(xdp_output)); + nbl_act = nbl_res_txrx_run_xdp(rx_ring, rx_buf, &xdp_output, &xdp); + if (nbl_act) { + cleaned_count += xdp_output.desc_done_num; + if (unlikely(xdp_output.flags & NBL_XDP_FLAG_MULTICAST)) + rx_multicast_packets++; + else + rx_unicast_packets++; + + xdp_tx_pkts += !!(xdp_output.flags & NBL_XDP_FLAG_TX); + xdp_redirect_pkts += !!(xdp_output.flags & NBL_XDP_FLAG_REDIRECT); + xdp_drop += !!(xdp_output.flags & NBL_XDP_FLAG_DROP); + xdp_oversize += !!(xdp_output.flags & NBL_XDP_FLAG_OVERSIZE); + + total_rx_pkts++; + total_rx_bytes += xdp_output.bytes; + continue; + } + } + desc_count++; if (skb) { @@ -967,7 +1641,7 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, } else { hdr = page_address(rx_buf->di->page) + rx_buf->offset; net_prefetch(hdr); - skb = nbl_construct_skb(rx_ring, napi, rx_buf, size); + skb = nbl_construct_skb(rx_ring, napi, rx_buf, &xdp); if (unlikely(!skb)) { rx_ring->rx_stats.rx_alloc_buf_err_cnt++; break; @@ -975,7 +1649,10 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, num_buffers = le16_to_cpu(hdr->num_buffers); sport_id = hdr->sport_id; + sport_type = hdr->sport; nbl_rx_csum(rx_ring, skb, hdr); + nbl_ktls_rx_handle_skb(rx_ring, skb, hdr); + drop = nbl_rx_vlan_pop(rx_ring, skb); } cleaned_count++; @@ -986,9 +1663,18 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, if (unlikely(eth_skb_pad(skb))) { skb = NULL; + drop = false; continue; } + if (unlikely(drop)) { + dev_kfree_skb_any(skb); + skb = NULL; + drop = false; + continue; + } + + total_rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, rx_ring->netdev); if (unlikely(skb->pkt_type == PACKET_BROADCAST || skb->pkt_type == PACKET_MULTICAST)) @@ -996,14 +1682,21 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, else rx_unicast_packets++; - total_rx_bytes += skb->len; + if (sport_type) + nbl_rep_update_rx_stats(rx_ring->netdev, skb, sport_id); // nbl_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); napi_gro_receive(napi, skb); skb = NULL; + drop = false; total_rx_pkts++; } + if (xdp_redirect_pkts) + xdp_do_flush(); + + if (xdp_tx_pkts) + nbl_res_txrx_update_xdp_tail_locked(rx_ring); if (cleaned_count & (~(NBL_MAX_BATCH_DESC - 1))) failure = nbl_alloc_rx_bufs(rx_ring, cleaned_count & (~(NBL_MAX_BATCH_DESC - 1))); @@ -1012,6 +1705,10 @@ static int
nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, rx_ring->stats.bytes += total_rx_bytes; rx_ring->rx_stats.rx_multicast_packets += rx_multicast_packets; rx_ring->rx_stats.rx_unicast_packets += rx_unicast_packets; + rx_ring->rx_stats.xdp_tx_packets += xdp_tx_pkts; + rx_ring->rx_stats.xdp_redirect_packets += xdp_redirect_pkts; + rx_ring->rx_stats.xdp_oversize_packets += xdp_oversize; + rx_ring->rx_stats.xdp_drop_packets += xdp_drop; u64_stats_update_end(&rx_ring->syncp); return failure ? budget : total_rx_pkts; @@ -1019,20 +1716,26 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, static int nbl_res_napi_poll(struct napi_struct *napi, int budget) { - struct nbl_res_vector *vector = container_of(napi, struct nbl_res_vector, napi); + struct nbl_napi_struct *nbl_napi = container_of(napi, struct nbl_napi_struct, napi); + struct nbl_res_vector *vector = container_of(nbl_napi, struct nbl_res_vector, nbl_napi); struct nbl_res_tx_ring *tx_ring; + struct nbl_res_tx_ring *xdp_ring; struct nbl_res_rx_ring *rx_ring; - int complete = 1, cleaned = 0, tx_done = 1; + int complete = 1, cleaned = 0, tx_done = 1, xdp_done = 1; tx_ring = vector->tx_ring; rx_ring = vector->rx_ring; + xdp_ring = vector->xdp_ring; if (vector->started) { tx_done = nbl_res_txrx_clean_tx_irq(tx_ring); + if (xdp_ring && xdp_ring->valid) + xdp_done = nbl_res_txrx_clean_tx_irq(xdp_ring); + cleaned = nbl_res_txrx_clean_rx_irq(rx_ring, napi, budget); } - if (!tx_done) + if (!tx_done || !xdp_done) complete = 0; if (cleaned >= budget) @@ -1083,31 +1786,6 @@ static unsigned int nbl_xmit_desc_count(struct sk_buff *skb) return count; } -static inline int nbl_maybe_stop_tx(struct nbl_res_tx_ring *tx_ring, unsigned int size) -{ - if (likely(nbl_unused_tx_desc_count(tx_ring) >= size)) - return 0; - - if (tx_ring->queue_index >= NBL_DEFAULT_PF_HW_QUEUE_NUM) - return -EBUSY; - - dev_dbg(NBL_RING_TO_DEV(tx_ring), "unused_desc_count:%u, size:%u, stop queue %u\n", - nbl_unused_tx_desc_count(tx_ring), size, tx_ring->queue_index); - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - - /* smp_mb */ - smp_mb(); - - if (likely(nbl_unused_tx_desc_count(tx_ring) < size)) - return -EBUSY; - - dev_dbg(NBL_RING_TO_DEV(tx_ring), "unused_desc_count:%u, size:%u, start queue %u\n", - nbl_unused_tx_desc_count(tx_ring), size, tx_ring->queue_index); - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - - return 0; -} - /* set up TSO(TCP Segmentation Offload) */ static int nbl_tx_tso(struct nbl_tx_buffer *first, struct nbl_tx_hdr_param *hdr_param) { @@ -1224,149 +1902,681 @@ static int nbl_tx_csum(struct nbl_tx_buffer *first, struct nbl_tx_hdr_param *hdr return -1; } - l3_len = l4.hdr - ip.hdr; + l3_len = l4.hdr - ip.hdr; + + switch (l4_proto) { + case IPPROTO_TCP: + l4_type = NBL_TX_L4T_TCP; + l4_len = l4.tcp->doff; + l4_csum = 1; + break; + case IPPROTO_UDP: + l4_type = NBL_TX_L4T_UDP; + l4_len = (sizeof(struct udphdr) >> 2); + l4_csum = 1; + break; + case IPPROTO_SCTP: + if (first->tx_flags & NBL_TX_FLAGS_TSO) + return -1; + l4_type = NBL_TX_L4T_RSV; + l4_len = (sizeof(struct sctphdr) >> 2); + l4_csum = 1; + break; + default: + if (first->tx_flags & NBL_TX_FLAGS_TSO) + return -2; + + /* unsupported L4 protocol: the device cannot offload the L4 checksum, + * so compute the L4 checksum in software + */ + skb_checksum_help(skb); + return 0; + } + + hdr_param->mac_len = l2_len >> 1; + hdr_param->ip_len = l3_len >> 2; + hdr_param->l4_len = l4_len; + hdr_param->l4_type = l4_type; + hdr_param->inner_ip_type = inner_ip_type; +
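+	/* only L4 checksum insertion is requested from the hardware; + * L3 checksum offload stays disabled + */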
hdr_param->l3_csum_en = 0; + hdr_param->l4_csum_en = l4_csum; + + return 1; +} + +static int nbl_map_skb(struct nbl_res_tx_ring *tx_ring, struct sk_buff *skb, + u16 first, u16 *desc_index) +{ + u16 index = *desc_index; + const skb_frag_t *frag; + unsigned int frag_num = skb_shinfo(skb)->nr_frags; + struct device *dma_dev = NBL_RING_TO_DMA_DEV(tx_ring); + struct nbl_tx_buffer *tx_buffer = NBL_TX_BUF(tx_ring, index); + struct nbl_ring_desc *tx_desc = NBL_TX_DESC(tx_ring, index); + unsigned int i; + unsigned int size; + dma_addr_t dma; + + size = skb_headlen(skb); + dma = dma_map_single(dma_dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, dma)) + return -1; + + tx_buffer->dma = dma; + tx_buffer->len = size; + + tx_desc->addr = cpu_to_le64(dma); + tx_desc->len = size; + if (!first) + tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags | NBL_PACKED_DESC_F_NEXT); + + index++; + tx_desc++; + tx_buffer++; + if (index == tx_ring->desc_num) { + index = 0; + tx_ring->avail_used_flags ^= + 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + tx_desc = NBL_TX_DESC(tx_ring, 0); + tx_buffer = NBL_TX_BUF(tx_ring, 0); + } + + if (!frag_num) { + *desc_index = index; + return 0; + } + + frag = &skb_shinfo(skb)->frags[0]; + for (i = 0; i < frag_num; i++) { + size = skb_frag_size(frag); + dma = skb_frag_dma_map(dma_dev, frag, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, dma)) { + *desc_index = index; + return -1; + } + + tx_buffer->dma = dma; + tx_buffer->len = size; + tx_buffer->page = 1; + + tx_desc->addr = cpu_to_le64(dma); + tx_desc->len = size; + tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags | NBL_PACKED_DESC_F_NEXT); + index++; + tx_desc++; + tx_buffer++; + if (index == tx_ring->desc_num) { + index = 0; + tx_ring->avail_used_flags ^= + 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + tx_desc = NBL_TX_DESC(tx_ring, 0); + tx_buffer = NBL_TX_BUF(tx_ring, 0); + } + frag++; + } + + *desc_index = index; + return 0; +} + +static inline void nbl_tx_fill_tx_extend_header_bootis(union nbl_tx_extend_head *pkthdr, + struct nbl_tx_hdr_param *param) +{ + pkthdr->bootis.tso = param->tso; + pkthdr->bootis.mss = param->mss; + pkthdr->bootis.dport_info = 0; + pkthdr->bootis.dport_id = param->dport_id; + pkthdr->bootis.dport = NBL_TX_DPORT_ETH; + /* 0x0: drop, 0x1: normal fwd, 0x2: rsv, 0x3: cpu set dport */ + pkthdr->bootis.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED; + pkthdr->bootis.rss_lag_en = param->rss_lag_en; + + pkthdr->bootis.mac_len = param->mac_len; + pkthdr->bootis.ip_len = param->ip_len; + pkthdr->bootis.inner_ip_type = param->inner_ip_type; + pkthdr->bootis.l3_csum_en = param->l3_csum_en; + + pkthdr->bootis.l4_len = param->l4_len; + pkthdr->bootis.l4_type = param->l4_type; + pkthdr->bootis.l4_csum_en = param->l4_csum_en; +} + +static inline void nbl_tx_fill_tx_extend_header_leonis(union nbl_tx_extend_head *pkthdr, + struct nbl_tx_hdr_param *param) +{ + pkthdr->mac_len = param->mac_len; + pkthdr->ip_len = param->ip_len; + pkthdr->l4_len = param->l4_len; + pkthdr->l4_type = param->l4_type; + pkthdr->inner_ip_type = param->inner_ip_type; + + pkthdr->l4s_sid = param->l4s_sid; + pkthdr->l4s_sync_ind = param->l4s_sync_ind; + pkthdr->l4s_hdl_ind = param->l4s_hdl_ind; + pkthdr->l4s_pbrac_mode = param->l4s_pbrac_mode; + + pkthdr->mss = param->mss; + pkthdr->tso = param->tso; + + pkthdr->fwd = param->fwd; + pkthdr->rss_lag_en = param->rss_lag_en; + pkthdr->dport = param->dport; + pkthdr->dport_id = param->dport_id; + + pkthdr->l3_csum_en = 
param->l3_csum_en; + pkthdr->l4_csum_en = param->l4_csum_en; +} + +static bool nbl_ktls_send_init_packet(struct nbl_resource_mgt *res_mgt, + struct nbl_res_tx_ring *tx_ring, + struct nbl_ktls_offload_context_tx *priv_tx) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + struct nbl_tx_buffer *first; + struct nbl_ring_desc *first_desc; + struct nbl_ktls_init_packet *init_packet; + struct nbl_notify_param notify_param = {0}; + dma_addr_t hdrdma; + u16 avail_used_flags = tx_ring->avail_used_flags; + u16 head = tx_ring->next_to_use; + u16 i = head; + + first = NBL_TX_BUF(tx_ring, head); + first_desc = NBL_TX_DESC(tx_ring, head); + + init_packet = kzalloc(sizeof(*init_packet), GFP_KERNEL); + if (!init_packet) + return false; + + init_packet->pkthdr.l4s_sid = priv_tx->index; + init_packet->pkthdr.l4s_sync_ind = 1; + init_packet->pkthdr.l4s_hdl_ind = 1; + init_packet->init_payload.initial = 1; + init_packet->init_payload.sync = 0; + init_packet->init_payload.sid = priv_tx->index; + memcpy(init_packet->init_payload.iv, priv_tx->iv, NBL_KTLS_IV_LEN); + memcpy(init_packet->init_payload.rec_num, priv_tx->rec_num, NBL_KTLS_REC_LEN); + /* Since the hardware logic increments iv and rec_seq before using them, + * software delivers both decremented by 1 to keep software + * and hardware consistent + */ + nbl_ktls_bigint_decrement(init_packet->init_payload.iv, NBL_KTLS_IV_LEN); + nbl_ktls_bigint_decrement(init_packet->init_payload.rec_num, NBL_KTLS_REC_LEN); + + hdrdma = dma_map_single(dma_dev, init_packet, sizeof(*init_packet), DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, hdrdma)) { + kfree(init_packet); + return false; + } + + first_desc->len = cpu_to_le32(sizeof(*init_packet)); + first_desc->addr = cpu_to_le64(hdrdma); + first_desc->id = cpu_to_le16(head); + first_desc->flags = cpu_to_le16(avail_used_flags); + + first->dma = hdrdma; + first->len = sizeof(*init_packet); + first->tls_pkthdr = &init_packet->pkthdr; + i++; + if (i == tx_ring->desc_num) { + i = 0; + tx_ring->avail_used_flags ^= 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + } + + first->next_to_watch = first_desc; + tx_ring->next_to_use = i; + + notify_param.notify_qid = tx_ring->notify_qid; + notify_param.tail_ptr = i; + phy_ops->update_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &notify_param); + + return true; +} + +static enum nbl_ktls_sync_retval +nbl_tls_resync_info_get(struct nbl_ktls_offload_context_tx *priv_tx, u32 seq, + int datalen, struct nbl_tx_resync_info *info) +{ + int remaining = 0; + int i = 0; + enum nbl_ktls_sync_retval ret = NBL_KTLS_SYNC_DONE; + struct tls_record_info *record; + struct tls_offload_context_tx *tx_ctx; + unsigned long flags; + bool ends_before; + + tx_ctx = priv_tx->tx_ctx; + + spin_lock_irqsave(&tx_ctx->lock, flags); + record = tls_get_record(tx_ctx, seq, &info->rec_num); + if (!record) { + ret = NBL_KTLS_SYNC_FAIL; + goto out; + } + + ends_before = before(seq + datalen - 1, tls_record_start_seq(record)); + + if (unlikely(tls_record_is_start_marker(record))) { + ret = ends_before ?
NBL_KTLS_SYNC_SKIP_NO_DATA : NBL_KTLS_SYNC_FAIL; + goto out; + } else if (ends_before) { + ret = NBL_KTLS_SYNC_FAIL; + goto out; + } + + info->resync_len = seq - tls_record_start_seq(record); + remaining = info->resync_len; + + while (remaining > 0) { + skb_frag_t *frag = &record->frags[i]; + + remaining -= skb_frag_size(frag); + info->frags[i++] = *frag; + } + + if (remaining < 0) + skb_frag_size_add(&info->frags[i - 1], remaining); + + info->nr_frags = i; +out: + spin_unlock_irqrestore(&tx_ctx->lock, flags); + return ret; +} + +static bool nbl_ktls_send_resync_one(struct nbl_resource_mgt *res_mgt, + struct nbl_res_tx_ring *tx_ring, + struct nbl_ktls_sync_packet *sync_packet, + struct nbl_tx_resync_info *info) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + struct nbl_tx_buffer *tx_buffer; + struct nbl_ring_desc *tx_desc; + struct nbl_notify_param notify_param = {0}; + dma_addr_t hdrdma; + u16 head = tx_ring->next_to_use; + u32 red_off = 0; + int len, k; + + tx_buffer = NBL_TX_BUF(tx_ring, head); + tx_desc = NBL_TX_DESC(tx_ring, head); + + dev_dbg(NBL_RING_TO_DEV(tx_ring), "send one resync packet.\n"); + for (k = 0; k < info->nr_frags; k++) { + skb_frag_t *f = &info->frags[k]; + u8 *vaddr = kmap_local_page(skb_frag_page(f)); + u32 f_off = skb_frag_off(f); + u32 fsz = skb_frag_size(f); + + memcpy(sync_packet->sync_payload.redata + red_off, vaddr + f_off, fsz); + kunmap_local(vaddr); + red_off += fsz; + } + + len = info->resync_len + NBL_KTLS_SYNC_PKT_LEN; + + hdrdma = dma_map_single(dma_dev, sync_packet, len, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, hdrdma)) { + kfree(sync_packet); + return false; + } + + tx_desc->addr = cpu_to_le64(hdrdma); + tx_desc->len = cpu_to_le32(len); + tx_desc->id = cpu_to_le16(head); + tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags); + + tx_buffer->dma = hdrdma; + tx_buffer->len = len; + tx_buffer->next_to_watch = tx_desc; + tx_buffer->tls_pkthdr = &sync_packet->pkthdr; + + if (head + 1 == tx_ring->desc_num) { + tx_ring->next_to_use = 0; + tx_ring->avail_used_flags ^= 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + } else { + tx_ring->next_to_use = head + 1; + } + + notify_param.notify_qid = tx_ring->notify_qid; + notify_param.tail_ptr = tx_ring->next_to_use; + phy_ops->update_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &notify_param); + + return true; +} + +static bool nbl_ktls_send_resync_mul(struct nbl_resource_mgt *res_mgt, + struct nbl_res_tx_ring *tx_ring, + struct nbl_ktls_sync_packet *sync_packet, + struct nbl_tx_resync_info *info) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + union nbl_tx_extend_head *pkthdr; + struct nbl_tx_buffer *head_buffer; + struct nbl_ring_desc *head_desc; + struct nbl_tx_buffer *tx_buffer = NBL_TX_BUF(tx_ring, tx_ring->next_to_use); + struct nbl_ring_desc *tx_desc; + struct nbl_notify_param notify_param = {0}; + dma_addr_t hdrdma; + dma_addr_t bufdma; + dma_addr_t firstdma = 0; + skb_frag_t *frag; + u16 avail_used_flags = tx_ring->avail_used_flags; + u16 head = tx_ring->next_to_use; + u16 index = head; + u32 total_len = 0; + u32 remain_len; + int last_len; + int len, k; + unsigned int fsz; + + last_len = info->resync_len % NBL_KTLS_PER_CELL_LEN + NBL_KTLS_PER_CELL_LEN; + if (last_len > NBL_KTLS_MAX_CELL_LEN) + last_len -= NBL_KTLS_PER_CELL_LEN; + + dev_dbg(NBL_RING_TO_DEV(tx_ring), "send mul resync packet.\n"); + /* Each packet in the
middle is 512 bytes */ + remain_len = info->resync_len - last_len; + + head_buffer = NBL_TX_BUF(tx_ring, head); + head_desc = NBL_TX_DESC(tx_ring, head); + + hdrdma = dma_map_single(dma_dev, sync_packet, NBL_KTLS_SYNC_PKT_LEN, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, hdrdma)) { + kfree(sync_packet); + goto dma_map_error; + } + + head_desc->addr = cpu_to_le64(hdrdma); + head_desc->len = cpu_to_le32(NBL_KTLS_SYNC_PKT_LEN); + head_desc->id = cpu_to_le16(head); + + head_buffer->dma = hdrdma; + head_buffer->len = NBL_KTLS_SYNC_PKT_LEN; + head_buffer->tls_pkthdr = &sync_packet->pkthdr; + + for (k = 0; k < info->nr_frags; k++) { + frag = &info->frags[k]; + fsz = skb_frag_size(frag); + dev_dbg(NBL_RING_TO_DEV(tx_ring), "send frag %d len %u.\n", k, fsz); + bufdma = skb_frag_dma_map(dma_dev, frag, 0, fsz, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, bufdma)) { + index++; + goto dma_map_error; + } + firstdma = bufdma; + total_len = fsz; + while (fsz) { + index++; + if (index == tx_ring->desc_num) { + index = 0; + tx_buffer = NBL_TX_BUF(tx_ring, 0); + tx_desc = NBL_TX_DESC(tx_ring, 0); + tx_ring->avail_used_flags ^= + 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + } else { + tx_buffer = NBL_TX_BUF(tx_ring, index); + tx_desc = NBL_TX_DESC(tx_ring, index); + } + + len = remain_len % NBL_KTLS_PER_CELL_LEN; + len = (len) ? (len) : min_t(unsigned int, fsz, NBL_KTLS_PER_CELL_LEN); + if (fsz < len || remain_len == 0) + len = fsz; + + tx_desc->addr = cpu_to_le64(bufdma); + tx_desc->len = cpu_to_le32(len); + tx_desc->id = cpu_to_le16(head); + tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags); + dev_dbg(NBL_RING_TO_DEV(tx_ring), + "send %u packet len %d remain_len %u.\n", head, len, remain_len); + + head_buffer->next_to_watch = tx_desc; + + bufdma += len; + fsz -= len; + if (remain_len == 0) { + last_len -= len; + if (last_len > 0) + tx_desc->flags = cpu_to_le16(le16_to_cpu(tx_desc->flags) | + NBL_PACKED_DESC_F_NEXT); + continue; + } + + remain_len -= len; + if (remain_len % NBL_KTLS_PER_CELL_LEN) { + tx_desc->flags = cpu_to_le16(le16_to_cpu(tx_desc->flags) | + NBL_PACKED_DESC_F_NEXT); + continue; + } + + head_desc->flags = cpu_to_le16(avail_used_flags | NBL_PACKED_DESC_F_NEXT); + + index++; + if (index == tx_ring->desc_num) { + index = 0; + head = 0; + head_buffer = NBL_TX_BUF(tx_ring, 0); + head_desc = NBL_TX_DESC(tx_ring, 0); + tx_ring->avail_used_flags ^= + 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + } else { + head = index; + head_buffer = NBL_TX_BUF(tx_ring, head); + head_desc = NBL_TX_DESC(tx_ring, head); + } + avail_used_flags = tx_ring->avail_used_flags; + + pkthdr = kzalloc(sizeof(*pkthdr), GFP_KERNEL); + if (!pkthdr) + goto dma_map_error; + + pkthdr->l4s_sid = sync_packet->pkthdr.l4s_sid; + pkthdr->l4s_redun_ind = 1; + pkthdr->l4s_hdl_ind = 1; + hdrdma = dma_map_single(dma_dev, pkthdr, + sizeof(union nbl_tx_extend_head), DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, hdrdma)) { + kfree(pkthdr); + goto dma_map_error; + } + + head_desc->addr = cpu_to_le64(hdrdma); + head_desc->len = cpu_to_le32(sizeof(union nbl_tx_extend_head)); + head_desc->id = cpu_to_le16(head); + + head_buffer->dma = hdrdma; + head_buffer->len = sizeof(union nbl_tx_extend_head); + head_buffer->tls_pkthdr = pkthdr; + } + tx_buffer->dma = firstdma; + tx_buffer->len = total_len; + } + /* wmb */ + + wmb(); + + head_desc->flags = cpu_to_le16(avail_used_flags | NBL_PACKED_DESC_F_NEXT); + + if (index + 1 == tx_ring->desc_num) { + tx_ring->next_to_use = 0; + tx_ring->avail_used_flags 
^= 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + } else { + tx_ring->next_to_use = index + 1; + } + + notify_param.notify_qid = tx_ring->notify_qid; + notify_param.tail_ptr = tx_ring->next_to_use; + phy_ops->update_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &notify_param); + + return true; + +dma_map_error: + while (index != tx_ring->next_to_use) { + if (unlikely(!index)) + index = tx_ring->desc_num; + index--; + nbl_unmap_and_free_tx_resource(tx_ring, NBL_TX_BUF(tx_ring, index), false, false); + } + tx_ring->avail_used_flags = avail_used_flags; + + return false; +} - switch (l4_proto) { - case IPPROTO_TCP: - l4_type = NBL_TX_L4T_TCP; - l4_len = l4.tcp->doff; - l4_csum = 1; - break; - case IPPROTO_UDP: - l4_type = NBL_TX_L4T_UDP; - l4_len = (sizeof(struct udphdr) >> 2); - l4_csum = 1; - break; - case IPPROTO_SCTP: - if (first->tx_flags & NBL_TX_FLAGS_TSO) - return -1; - l4_type = NBL_TX_L4T_RSV; - l4_len = (sizeof(struct sctphdr) >> 2); - l4_csum = 1; - break; - default: - if (first->tx_flags & NBL_TX_FLAGS_TSO) - return -2; +/* The maximum packet length is 768 bytes and occurs only in the first or last packet. + * The middle packets are all 512 bytes long because the hardware cell is 512 bytes. + * If pkt_len > 768, pkt_len -= 512, which keeps every packet greater than 256 bytes. + */ +static bool nbl_ktls_send_resync_packet(struct nbl_resource_mgt *res_mgt, + struct nbl_res_tx_ring *tx_ring, + struct nbl_ktls_offload_context_tx *priv_tx, + struct nbl_tx_resync_info *info) +{ + struct nbl_ktls_sync_packet *sync_packet; + __be64 rec_num; - /* unsopported L4 protocol, device cannot offload L4 checksum, - * so software compute L4 checskum - */ - skb_checksum_help(skb); - return 0; - } + sync_packet = kzalloc(sizeof(*sync_packet), GFP_KERNEL); + if (!sync_packet) + return false; - hdr_param->mac_len = l2_len >> 1; - hdr_param->ip_len = l3_len >> 2; - hdr_param->l4_len = l4_len; - hdr_param->l4_type = l4_type; - hdr_param->inner_ip_type = inner_ip_type; - hdr_param->l3_csum_en = 0; - hdr_param->l4_csum_en = l4_csum; + sync_packet->pkthdr.l4s_sid = priv_tx->index; + sync_packet->pkthdr.l4s_redun_ind = 1; + sync_packet->pkthdr.l4s_redun_head_ind = 1; + sync_packet->pkthdr.l4s_hdl_ind = 1; + sync_packet->sync_payload.sync = 1; + sync_packet->sync_payload.sid = priv_tx->index; - return 1; + if (info->resync_len == 0) + info->rec_num = info->rec_num - 1; + rec_num = cpu_to_be64(info->rec_num); + memcpy(sync_packet->sync_payload.rec_num, &rec_num, NBL_KTLS_REC_LEN); + + if (info->resync_len <= NBL_KTLS_MAX_CELL_LEN) { + sync_packet->sync_payload.redlen = htons(info->resync_len); + return nbl_ktls_send_resync_one(res_mgt, tx_ring, sync_packet, info); + } + + sync_packet->sync_payload.redlen = htons(NBL_KTLS_PER_CELL_LEN); + return nbl_ktls_send_resync_mul(res_mgt, tx_ring, sync_packet, info); } -static int nbl_map_skb(struct nbl_res_tx_ring *tx_ring, struct sk_buff *skb, - u16 first, u16 *desc_index) +/* Handle out-of-order packets */ +static enum nbl_ktls_sync_retval +nbl_ktls_tx_handle_ooo(struct nbl_resource_mgt *res_mgt, struct nbl_res_tx_ring *tx_ring, + struct nbl_ktls_offload_context_tx *priv_tx, + u32 tcp_seq, int datalen) { - u16 index = *desc_index; - const skb_frag_t *frag; - unsigned int frag_num = skb_shinfo(skb)->nr_frags; - struct device *dma_dev = NBL_RING_TO_DMA_DEV(tx_ring); - struct nbl_tx_buffer *tx_buffer = NBL_TX_BUF(tx_ring, index); - struct nbl_ring_desc *tx_desc = NBL_TX_DESC(tx_ring, index); - unsigned int i; - unsigned int size; - dma_addr_t dma; + enum
nbl_ktls_sync_retval ret; + struct nbl_tx_resync_info resync_info = {0}; - size = skb_headlen(skb); - dma = dma_map_single(dma_dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(dma_dev, dma)) - return -1; + ret = nbl_tls_resync_info_get(priv_tx, tcp_seq, datalen, &resync_info); + if (unlikely(ret != NBL_KTLS_SYNC_DONE)) + return ret; - tx_buffer->dma = dma; - tx_buffer->len = size; + dev_dbg(NBL_RING_TO_DEV(tx_ring), "rec_num %llu, resync_len %u, nr_frags %u.\n", + resync_info.rec_num, resync_info.resync_len, resync_info.nr_frags); + if (unlikely(!nbl_ktls_send_resync_packet(res_mgt, tx_ring, priv_tx, &resync_info))) + return NBL_KTLS_SYNC_FAIL; - tx_desc->addr = cpu_to_le64(dma); - tx_desc->len = size; - if (!first) - tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags | NBL_PACKED_DESC_F_NEXT); + return NBL_KTLS_SYNC_DONE; +} - index++; - tx_desc++; - tx_buffer++; - if (index == tx_ring->desc_num) { - index = 0; - tx_ring->avail_used_flags ^= - 1 << NBL_PACKED_DESC_F_AVAIL | - 1 << NBL_PACKED_DESC_F_USED; - tx_desc = NBL_TX_DESC(tx_ring, 0); - tx_buffer = NBL_TX_BUF(tx_ring, 0); - } +static bool nbl_ktls_tx_offload_handle(struct nbl_resource_mgt *res_mgt, + struct nbl_res_tx_ring *tx_ring, + struct sk_buff *skb, + struct nbl_tx_hdr_param *accel_state) +{ + struct net_device *netdev = tx_ring->netdev; + struct tls_context *tls_ctx; + struct nbl_ktls_offload_context_tx **ctx; + struct nbl_ktls_offload_context_tx *priv_tx; + enum nbl_ktls_sync_retval ret; + u32 tcp_seq = 0; + int datalen = 0; - if (!frag_num) { - *desc_index = index; + datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (!datalen) return 0; - } - frag = &skb_shinfo(skb)->frags[0]; - for (i = 0; i < frag_num; i++) { - size = skb_frag_size(frag); - dma = skb_frag_dma_map(dma_dev, frag, 0, size, DMA_TO_DEVICE); - if (dma_mapping_error(dma_dev, dma)) { - *desc_index = index; - return -1; + tls_ctx = tls_get_ctx(skb->sk); + if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) + goto err_out; + + ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + priv_tx = *ctx; + /* config data to hardware */ + if (priv_tx->ctx_post_pending) { + priv_tx->ctx_post_pending = false; + if (!nbl_ktls_send_init_packet(res_mgt, tx_ring, priv_tx)) + goto err_out; + } + + tcp_seq = ntohl(tcp_hdr(skb)->seq); + dev_dbg(NBL_RING_TO_DEV(tx_ring), "ktls tx tcp_seq %u.\n", tcp_seq); + if (unlikely(priv_tx->expected_tcp != tcp_seq)) { + dev_dbg(NBL_RING_TO_DEV(tx_ring), "ktls tx tcp_seq %u, but expected_tcp %u.\n", + tcp_seq, priv_tx->expected_tcp); + ret = nbl_ktls_tx_handle_ooo(res_mgt, tx_ring, priv_tx, tcp_seq, datalen); + tx_ring->tx_stats.tls_ooo_packets++; + switch (ret) { + case NBL_KTLS_SYNC_DONE: + break; + case NBL_KTLS_SYNC_SKIP_NO_DATA: + if (likely(!skb->decrypted)) + goto out; + WARN_ON_ONCE(1); + goto err_out; + case NBL_KTLS_SYNC_FAIL: + goto err_out; } + } - tx_buffer->dma = dma; - tx_buffer->len = size; - tx_buffer->page = 1; + priv_tx->expected_tcp = tcp_seq + datalen; + accel_state->l4s_sid = priv_tx->index; + accel_state->l4s_pbrac_mode = 0; + accel_state->l4s_hdl_ind = 1; - tx_desc->addr = cpu_to_le64(dma); - tx_desc->len = size; - tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags | NBL_PACKED_DESC_F_NEXT); - index++; - tx_desc++; - tx_buffer++; - if (index == tx_ring->desc_num) { - index = 0; - tx_ring->avail_used_flags ^= - 1 << NBL_PACKED_DESC_F_AVAIL | - 1 << NBL_PACKED_DESC_F_USED; - tx_desc = NBL_TX_DESC(tx_ring, 0); - tx_buffer = NBL_TX_BUF(tx_ring, 0); - } - frag++; - } + 
tx_ring->tx_stats.tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; + tx_ring->tx_stats.tls_encrypted_bytes += datalen; - *desc_index = index; +out: return 0; + +err_out: + dev_kfree_skb_any(skb); + return 1; } -static inline void nbl_tx_fill_tx_extend_header_leonis(union nbl_tx_extend_head *pkthdr, - struct nbl_tx_hdr_param *param) +static bool nbl_tx_map_need_broadcast_check(struct sk_buff *skb) { - pkthdr->mac_len = param->mac_len; - pkthdr->ip_len = param->ip_len; - pkthdr->l4_len = param->l4_len; - pkthdr->l4_type = param->l4_type; - pkthdr->inner_ip_type = param->inner_ip_type; - - pkthdr->l4s_sid = param->l4s_sid; - pkthdr->l4s_sync_ind = param->l4s_sync_ind; - pkthdr->l4s_hdl_ind = param->l4s_hdl_ind; - pkthdr->l4s_pbrac_mode = param->l4s_pbrac_mode; - - pkthdr->mss = param->mss; - pkthdr->tso = param->tso; + __be16 protocol; - pkthdr->fwd = param->fwd; - pkthdr->rss_lag_en = param->rss_lag_en; - pkthdr->dport = param->dport; - pkthdr->dport_id = param->dport_id; + protocol = vlan_get_protocol(skb); - pkthdr->l3_csum_en = param->l3_csum_en; - pkthdr->l4_csum_en = param->l4_csum_en; + if (protocol == htons(ETH_P_ARP)) { + return true; + } else if (protocol == htons(ETH_P_IPV6)) { + if (pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)) && + ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { + struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1); + + if (m->icmph.icmp6_code == 0 && (m->icmph.icmp6_type == + NDISC_NEIGHBOUR_SOLICITATION || + m->icmph.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) { + return true; + } + } + } + return false; } static bool nbl_skb_is_lacp_or_lldp(struct sk_buff *skb) @@ -1395,6 +2605,7 @@ static int nbl_tx_map(struct nbl_res_tx_ring *tx_ring, struct sk_buff *skb, u16 avail_used_flags = tx_ring->avail_used_flags; u32 pkthdr_len; bool can_push; + bool doorbell = true; first_desc = NBL_TX_DESC(tx_ring, desc_index); first = NBL_TX_BUF(tx_ring, desc_index); @@ -1478,24 +2689,24 @@ static int nbl_tx_map(struct nbl_res_tx_ring *tx_ring, struct sk_buff *skb, tx_desc = NBL_TX_DESC(tx_ring, (desc_index == 0 ? 
tx_ring->desc_num : desc_index) - 1); tx_desc->flags &= cpu_to_le16(~NBL_PACKED_DESC_F_NEXT); - first->next_to_watch = tx_desc; first_desc->len += (hdr_param->total_hlen << NBL_TX_TOTAL_HEADERLEN_SHIFT); first_desc->id = cpu_to_le16(skb_shinfo(skb)->gso_size); + tx_ring->next_to_use = desc_index; + nbl_maybe_stop_tx(tx_ring, DESC_NEEDED); /* wmb */ wmb(); + first->next_to_watch = tx_desc; /* first desc last set flag */ if (first_desc == tx_desc) first_desc->flags = cpu_to_le16(avail_used_flags); else first_desc->flags = cpu_to_le16(avail_used_flags | NBL_PACKED_DESC_F_NEXT); - tx_ring->next_to_use = desc_index; - - nbl_maybe_stop_tx(tx_ring, DESC_NEEDED); /* kick doorbell passthrough for performace */ - writel(tx_ring->notify_qid, tx_ring->notify_addr); + if (doorbell) + writel(tx_ring->notify_qid, tx_ring->notify_addr); // nbl_trace(tx_map_ok, tx_ring, skb, head, first_desc, pkthdr); @@ -1543,8 +2754,9 @@ static netdev_tx_t nbl_res_txrx_rep_xmit(struct sk_buff *skb, WARN_ON(count > MAX_DESC_NUM_PER_PKT); if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) { if (net_ratelimit()) - dev_warn(NBL_RING_TO_DEV(tx_ring), "There is not enough descriptor to transmit packet in queue %u\n", - tx_ring->queue_index); + dev_dbg(NBL_RING_TO_DEV(tx_ring), "There is not enough " + "descriptor to transmit packet in queue %u\n", + tx_ring->queue_index); tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } @@ -1580,8 +2792,9 @@ static netdev_tx_t nbl_res_txrx_self_test_start_xmit(struct sk_buff *skb, struct WARN_ON(count > MAX_DESC_NUM_PER_PKT); if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) { if (net_ratelimit()) - dev_warn(NBL_RING_TO_DEV(tx_ring), "There is not enough descriptor to transmit packet in queue %u\n", - tx_ring->queue_index); + dev_dbg(NBL_RING_TO_DEV(tx_ring), "There is not enough " + "descriptor to transmit packet in queue %u\n", + tx_ring->queue_index); tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } @@ -1610,6 +2823,9 @@ static netdev_tx_t nbl_res_txrx_start_xmit(struct sk_buff *skb, .l4_len = 20 >> 2, .mss = 256, }; + u16 vlan_tci; + u16 vlan_proto; + struct sk_buff *skb2 = NULL; unsigned int count; int ret = 0; @@ -1620,12 +2836,33 @@ WARN_ON(count > MAX_DESC_NUM_PER_PKT); if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) { if (net_ratelimit()) - dev_warn(NBL_RING_TO_DEV(tx_ring), "There is not enough descriptor to transmit packet in queue %u\n", - tx_ring->queue_index); + dev_dbg(NBL_RING_TO_DEV(tx_ring), "There is not enough " + "descriptor to transmit packet in queue %u\n", + tx_ring->queue_index); tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } + if (tx_ring->vlan_proto || skb_vlan_tag_present(skb)) { + if (tx_ring->vlan_proto) { + vlan_proto = htons(tx_ring->vlan_proto); + vlan_tci = tx_ring->vlan_tci; + } + + if (skb_vlan_tag_present(skb)) { + vlan_proto = skb->vlan_proto; + vlan_tci = skb_vlan_tag_get(skb); + } + + skb = vlan_insert_tag_set_proto(skb, vlan_proto, vlan_tci); + if (!skb) + return NETDEV_TX_OK; + } + + if (nbl_ktls_device_offload(skb)) + if (nbl_ktls_tx_offload_handle(res_mgt, tx_ring, skb, &hdr_param)) + return NETDEV_TX_OK; + /* for dstore and eth, min packet len is 60 */ eth_skb_pad(skb); @@ -1633,12 +2870,43 @@ hdr_param.fwd = 1; hdr_param.rss_lag_en = 0; + /* in eswitch offloads mode, fwd to eth port */ + if (tx_ring->mode == NBL_ESWITCH_OFFLOADS) { + hdr_param.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED; + hdr_param.dport = NBL_TX_DPORT_ETH; + if
(txrx_mgt->bond_info.bond_enable && !nbl_skb_is_lacp_or_lldp(skb)) { + hdr_param.dport_id = txrx_mgt->bond_info.lag_id << + NBL_TX_DPORT_ID_LAG_OFFSET; + hdr_param.rss_lag_en = 1; + } + } + + if (nbl_skb_is_lacp_or_lldp(skb)) { + hdr_param.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED; + hdr_param.dport = NBL_TX_DPORT_ETH; + } - ret = nbl_tx_map(tx_ring, skb, &hdr_param); + /* broadcast-type packets (ARP/ND) are mapped to every bond member port */ + if (txrx_mgt->bond_info.bond_enable && nbl_tx_map_need_broadcast_check(skb)) { + int ret2; + + hdr_param.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED; + hdr_param.dport = NBL_TX_DPORT_ETH; + hdr_param.dport_id = txrx_mgt->bond_info.eth_id[0]; + hdr_param.rss_lag_en = 0; + + skb2 = skb_copy(skb, GFP_ATOMIC); + ret |= nbl_tx_map(tx_ring, skb, &hdr_param); + if (likely(skb2)) { + hdr_param.dport_id = txrx_mgt->bond_info.eth_id[1]; + ret2 = nbl_tx_map(tx_ring, skb2, &hdr_param); + if (ret2) + dev_kfree_skb_any(skb2); + } + + } else { + ret = nbl_tx_map(tx_ring, skb, &hdr_param); + } return ret; } @@ -1660,7 +2928,7 @@ static int nbl_res_txring_is_invalid(struct nbl_resource_mgt *res_mgt, { struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); struct nbl_res_tx_ring *tx_ring; - u8 ring_num = txrx_mgt->tx_ring_num; + u16 ring_num = txrx_mgt->tx_ring_num; if (index >= ring_num) { seq_printf(m, "Invalid tx index %d, max ring num is %d\n", index, ring_num); @@ -1681,7 +2949,7 @@ static int nbl_res_rxring_is_invalid(struct nbl_resource_mgt *res_mgt, { struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); struct nbl_res_rx_ring *rx_ring; - u8 ring_num = txrx_mgt->rx_ring_num; + u16 ring_num = txrx_mgt->rx_ring_num; if (index >= ring_num) { seq_printf(m, "Invalid rx index %d, max ring num is %d\n", index, ring_num); @@ -1819,7 +3087,7 @@ static int nbl_res_txrx_dump_ring_stats(void *priv, struct seq_file *m, bool is_ return nbl_res_rx_dump_ring_stats(res_mgt, m, index); } -static struct napi_struct *nbl_res_txrx_get_vector_napi(void *priv, u16 index) +static struct nbl_napi_struct *nbl_res_txrx_get_vector_napi(void *priv, u16 index) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); @@ -1830,7 +3098,7 @@ static struct napi_struct *nbl_res_txrx_get_vector_napi(void *priv, u16 index) return NULL; } - return &txrx_mgt->vectors[index]->napi; + return &txrx_mgt->vectors[index]->nbl_napi; } static void nbl_res_txrx_set_vector_info(void *priv, u8 *irq_enable_base, @@ -1856,6 +3124,7 @@ static void nbl_res_get_pt_ops(void *priv, struct nbl_resource_pt_ops *pt_ops) pt_ops->rep_xmit = nbl_res_txrx_rep_xmit; pt_ops->self_test_xmit = nbl_res_txrx_self_test_start_xmit; pt_ops->napi_poll = nbl_res_napi_poll; + pt_ops->xdp_xmit = nbl_res_txrx_xdp_xmit; } static u32 nbl_res_txrx_get_tx_headroom(void *priv) @@ -1889,10 +3158,23 @@ static void nbl_res_txrx_get_queue_stats(void *priv, u8 queue_id, } while (u64_stats_fetch_retry(syncp, start)); } +static bool nbl_res_is_ctrlq(struct nbl_txrx_mgt *txrx_mgt, u16 qid) +{ + u16 ring_num = txrx_mgt->vsi_info[NBL_VSI_CTRL].ring_num; + u16 ring_offset = txrx_mgt->vsi_info[NBL_VSI_CTRL].ring_offset; + + if (qid >= ring_offset && qid < ring_offset + ring_num) + return true; + + return false; +} + static void nbl_res_txrx_get_net_stats(void *priv, struct nbl_stats *net_stats) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *rx_ring; + struct nbl_res_tx_ring *tx_ring; int i; u64 bytes =
0, packets = 0; u64 tso_packets = 0, tso_bytes = 0; @@ -1900,6 +3182,11 @@ static void nbl_res_txrx_get_net_stats(void *priv, struct nbl_stats *net_stats) u64 rx_csum_packets = 0, rx_csum_errors = 0; u64 tx_multicast_packets = 0, tx_unicast_packets = 0; u64 rx_multicast_packets = 0, rx_unicast_packets = 0; + u64 tls_encrypted_packets = 0; + u64 tls_encrypted_bytes = 0; + u64 tls_ooo_packets = 0; + u64 tls_decrypted_packets = 0; + u64 tls_resync_req_num = 0; u64 tx_busy = 0, tx_dma_busy = 0; u64 tx_desc_addr_err_cnt = 0; u64 tx_desc_len_err_cnt = 0; @@ -1911,28 +3198,40 @@ static void nbl_res_txrx_get_net_stats(void *priv, struct nbl_stats *net_stats) u64 rx_cache_busy = 0; u64 rx_cache_waive = 0; u64 tx_skb_free = 0; + u64 xdp_tx_packets = 0; + u64 xdp_redirect_packets = 0; + u64 xdp_oversize_packets = 0; + u64 xdp_drop_packets = 0; unsigned int start; rcu_read_lock(); for (i = 0; i < txrx_mgt->rx_ring_num; i++) { - struct nbl_res_rx_ring *ring = NBL_RES_MGT_TO_RX_RING(res_mgt, i); + if (nbl_res_is_ctrlq(txrx_mgt, i)) + continue; + rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, i); do { - start = u64_stats_fetch_begin(&ring->syncp); - bytes += ring->stats.bytes; - packets += ring->stats.packets; - rx_csum_packets += ring->rx_stats.rx_csum_packets; - rx_csum_errors += ring->rx_stats.rx_csum_errors; - rx_multicast_packets += ring->rx_stats.rx_multicast_packets; - rx_unicast_packets += ring->rx_stats.rx_unicast_packets; - rx_desc_addr_err_cnt += ring->rx_stats.rx_desc_addr_err_cnt; - rx_alloc_buf_err_cnt += ring->rx_stats.rx_alloc_buf_err_cnt; - rx_cache_reuse += ring->rx_stats.rx_cache_reuse; - rx_cache_full += ring->rx_stats.rx_cache_full; - rx_cache_empty += ring->rx_stats.rx_cache_empty; - rx_cache_busy += ring->rx_stats.rx_cache_busy; - rx_cache_waive += ring->rx_stats.rx_cache_waive; - } while (u64_stats_fetch_retry(&ring->syncp, start)); + start = u64_stats_fetch_begin(&rx_ring->syncp); + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + rx_csum_packets += rx_ring->rx_stats.rx_csum_packets; + rx_csum_errors += rx_ring->rx_stats.rx_csum_errors; + rx_multicast_packets += rx_ring->rx_stats.rx_multicast_packets; + rx_unicast_packets += rx_ring->rx_stats.rx_unicast_packets; + rx_desc_addr_err_cnt += rx_ring->rx_stats.rx_desc_addr_err_cnt; + rx_alloc_buf_err_cnt += rx_ring->rx_stats.rx_alloc_buf_err_cnt; + rx_cache_reuse += rx_ring->rx_stats.rx_cache_reuse; + rx_cache_full += rx_ring->rx_stats.rx_cache_full; + rx_cache_empty += rx_ring->rx_stats.rx_cache_empty; + rx_cache_busy += rx_ring->rx_stats.rx_cache_busy; + rx_cache_waive += rx_ring->rx_stats.rx_cache_waive; + xdp_tx_packets += rx_ring->rx_stats.xdp_tx_packets; + xdp_redirect_packets += rx_ring->rx_stats.xdp_redirect_packets; + xdp_oversize_packets += rx_ring->rx_stats.xdp_oversize_packets; + xdp_drop_packets += rx_ring->rx_stats.xdp_drop_packets; + tls_decrypted_packets += rx_ring->rx_stats.tls_decrypted_packets; + tls_resync_req_num += rx_ring->rx_stats.tls_resync_req_num; + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); } net_stats->rx_packets = packets; @@ -1942,28 +3241,39 @@ static void nbl_res_txrx_get_net_stats(void *priv, struct nbl_stats *net_stats) net_stats->rx_csum_errors = rx_csum_errors; net_stats->rx_multicast_packets = rx_multicast_packets; net_stats->rx_unicast_packets = rx_unicast_packets; + net_stats->xdp_tx_packets = xdp_tx_packets; + net_stats->xdp_redirect_packets = xdp_redirect_packets; + net_stats->xdp_oversize_packets = xdp_oversize_packets; + net_stats->xdp_drop_packets = 
xdp_drop_packets; + net_stats->tls_decrypted_packets = tls_decrypted_packets; + net_stats->tls_resync_req_num = tls_resync_req_num; bytes = 0; packets = 0; for (i = 0; i < txrx_mgt->tx_ring_num; i++) { - struct nbl_res_tx_ring *ring = NBL_RES_MGT_TO_TX_RING(res_mgt, i); + if (nbl_res_is_ctrlq(txrx_mgt, i)) + continue; + tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, i); do { - start = u64_stats_fetch_begin(&ring->syncp); - bytes += ring->stats.bytes; - packets += ring->stats.packets; - tso_packets += ring->tx_stats.tso_packets; - tso_bytes += ring->tx_stats.tso_bytes; - tx_csum_packets += ring->tx_stats.tx_csum_packets; - tx_busy += ring->tx_stats.tx_busy; - tx_dma_busy += ring->tx_stats.tx_dma_busy; - tx_multicast_packets += ring->tx_stats.tx_multicast_packets; - tx_unicast_packets += ring->tx_stats.tx_unicast_packets; - tx_skb_free += ring->tx_stats.tx_skb_free; - tx_desc_addr_err_cnt += ring->tx_stats.tx_desc_addr_err_cnt; - tx_desc_len_err_cnt += ring->tx_stats.tx_desc_len_err_cnt; - } while (u64_stats_fetch_retry(&ring->syncp, start)); + start = u64_stats_fetch_begin(&tx_ring->syncp); + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + tso_packets += tx_ring->tx_stats.tso_packets; + tso_bytes += tx_ring->tx_stats.tso_bytes; + tx_csum_packets += tx_ring->tx_stats.tx_csum_packets; + tx_busy += tx_ring->tx_stats.tx_busy; + tx_dma_busy += tx_ring->tx_stats.tx_dma_busy; + tx_multicast_packets += tx_ring->tx_stats.tx_multicast_packets; + tx_unicast_packets += tx_ring->tx_stats.tx_unicast_packets; + tx_skb_free += tx_ring->tx_stats.tx_skb_free; + tx_desc_addr_err_cnt += tx_ring->tx_stats.tx_desc_addr_err_cnt; + tx_desc_len_err_cnt += tx_ring->tx_stats.tx_desc_len_err_cnt; + tls_encrypted_packets += tx_ring->tx_stats.tls_encrypted_packets; + tls_encrypted_bytes += tx_ring->tx_stats.tls_encrypted_bytes; + tls_ooo_packets += tx_ring->tx_stats.tls_ooo_packets; + } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); } rcu_read_unlock(); @@ -1987,6 +3297,9 @@ static void nbl_res_txrx_get_net_stats(void *priv, struct nbl_stats *net_stats) net_stats->rx_cache_empty = rx_cache_empty; net_stats->rx_cache_busy = rx_cache_busy; net_stats->rx_cache_waive = rx_cache_waive; + net_stats->tls_encrypted_packets = tls_encrypted_packets; + net_stats->tls_encrypted_bytes = tls_encrypted_bytes; + net_stats->tls_ooo_packets = tls_ooo_packets; } static u16 nbl_res_txrx_get_max_desc_num(void) @@ -2108,48 +3421,163 @@ static struct sk_buff *nbl_res_txrx_clean_rx_lb_test(void *priv, u32 ring_index) return skb; } +static int nbl_res_txrx_cfg_duppkt_info(void *priv, struct nbl_lag_member_list_param *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_txrx_bond_info *bond_info = &txrx_mgt->bond_info; + int i = 0; + + if (!param->duppkt_enable) { + memset(bond_info, 0, sizeof(*bond_info)); + return 0; + } else if (param->lag_num > 1) { + for (i = 0; i < param->lag_num && i < NBL_LAG_MAX_NUM; i++) + bond_info->eth_id[i] = param->member_list[i].eth_id; + bond_info->bond_enable = 1; + bond_info->lag_id = param->lag_id; + } + + return 0; +} + +static int +nbl_res_queue_stop_abnormal_sw_queue(void *priv, u16 local_queue_id, int type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_res_vector *vector = NULL; + struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, local_queue_id); + + if (type != NBL_TX) + return 0; + + if (tx_ring && 
!nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_XDP], local_queue_id)) + vector = NBL_RES_MGT_TO_VECTOR(res_mgt, local_queue_id); + + if (!tx_ring->valid) + return -EINVAL; + + if (vector && !vector->started) + return -EINVAL; + + if (vector) { + vector->started = false; + napi_synchronize(&vector->nbl_napi.napi); + netif_stop_subqueue(tx_ring->netdev, local_queue_id); + } + + return 0; +} + static dma_addr_t nbl_res_txrx_restore_abnormal_ring(void *priv, int ring_index, int type) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_res_vector *vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + struct nbl_res_vector *vector = NULL; + struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index); + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); - vector->started = false; - napi_synchronize(&vector->napi); + if (tx_ring && !nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_XDP], ring_index)) + vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); switch (type) { case NBL_TX: - nbl_res_txrx_stop_tx_ring(res_mgt, ring_index); - return nbl_res_txrx_start_tx_ring(res_mgt, ring_index); + if (tx_ring && tx_ring->valid) { + nbl_res_txrx_stop_tx_ring(res_mgt, ring_index); + return nbl_res_txrx_start_tx_ring(res_mgt, ring_index); + } else { + return (dma_addr_t)NULL; + } + break; case NBL_RX: - nbl_res_txrx_stop_rx_ring(res_mgt, ring_index); - return nbl_res_txrx_start_rx_ring(res_mgt, ring_index, true); + if (rx_ring && rx_ring->valid) { + nbl_res_txrx_stop_rx_ring(res_mgt, ring_index); + return nbl_res_txrx_start_rx_ring(res_mgt, ring_index, true); + } else { + return (dma_addr_t)NULL; + } + break; default: break; } - return -EINVAL; + return (dma_addr_t)NULL; } static int nbl_res_txrx_restart_abnormal_ring(void *priv, int ring_index, int type) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index); - struct nbl_res_vector *vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); + struct nbl_res_vector *vector = NULL; + int ret = 0; + + if (tx_ring && !nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_XDP], ring_index)) + vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); switch (type) { case NBL_TX: - writel(tx_ring->notify_qid, tx_ring->notify_addr); + if (tx_ring && tx_ring->valid) { + writel(tx_ring->notify_qid, tx_ring->notify_addr); + netif_start_subqueue(tx_ring->netdev, ring_index); + } else { + ret = -EINVAL; + } break; case NBL_RX: - nbl_res_txrx_kick_rx_ring(res_mgt, ring_index); + if (rx_ring && rx_ring->valid) + nbl_res_txrx_kick_rx_ring(res_mgt, ring_index); + else + ret = -EINVAL; break; default: break; } - vector->started = true; + if (vector) { + if (vector->net_msix_mask_en) + writel(vector->irq_data, vector->irq_enable_base); + vector->started = true; + } - return 0; + return ret; +} + +static void nbl_res_txrx_set_xdp_prog(void *priv, void *prog) +{ + int i; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *rx_ring; + struct nbl_res_tx_ring *tx_ring; + + for (i = 0; i < txrx_mgt->xdp_ring_num; i++) { + rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, i); + if (!rx_ring) + continue; + + WRITE_ONCE(rx_ring->xdp_prog, prog); + } + + for (i = 0; i < txrx_mgt->xdp_ring_num; i++) { + tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, i + 
txrx_mgt->xdp_ring_offset); + if (!tx_ring) + continue; + + WRITE_ONCE(tx_ring->xdp_prog, prog); + } +} + +static int nbl_res_get_max_mtu(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *rx_ring; + + rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, 0); + + if (!!(txrx_mgt->xdp_ring_num) && rx_ring->xdp_prog) + return rx_ring->buf_len - NBL_BUFFER_HDR_LEN - ETH_HLEN - (2 * VLAN_HLEN); + return NBL_MAX_JUMBO_FRAME_SIZE - NBL_PKT_HDR_PAD; } /* NBL_TXRX_SET_OPS(ops_name, func) @@ -2180,9 +3608,16 @@ do { \ NBL_TXRX_SET_OPS(set_tx_desc_num, nbl_res_txrx_set_tx_desc_num); \ NBL_TXRX_SET_OPS(set_rx_desc_num, nbl_res_txrx_set_rx_desc_num); \ NBL_TXRX_SET_OPS(clean_rx_lb_test, nbl_res_txrx_clean_rx_lb_test); \ + NBL_TXRX_SET_OPS(cfg_duppkt_info, nbl_res_txrx_cfg_duppkt_info); \ + NBL_TXRX_SET_OPS(stop_abnormal_sw_queue, nbl_res_queue_stop_abnormal_sw_queue); \ NBL_TXRX_SET_OPS(restore_abnormal_ring, nbl_res_txrx_restore_abnormal_ring); \ NBL_TXRX_SET_OPS(restart_abnormal_ring, nbl_res_txrx_restart_abnormal_ring); \ NBL_TXRX_SET_OPS(register_vsi_ring, nbl_txrx_register_vsi_ring); \ + NBL_TXRX_SET_OPS(cfg_txrx_vlan, nbl_res_txrx_cfg_txrx_vlan); \ + NBL_TXRX_SET_OPS(set_rings_xdp_prog, nbl_res_txrx_set_xdp_prog); \ + NBL_TXRX_SET_OPS(register_xdp_rxq, nbl_res_txrx_register_xdp_rxq); \ + NBL_TXRX_SET_OPS(unregister_xdp_rxq, nbl_res_txrx_unregister_xdp_rxq); \ + NBL_TXRX_SET_OPS(get_max_mtu, nbl_res_get_max_mtu); \ } while (0) /* Structure starts here, adding an op should not modify anything below */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h index 86130a9a7240961e571580fd354befd0bc6b7cbb..c6d6fba44ee3ac3d7e484cf17cfd8050cda14e11 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h @@ -16,12 +16,11 @@ #define NBL_MIN_DESC_NUM 128 #define NBL_MAX_DESC_NUM 32768 -#define NBL_PACKED_DESC_F_NEXT 1 -#define NBL_PACKED_DESC_F_WRITE 2 - #define DEFAULT_MAX_PF_QUEUE_PAIRS_NUM 16 #define DEFAULT_MAX_VF_QUEUE_PAIRS_NUM 2 +#define NBL_PACKED_DESC_F_NEXT 1 +#define NBL_PACKED_DESC_F_WRITE 2 #define NBL_PACKED_DESC_F_AVAIL 7 #define NBL_PACKED_DESC_F_USED 15 @@ -30,27 +29,21 @@ #define NBL_TX_BUF(tx_ring, i) (&(((tx_ring)->tx_bufs)[i])) #define NBL_RX_BUF(rx_ring, i) (&(((rx_ring)->rx_bufs)[i])) -#define DESC_NEEDED (MAX_SKB_FRAGS + 4) - -#define NBL_TX_POLL_WEIGHT 256 - #define NBL_RX_BUF_256 256 #define NBL_RX_HDR_SIZE NBL_RX_BUF_256 -#define NBL_RX_BUF_WRITE 16 -#define NBL_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD - NBL_BUFFER_HDR_LEN) +#define NBL_BUFFER_HDR_LEN (sizeof(struct nbl_rx_extend_head)) +#define NBL_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) +#define NBL_RX_BUFSZ (2048) +#define NBL_RXBUF_MIN_ORDER (10) +#define NBL_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#define NBL_TX_TOTAL_HEADERLEN_SHIFT 24 +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define NBL_TX_POLL_WEIGHT 256 #define NBL_TXD_DATALEN_BITS 16 #define NBL_TXD_DATALEN_MAX BIT(NBL_TXD_DATALEN_BITS) - #define MAX_DESC_NUM_PER_PKT (32) -#define NBL_RX_BUFSZ (2048) -#define NBL_RX_BUFSZ_ORDER (11) - -#define NBL_BUFFER_HDR_LEN (sizeof(struct nbl_rx_extend_head)) - -#define NBL_ETH_FRAME_MIN_SIZE 60 - #define NBL_TX_TSO_MSS_MIN (256) #define NBL_TX_TSO_MSS_MAX (16383) #define NBL_TX_TSO_L2L3L4_HDR_LEN_MIN (42) @@ -59,10 +52,10 @@ #define IP_VERSION_V4 (4) 
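The per-ring counters that nbl_res_txrx_get_net_stats() aggregates earlier in nbl_txrx.c are guarded by the kernel's u64_stats seqcount pattern: writers bracket their updates, and readers retry the snapshot until the sequence number is stable. Below is a minimal, self-contained sketch of both sides; the demo_ring_stats structure and helper names are hypothetical stand-ins for the driver's own per-ring stats blocks.

#include <linux/u64_stats_sync.h>

/* Hypothetical stand-in for a per-ring stats block;
 * syncp must be set up once with u64_stats_init().
 */
struct demo_ring_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

/* Writer side, e.g. in the TX/RX completion path. */
static void demo_stats_add(struct demo_ring_stats *s, u64 len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: the same fetch/retry loop the driver runs per ring. */
static void demo_stats_read(struct demo_ring_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}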
#define NBL_TX_FLAGS_TSO BIT(0) -#define NBL_TX_TOTAL_HEADERLEN_SHIFT 24 - -#define NBL_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) -#define NBL_RX_PAGE_PER_FRAGS (PAGE_SIZE >> NBL_RX_BUFSZ_ORDER) +#define NBL_KTLS_INIT_PAD_LEN 28 +#define NBL_KTLS_SYNC_PKT_LEN 30 +#define NBL_KTLS_PER_CELL_LEN 4096 +#define NBL_KTLS_MAX_CELL_LEN 6144 /* TX inner IP header type */ enum nbl_tx_iipt { @@ -213,6 +206,75 @@ struct nbl_rx_extend_head { uint32_t num_buffers :8; } __packed; +struct nbl_ktls_init_payload { + /* DW0 */ + u16 initial:1; + u16 rsv1:7; + u16 sync:1; + u16 rsv2:7; + u16 sid:10; + u16 rsv3:6; + /* DW1 */ + u16 rsv4; + u16 rsv5; + /* DWX */ + u8 rec_num[NBL_KTLS_REC_LEN]; + u8 iv[NBL_KTLS_IV_LEN]; + u8 pad[NBL_KTLS_INIT_PAD_LEN]; +}; + +struct nbl_ktls_sync_payload { + /* DW0 */ + u16 initial:1; + u16 rsv1:7; + u16 sync:1; + u16 rsv2:7; + u16 sid:10; + u16 rsv3:6; + /* DW1 */ + u16 rsv4; + u16 rsv5; + /* DWX */ + u8 rec_num[NBL_KTLS_REC_LEN]; + __be16 redlen; + u8 redata[NBL_KTLS_MAX_CELL_LEN]; +}; + +struct nbl_ktls_init_packet { + union nbl_tx_extend_head pkthdr; + struct nbl_ktls_init_payload init_payload; +}; + +struct nbl_ktls_sync_packet { + union nbl_tx_extend_head pkthdr; + struct nbl_ktls_sync_payload sync_payload; +}; + +enum nbl_ktls_sync_retval { + NBL_KTLS_SYNC_DONE, + NBL_KTLS_SYNC_SKIP_NO_DATA, + NBL_KTLS_SYNC_FAIL, +}; + +struct nbl_tx_resync_info { + u64 rec_num; + u32 resync_len; + u32 nr_frags; + skb_frag_t frags[MAX_SKB_FRAGS]; +}; + +#define NBL_XDP_FLAG_TX BIT(0) +#define NBL_XDP_FLAG_REDIRECT BIT(1) +#define NBL_XDP_FLAG_DROP BIT(2) +#define NBL_XDP_FLAG_OVERSIZE BIT(3) +#define NBL_XDP_FLAG_MULTICAST BIT(4) + +struct nbl_xdp_output { + u64 bytes; + u16 desc_done_num; + u16 flags; +}; +DECLARE_STATIC_KEY_FALSE(nbl_xdp_locking_key); static inline u16 nbl_unused_rx_desc_count(struct nbl_res_rx_ring *ring) { u16 ntc = ring->next_to_clean; @@ -229,4 +291,47 @@ static inline u16 nbl_unused_tx_desc_count(struct nbl_res_tx_ring *ring) return ((ntc > ntu) ? 0 : ring->desc_num) + ntc - ntu - 1; } +static inline bool nbl_ktls_device_offload(struct sk_buff *skb) +{ + return tls_is_skb_tx_device_offloaded(skb); +} + +static inline void nbl_ktls_bigint_decrement(u8 *data, int len) +{ + int i; + + for (i = len - 1; i >= 0; i--) { + if (data[i] == 0) { + data[i] = 0xFF; + } else { + --data[i]; + break; + } + } +} + +static inline +struct nbl_res_tx_ring *nbl_res_txrx_select_xdp_ring(struct nbl_txrx_mgt *txrx_mgt) +{ + int ring_idx; + int cpu_id = smp_processor_id(); + struct nbl_res_tx_ring *xdp_ring; + + if (!txrx_mgt->xdp_ring_num) + return NULL; + + if (static_key_enabled(&nbl_xdp_locking_key)) + ring_idx = cpu_id % txrx_mgt->xdp_ring_num; + else + ring_idx = cpu_id; + + xdp_ring = txrx_mgt->tx_rings[ring_idx + txrx_mgt->xdp_ring_offset]; + return xdp_ring; +} + +static inline bool nbl_res_txrx_is_xdp_ring(struct nbl_res_tx_ring *ring) +{ + return READ_ONCE(ring->xdp_prog) ? 
true : false; +} + #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c index 1c9caf981576a2849ff7cd53d9c8f92e60cc0039..182a4a920b0b28a98d6671d3ba71f2b3205e3409 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c @@ -23,9 +23,18 @@ static int nbl_res_set_promisc_mode(void *priv, u16 vsi_id, u16 mode) static int nbl_res_set_spoof_check_addr(void *priv, u16 vsi_id, u8 *mac) { + u16 func_id; struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + /* If the PF has configured the VF MAC and the VF is active, keep the PF-assigned spoof-check address. */ + if (!is_zero_ether_addr(vsi_info->mac_info[func_id].mac) && + nbl_res_check_func_active_by_queue(res_mgt, func_id)) { + return 0; + } + return phy_ops->set_spoof_check_addr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, mac); } @@ -43,20 +52,24 @@ static int nbl_res_set_vf_spoof_check(void *priv, u16 vsi_id, int vfid, u8 enabl static u16 nbl_res_get_vf_function_id(void *priv, u16 vsi_id, int vfid) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_sriov_info *sriov_info; u16 vf_vsi; int pfid = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); - sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[pfid]; - - if (vfid >= sriov_info->active_vf_num) - return U16_MAX; - vf_vsi = vfid == -1 ?
vsi_id : nbl_res_pfvfid_to_vsi_id(res_mgt, pfid, vfid, NBL_VSI_DATA); + return vf_vsi; +} + static int nbl_res_vsi_init_chip_module(void *priv) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -103,13 +116,88 @@ static void nbl_res_get_phy_caps(void *priv, u8 eth_id, struct nbl_phy_caps *phy phy_caps->pause_param = 0x3; } -static void nbl_res_get_phy_state(void *priv, u8 eth_id, struct nbl_phy_state *phy_state) +static void nbl_res_register_func_mac(void *priv, u8 *mac, u16 func_id) { - /*TODO need to get it through adminq*/ - phy_state->current_speed = SPEED_10000; - phy_state->fec_mode = ETHTOOL_FEC_OFF; - phy_state->fc.tx_pause = 1; - phy_state->fc.rx_pause = 1; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + + if (func_id >= NBL_MAX_FUNC) + return; + + ether_addr_copy(vsi_info->mac_info[func_id].mac, mac); +} + +static int nbl_res_register_func_link_forced(void *priv, u16 func_id, u8 link_forced, + bool *should_notify) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + + if (func_id >= NBL_MAX_FUNC) + return -EINVAL; + + resource_info->link_forced_info[func_id] = link_forced; + *should_notify = test_bit(func_id, resource_info->func_bitmap); + + return 0; +} + +static int nbl_res_get_link_forced(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + if (func_id >= NBL_MAX_FUNC) + return -EINVAL; + + return resource_info->link_forced_info[func_id]; +} + +static int nbl_res_register_func_trust(void *priv, u16 func_id, + bool trusted, bool *should_notify) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + + if (func_id >= NBL_MAX_FUNC) + return -EINVAL; + + vsi_info->mac_info[func_id].trusted = trusted; + *should_notify = test_bit(func_id, resource_info->func_bitmap); + + return 0; +} + +static int nbl_res_register_func_vlan(void *priv, u16 func_id, + u16 vlan_tci, u16 vlan_proto, bool *should_notify) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + + if (func_id >= NBL_MAX_FUNC) + return -EINVAL; + + vsi_info->mac_info[func_id].vlan_proto = vlan_proto; + vsi_info->mac_info[func_id].vlan_tci = vlan_tci; + *should_notify = test_bit(func_id, resource_info->func_bitmap); + + return 0; +} + +static int nbl_res_register_rate(void *priv, u16 func_id, int rate) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + + if (func_id >= NBL_MAX_FUNC) + return -EINVAL; + + vsi_info->mac_info[func_id].rate = rate; + + return 0; } /* NBL_vsi_SET_OPS(ops_name, func) @@ -124,8 +212,14 @@ do { \ NBL_VSI_SET_OPS(set_spoof_check_addr, nbl_res_set_spoof_check_addr); \ NBL_VSI_SET_OPS(set_vf_spoof_check, nbl_res_set_vf_spoof_check); \ NBL_VSI_SET_OPS(get_phy_caps, nbl_res_get_phy_caps); \ - NBL_VSI_SET_OPS(get_phy_state, nbl_res_get_phy_state); \ NBL_VSI_SET_OPS(get_vf_function_id, 
nbl_res_get_vf_function_id); \ + NBL_VSI_SET_OPS(get_vf_vsi_id, nbl_res_get_vf_vsi_id); \ + NBL_VSI_SET_OPS(register_func_mac, nbl_res_register_func_mac); \ + NBL_VSI_SET_OPS(register_func_link_forced, nbl_res_register_func_link_forced); \ + NBL_VSI_SET_OPS(register_func_vlan, nbl_res_register_func_vlan); \ + NBL_VSI_SET_OPS(get_link_forced, nbl_res_get_link_forced); \ + NBL_VSI_SET_OPS(register_func_rate, nbl_res_register_rate); \ + NBL_VSI_SET_OPS(register_func_trust, nbl_res_register_func_trust); \ } while (0) /* Structure starts here, adding an op should not modify anything below */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h index f4413ea383159f397290bed4cc0393bb8eb2a702..3fc943af5ed7caabb90721b881cf92cc7820c5fd 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h @@ -227,10 +227,76 @@ enum nbl_chan_msg_type { NBL_CHAN_MSG_SET_BRIDGE_MODE, NBL_CHAN_MSG_GET_VF_FUNCTION_ID, - NBL_CHAN_MSG_SET_VF_LINK_STATE, + NBL_CHAN_MSG_NOTIFY_LINK_FORCED, NBL_CHAN_MSG_SET_PMD_DEBUG, + NBL_CHAN_MSG_REGISTER_FUNC_MAC, + NBL_CHAN_MSG_SET_TX_RATE, + + NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, + NBL_CHAN_MSG_GET_LINK_FORCED, + + NBL_CHAN_MSG_REGISTER_FUNC_VLAN, + + NBL_CHAN_MSG_GET_FD_FLOW, + NBL_CHAN_MSG_GET_FD_FLOW_CNT, + NBL_CHAN_MSG_GET_FD_FLOW_ALL, + NBL_CHAN_MSG_GET_FD_FLOW_MAX, + NBL_CHAN_MSG_REPLACE_FD_FLOW, + NBL_CHAN_MSG_REMOVE_FD_FLOW, + NBL_CHAN_MSG_CFG_FD_FLOW_STATE, + + NBL_CHAN_MSG_REGISTER_FUNC_RATE, + NBL_CHAN_MSG_NOTIFY_VLAN, + NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, + + NBL_CHAN_MSG_STOP_ABNORMAL_SW_QUEUE, + NBL_CHAN_MSG_STOP_ABNORMAL_HW_QUEUE, + NBL_CHAN_MSG_NOTIFY_RESET_EVENT, + NBL_CHAN_MSG_ACK_RESET_EVENT, + NBL_CHAN_MSG_GET_VF_VSI_ID, + + NBL_CHAN_MSG_CONFIGURE_QOS, + NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, + NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, + NBL_CHAN_MSG_GET_VF_STATS, + NBL_CHAN_MSG_REGISTER_FUNC_TRUST, + NBL_CHAN_MSG_NOTIFY_TRUST, + NBL_CHAN_CHECK_VF_IS_ACTIVE, + NBL_CHAN_MSG_GET_ETH_ABNORMAL_STATS, + NBL_CHAN_MSG_GET_ETH_CTRL_STATS, + NBL_CHAN_MSG_GET_PAUSE_STATS, + NBL_CHAN_MSG_GET_ETH_MAC_STATS, + NBL_CHAN_MSG_GET_FEC_STATS, + NBL_CHAN_MSG_CFG_MULTI_MCAST_RULE, + NBL_CHAN_MSG_GET_LINK_DOWN_COUNT, + NBL_CHAN_MSG_GET_LINK_STATUS_OPCODE, + NBL_CHAN_MSG_GET_RMON_STATS, + NBL_CHAN_MSG_REGISTER_PF_NAME, + NBL_CHAN_MSG_GET_PF_NAME, + NBL_CHAN_MSG_CONFIGURE_RDMA_BW, + NBL_CHAN_MSG_SET_RATE_LIMIT, + NBL_CHAN_MSG_SET_TC_WGT, + NBL_CHAN_MSG_REMOVE_QUEUE, + NBL_CHAN_MSG_GET_MIRROR_TABLE_ID, + NBL_CHAN_MSG_CONFIGURE_MIRROR, + NBL_CHAN_MSG_CONFIGURE_MIRROR_TABLE, + NBL_CHAN_MSG_CLEAR_MIRROR_CFG, + NBL_CHAN_MSG_MIRROR_OUTPUTPORT_NOTIFY, + NBL_CHAN_MSG_CHECK_FLOWTABLE_SPEC, + NBL_CHAN_CHECK_VF_IS_VDPA, + NBL_CHAN_MSG_GET_VDPA_VF_STATS, + NBL_CHAN_MSG_SET_RX_RATE, + NBL_CHAN_GET_UVN_PKT_DROP_STATS, + NBL_CHAN_GET_USTORE_PKT_DROP_STATS, + NBL_CHAN_GET_USTORE_TOTAL_PKT_DROP_STATS, + NBL_CHAN_MSG_SET_WOL, + + NBL_CHAN_MSG_MTU_SET = 501, + NBL_CHAN_MSG_SET_RXFH_INDIR = 506, + NBL_CHAN_MSG_SET_RXFH_RSS_ALG_SEL = 508, + /* mailbox msg end */ NBL_CHAN_MSG_MAILBOX_MAX, @@ -239,14 +305,18 @@ enum nbl_chan_msg_type { NBL_CHAN_MSG_ADMINQ_GET_NVM_VERSION = 0x8102, NBL_CHAN_MSG_ADMINQ_REBOOT = 0x8104, NBL_CHAN_MSG_ADMINQ_FLR_NOTIFY = 0x8105, + NBL_CHAN_MSG_ADMINQ_NOTITY_FW_RESET = 0x8106, NBL_CHAN_MSG_ADMINQ_LOAD_P4 = 0x8107, NBL_CHAN_MSG_ADMINQ_LOAD_P4_DEFAULT = 0x8108, + NBL_CHAN_MSG_ADMINQ_EXT_ALERT = 
0x8109, NBL_CHAN_MSG_ADMINQ_FLASH_ERASE = 0x8201, NBL_CHAN_MSG_ADMINQ_FLASH_READ = 0x8202, NBL_CHAN_MSG_ADMINQ_FLASH_WRITE = 0x8203, NBL_CHAN_MSG_ADMINQ_FLASH_ACTIVATE = 0x8204, NBL_CHAN_MSG_ADMINQ_RESOURCE_WRITE = 0x8205, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ = 0x8206, + NBL_CHAN_MSG_ADMINQ_REGISTER_WRITE = 0x8207, + NBL_CHAN_MSG_ADMINQ_REGISTER_READ = 0x8208, NBL_CHAN_MSG_ADMINQ_GET_NVM_BANK_INDEX = 0x820B, NBL_CHAN_MSG_ADMINQ_VERIFY_NVM_BANK = 0x820C, NBL_CHAN_MSG_ADMINQ_FLASH_LOCK = 0x820D, @@ -255,8 +325,8 @@ enum nbl_chan_msg_type { NBL_CHAN_MSG_ADMINQ_PORT_NOTIFY = 0x8301, NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM = 0x8302, NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS = 0x8303, + NBL_CHAN_MSG_ADMINQ_GET_FEC_STATS = 0x8305, /* TODO: new kernel and ethtool support show fec stats */ - NBL_CHAN_MSG_ADMINQ_GET_FEC_STATS = 0x408, NBL_CHAN_MSG_ADMINQ_EMP_CONSOLE_WRITE = 0x8F01, NBL_CHAN_MSG_ADMINQ_EMP_CONSOLE_READ = 0x8F02, @@ -273,6 +343,7 @@ struct nbl_chan_vsi_qid_info { enum nbl_chan_state { NBL_CHAN_INTERRUPT_READY, NBL_CHAN_RESETTING, + NBL_CHAN_ABNORMAL, NBL_CHAN_STATE_NBITS }; @@ -288,6 +359,11 @@ struct nbl_chan_param_del_macvlan { u16 vsi; }; +struct nbl_chan_param_cfg_multi_mcast { + u16 vsi; + u16 enable; +}; + struct nbl_chan_param_register_net_info { u16 pf_bdf; u64 vf_bar_start; @@ -296,6 +372,7 @@ struct nbl_chan_param_register_net_info { u16 offset; u16 stride; u64 pf_bar_start; + u16 is_vdpa; }; struct nbl_chan_param_alloc_txrx_queues { @@ -323,6 +400,7 @@ struct nbl_chan_param_cfg_dsch { struct nbl_chan_param_setup_cqs { u16 vsi_id; u16 real_qps; + bool rss_indir_set; }; struct nbl_chan_param_set_promisc_mode { @@ -355,6 +433,7 @@ struct nbl_chan_param_get_eth_id { u16 vsi_id; u8 eth_mode; u8 eth_id; + u8 logic_eth_id; }; struct nbl_chan_param_get_queue_info { @@ -395,6 +474,11 @@ struct nbl_chan_param_get_rxfh_indir { u32 rxfh_indir_size; }; +struct nbl_chan_param_set_rxfh_rss_alg_sel { + u16 vsi_id; + u8 rss_alg_sel; +}; + struct nbl_chan_result_get_real_bdf { u8 bus; u8 dev; @@ -438,7 +522,7 @@ struct nbl_chan_resource_write_param { u32 resid; u32 offset; u32 len; - u8 data[]; + u8 data[0]; }; struct nbl_chan_resource_read_param { @@ -447,6 +531,15 @@ struct nbl_chan_resource_read_param { u32 len; }; +struct nbl_chan_adminq_reg_read_param { + u32 reg; +}; + +struct nbl_chan_adminq_reg_write_param { + u32 reg; + u32 value; +}; + struct nbl_chan_param_flash_write { u32 bank_id; u32 offset; @@ -463,7 +556,7 @@ struct nbl_chan_param_load_p4 { u32 section_offset; u32 load_start; u32 load_end; - u8 data[]; + u8 data[0]; }; struct nbl_chan_result_flash_activate { @@ -487,7 +580,8 @@ struct nbl_chan_param_module_eeprom_info { u8 page; u8 bank; u32 write:1; - u32 rsvd:31; + u32 version:2; + u32 rsvd:29; u16 offset; u16 length; #define NBL_MODULE_EEPRO_WRITE_MAX_LEN (4) @@ -499,11 +593,193 @@ struct nbl_chan_param_eth_rep_notify_link_state { u8 link_state; }; +struct nbl_chan_param_set_rxfh_indir { + u16 vsi_id; + u32 indir_size; +#define NBL_RXFH_INDIR_MAX_SIZE (512) + u32 indir[NBL_RXFH_INDIR_MAX_SIZE]; +}; + +struct nbl_chan_cfg_ktls_keymat { + u32 index; + u8 mode; +#define NBL_CHAN_SALT_LEN 4 +#define NBL_CHAN_KEY_LEN 32 + u8 salt[NBL_CHAN_SALT_LEN]; + u8 key[NBL_CHAN_KEY_LEN]; + u8 key_len; +}; + +struct nbl_chan_cfg_ktls_record { + bool init; + u32 index; + u32 tcp_sn; + u64 rec_num; +}; + +struct nbl_chan_cfg_ktls_flow { + u32 index; + u32 vsi; +#define NBL_CHAN_KTLS_FLOW_LEN 12 + u32 data[NBL_CHAN_KTLS_FLOW_LEN]; +}; + +struct nbl_chan_ipsec_index { + int index; + struct 
nbl_ipsec_cfg_info cfg_info; +}; + +struct nbl_chan_cfg_ipsec_sad { + u32 index; + struct nbl_ipsec_sa_entry sa_entry; +}; + +struct nbl_chan_cfg_ipsec_flow { + u32 index; + u32 vsi; +#define NBL_CHAN_IPSEC_FLOW_LEN 12 + u32 data[NBL_CHAN_IPSEC_FLOW_LEN]; +}; + +/* for PMD driver */ +struct nbl_chan_param_get_rep_vsi_id { + u16 pf_id; + u16 vf_id; +}; + +struct nbl_chan_param_register_net_rep { + u16 pf_id; + u16 vf_id; +}; + struct nbl_chan_param_set_eth_mac_addr { u8 mac[ETH_ALEN]; u8 eth_id; }; +struct nbl_chan_cmdq_init_info { + u64 pa; + u32 len; + u16 vsi_id; + u16 bdf_num; +}; + +struct nbl_chan_rep_cfg_info { + u16 vsi_id; + u8 inner_type; + u8 outer_type; + u8 rep_type; +}; + +struct nbl_flow_prf_data { + u16 pp_id; + u16 prf_id; +}; + +struct nbl_flow_prf_upcall_info { + u32 item_cnt; +#define NBL_MAX_PP_NUM 64 + struct nbl_flow_prf_data prf_data[NBL_MAX_PP_NUM]; +}; + +struct nbl_acl_cfg_param { + u32 acl_enable:1; + u32 acl_key_width:9; + u32 acl_key_cap:16; + u32 acl_tcam_idx:4; + u32 acl_stage:1; + u32 loop_en:1; +#define NBL_ACL_TCAM_CFG_NUM 4 +#define NBL_ACL_AD_CFG_NUM 4 + u32 tcam_cfg[NBL_ACL_TCAM_CFG_NUM]; + u32 action_cfg[NBL_ACL_AD_CFG_NUM]; +}; + +struct nbl_chan_flow_init_info { + u8 acl_switch; + u16 vsi_id; + u16 acl_loop_en; +#define NBL_ACL_CFG_CNT 2 + struct nbl_acl_cfg_param acl_cfg[NBL_ACL_CFG_CNT]; + struct nbl_flow_prf_upcall_info flow_cfg; +}; + +#pragma pack(1) + +struct nbl_chan_regs_info { + union { + u16 depth; + struct { + u16 ram_id:5; + u16 s_depth:11; + }; + }; + u16 data_len:6; /* align to u32 */ + u16 tbl_name:7; + u16 mode:3; + u32 data[]; +}; + +struct nbl_chan_bulk_regs_info { + u32 item_cnt:9; + u32 rsv:7; + u32 data_len:16; /* align to u32 */ + u32 data[]; +}; + +#pragma pack() + +struct nbl_chan_param_get_queue_cxt { + u16 vsi_id; + u16 local_queue; +}; + +struct nbl_chan_param_cfg_log { + u16 vsi_id; + u16 qps; + bool vld; +}; + +struct nbl_chan_vdpaq_init_info { + u64 pa; + u32 size; +}; + +struct nbl_chan_param_cfg_lag_hash_algorithm { + u16 eth_id; + u16 lag_id; + enum netdev_lag_hash hash_type; +}; + +struct nbl_chan_param_cfg_lag_member_fwd { + u16 eth_id; + u16 lag_id; + u8 fwd; +}; + +struct nbl_chan_param_cfg_lag_member_up_attr { + u16 eth_id; + u16 lag_id; + bool enable; +}; + +struct nbl_chan_param_cfg_lag_mcc { + u16 eth_id; + u16 lag_id; + bool enable; +}; + +struct nbl_chan_param_cfg_bond_shaping { + u8 eth_id; + bool enable; +}; + +struct nbl_chan_param_cfg_bgid_back_pressure { + u8 main_eth_id; + u8 other_eth_id; + bool enable; +}; + struct nbl_chan_param_ctrl_port_led { u32 eth_id; enum nbl_led_reg_ctrl led_status; @@ -520,9 +796,14 @@ struct nbl_chan_param_get_private_stat_data { u32 data_len; }; -struct nbl_chan_param_get_module_tempetature { - u8 eth_id; - enum nbl_module_temp_type type; +struct nbl_chan_param_get_hwmon { + u32 senser_id; + enum nbl_hwmon_type type; +}; + +struct nbl_chan_param_nd_upcall { + u16 vsi_id; + bool for_pmd; }; struct nbl_chan_param_restore_queue { @@ -542,33 +823,515 @@ struct nbl_chan_param_restore_hw_queue { int type; }; +struct nbl_chan_param_stop_abnormal_sw_queue { + u16 local_queue_id; + int type; +}; + +struct nbl_chan_param_stop_abnormal_hw_queue { + u16 vsi_id; + u16 local_queue_id; + int type; +}; + struct nbl_chan_param_get_vf_func_id { u16 vsi_id; int vf_id; }; +struct nbl_chan_param_get_vf_vsi_id { + u16 vsi_id; + int vf_id; +}; + +struct nbl_chan_param_register_func_mac { + u16 func_id; + u8 mac[ETH_ALEN]; +}; + +struct nbl_chan_param_register_trust { + u16 func_id; + bool 
trusted; +}; + +struct nbl_chan_param_register_vlan { + u16 func_id; + u16 vlan_tci; + u16 vlan_proto; +}; + +struct nbl_chan_param_set_tx_rate { + u16 func_id; + int tx_rate; +}; + +struct nbl_chan_param_set_txrx_rate { + u16 func_id; + int txrx_rate; + int burst; +}; + +struct nbl_chan_param_register_func_link_forced { + u16 func_id; + u8 link_forced; + bool should_notify; +}; + struct nbl_chan_param_notify_link_state { u8 link_state; u32 link_speed; }; +struct nbl_chan_param_set_mtu { + u16 vsi_id; + u16 mtu; +}; + +struct nbl_chan_param_get_uvn_pkt_drop_stats { + u16 vsi_id; + u16 num_queues; +}; + +struct nbl_register_net_param { + u16 pf_bdf; + u64 vf_bar_start; + u64 vf_bar_size; + u16 total_vfs; + u16 offset; + u16 stride; + u64 pf_bar_start; + u16 is_vdpa; +}; + +struct nbl_register_net_result { + u16 tx_queue_num; + u16 rx_queue_num; + u16 queue_size; + u16 rdma_enable; + + u64 hw_features; + u64 features; + + u16 max_mtu; + u16 queue_offset; + + u8 mac[ETH_ALEN]; + u16 vlan_proto; + u16 vlan_tci; + u32 rate; + bool trusted; + + u64 vlan_features; + u64 hw_enc_features; +}; + +#define NBL_CHAN_FDIR_FLOW_RULE_SIZE 1024 +enum nbl_chan_fdir_flow_type { + NBL_CHAN_FDIR_FLOW_FULL, /* for DPDK isolate flow */ + NBL_CHAN_FDIR_FLOW_ETHER, + NBL_CHAN_FDIR_FLOW_IPv4, + NBL_CHAN_FDIR_FLOW_IPv6, + NBL_CHAN_FDIR_FLOW_TCP_IPv4, + NBL_CHAN_FDIR_FLOW_TCP_IPv6, + NBL_CHAN_FDIR_FLOW_UDP_IPv4, + NBL_CHAN_FDIR_FLOW_UDP_IPv6, + NBL_CHAN_FDIR_FLOW_MAX_TYPE, +}; + +enum nbl_chan_fdir_rule_type { + NBL_CHAN_FDIR_RULE_NORMAL, + NBL_CHAN_FDIR_RULE_ISOLATE, + NBL_CHAN_FDIR_RULE_MAX, +}; + +enum nbl_chan_fdir_component_type { + NBL_CHAN_FDIR_KEY_SRC_MAC, + NBL_CHAN_FDIR_KEY_DST_MAC, + NBL_CHAN_FDIR_KEY_PROTO, + NBL_CHAN_FDIR_KEY_SRC_IPv4, + NBL_CHAN_FDIR_KEY_DST_IPv4, + NBL_CHAN_FDIR_KEY_L4PROTO, + NBL_CHAN_FDIR_KEY_SRC_IPv6, + NBL_CHAN_FDIR_KEY_DST_IPv6, + NBL_CHAN_FDIR_KEY_SPORT, + NBL_CHAN_FDIR_KEY_DPORT, + NBL_CHAN_FDIR_KEY_UDF, + NBL_CHAN_FDIR_ACTION_QUEUE, + NBL_CHAN_FDIR_ACTION_VSI +}; + +enum { + NBL_FD_STATE_OFF = 0, + NBL_FD_STATE_ON, + NBL_FD_STATE_FLUSH, + NBL_FD_STATE_MAX, +}; + +struct nbl_chan_param_fdir_replace { + enum nbl_chan_fdir_flow_type flow_type; + enum nbl_chan_fdir_rule_type rule_type; + u32 base_length; + u32 vsi; + u32 location; + u16 vf; + u16 ring; + u16 dport; + u16 global_queue_id; + bool order; + u32 tlv_length; + u8 tlv[]; +}; + +#define NBL_CHAN_FDIR_FLOW_TLV_SIZE (1024 - sizeof(struct nbl_chan_param_fdir_replace)) +#define NBL_CHAN_FDIR_TLV_HEADER_LEN 4 + +struct nbl_chan_param_fdir_del { + enum nbl_chan_fdir_rule_type rule_type; + u32 location; + u16 vsi; +}; + +struct nbl_chan_param_fdir_flowcnt { + enum nbl_chan_fdir_rule_type rule_type; + u16 vsi; +}; + +struct nbl_chan_param_get_fd_flow { + u32 location; + enum nbl_chan_fdir_rule_type rule_type; + u16 vsi_id; +}; + +#define NBL_CHAN_GET_FD_LOCS_MAX 512 +struct nbl_chan_param_get_fd_flow_all { + enum nbl_chan_fdir_rule_type rule_type; + u16 start; + u16 num; + u16 vsi_id; +}; + +struct nbl_chan_result_get_fd_flow_all { + u32 rule_locs[NBL_CHAN_GET_FD_LOCS_MAX]; +}; + +struct nbl_chan_param_config_fd_flow_state { + enum nbl_chan_fdir_rule_type rule_type; + u16 vsi_id; + u16 state; +}; + +struct nbl_lag_mem_list_info { + u16 vsi_id; + u8 eth_id; + bool active; +}; + +struct nbl_lag_member_list_param { + struct net_device *bond_netdev; + u16 lag_num; + u16 lag_id; + /* port_list only contains ports that are active */ + u8 port_list[NBL_LAG_MAX_PORTS]; + /* member_list always contains all registered member */ + struct 
nbl_lag_mem_list_info member_list[NBL_LAG_MAX_PORTS]; + bool duppkt_enable; +}; + +struct nbl_queue_err_stats { + u16 dvn_pkt_drop_cnt; + u32 uvn_stat_pkt_drop; +}; + +struct nbl_eth_mac_stats { + u64 frames_txd_ok; + u64 frames_rxd_ok; + u64 octets_txd_ok; + u64 octets_rxd_ok; + u64 multicast_frames_txd_ok; + u64 broadcast_frames_txd_ok; + u64 multicast_frames_rxd_ok; + u64 broadcast_frames_rxd_ok; +}; + +enum rmon_range { + ETHER_STATS_PKTS_64_OCTETS, + ETHER_STATS_PKTS_65_TO_127_OCTETS, + ETHER_STATS_PKTS_128_TO_255_OCTETS, + ETHER_STATS_PKTS_256_TO_511_OCTETS, + ETHER_STATS_PKTS_512_TO_1023_OCTETS, + ETHER_STATS_PKTS_1024_TO_1518_OCTETS, + ETHER_STATS_PKTS_1519_TO_2047_OCTETS, + ETHER_STATS_PKTS_2048_TO_MAX_OCTETS, + ETHER_STATS_PKTS_MAX, +}; + +struct nbl_rmon_stats { + u64 undersize_frames_rxd_goodfcs; + u64 oversize_frames_rxd_goodfcs; + u64 undersize_frames_rxd_badfcs; + u64 oversize_frames_rxd_badfcs; + + u64 rmon_rx_range[ETHER_STATS_PKTS_MAX]; + u64 rmon_tx_range[ETHER_STATS_PKTS_MAX]; +}; + +struct nbl_rdma_register_param { + bool has_rdma; + u32 mem_type; + int intr_num; + int id; +}; + +struct nbl_phy_caps { + u32 speed; /* enum nbl_eth_speed */ + u32 fec_ability; + u32 pause_param; /* bit0 tx, bit1 rx */ +}; + +struct nbl_fc_info { + u32 rx_pause; + u32 tx_pause; +}; + +/* for pmd driver */ +struct nbl_register_net_rep_result { + u16 vsi_id; + u16 func_id; +}; + +/* emp to ctrl dev notify */ +struct nbl_port_notify { + u32 id; + u32 speed; /* in 10 Mbps units */ + u8 link_state:1; /* 0:down, 1:up */ + u8 module_inplace:1; /* 0: not inplace, 1:inplace */ + u8 revd0:6; + u8 flow_ctrl; /* enum nbl_flow_ctrl */ + u8 fec; /* enum nbl_port_fec */ + u8 active_lanes; + u8 rsvd1[4]; + u64 advertising; /* enum nbl_port_cap */ + u64 lp_advertising; /* enum nbl_port_cap */ +}; + +#define NBL_EMP_LOG_MAX_SIZE (256) +struct nbl_emp_alert_log_event { + u64 uptime; + u8 level; + u8 data[256]; +}; + +#define NBL_EMP_ALERT_DATA_MAX_SIZE (4032) +struct nbl_chan_param_emp_alert_event { + u16 type; + u16 len; + u8 data[NBL_EMP_ALERT_DATA_MAX_SIZE]; +}; + +struct nbl_fec_stats { + u32 corrected_blocks; + u32 uncorrectable_blocks; + u32 corrected_bits; + u32 corrected_lane[4]; + u32 uncorrectable_lane[4]; + u32 corrected_bits_lane[4]; +}; + +struct nbl_port_state { + u64 port_caps; + u64 port_advertising; + u64 port_lp_advertising; + u32 link_speed; + u8 active_fc; + u8 active_fec; /* enum nbl_port_fec */ + u8 link_state; + u8 module_inplace; + u8 port_type; /* enum nbl_port_type */ + u8 port_max_rate; /* enum nbl_port_max_rate */ + u8 fw_port_max_speed; /* enum nbl_fw_port_speed */ + u8 module_repluged; +}; + +struct nbl_eth_ctrl_stats { + u64 macctrl_frames_txd_ok; + u64 macctrl_frames_rxd; + u64 unsupported_opcodes_rx; +}; + +struct nbl_pause_stats { + u64 rx_pause_frames; + u64 tx_pause_frames; +}; +struct nbl_port_advertising { + u8 eth_id; + u64 speed_advert; + u8 active_fc; + u8 active_fec; /* enum nbl_port_fec */ + u8 autoneg; +}; + +struct nbl_eth_link_info { + u8 link_status; + u32 link_speed; +}; + +struct nbl_board_port_info { + u8 eth_num; + u8 eth_speed; + u8 p4_version; + u8 rsv[5]; +}; + +struct nbl_bond_port_info { + u16 vsi_id; + u8 eth_id; + u8 is_active; +}; + +struct nbl_bond_info { + struct nbl_bond_port_info port[NBL_LAG_MAX_PORTS]; + u8 lag_id; + u8 mem_num; +}; + +struct nbl_bond_param { + struct nbl_bond_info info[NBL_LAG_MAX_NUM]; + u8 lag_num; +}; + +/* to support channel req and response use different driver version, + * to define the struct to same with struct 
ethtool_coalesce + */ +struct nbl_chan_param_get_coalesce { + u32 cmd; + u32 rx_coalesce_usecs; + u32 rx_max_coalesced_frames; + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + u32 tx_coalesce_usecs; + u32 tx_max_coalesced_frames; + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + u32 stats_block_coalesce_usecs; + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + u32 rate_sample_interval; +}; + +enum nbl_fw_reset_type { + NBL_FW_HIGH_TEMP_RESET, + NBL_FW_RESET_TYPE_MAX, +}; + +struct nbl_chan_param_notify_fw_reset_info { + u16 type; /* enum nbl_fw_reset_type */ + u16 len; + u16 data[0]; +}; + +struct nbl_chan_param_configure_rdma_bw { + u8 eth_id; + int rdma_bw; +}; + +struct nbl_chan_param_configure_qos { + u8 eth_id; + u8 trust; + u8 pfc[NBL_MAX_PFC_PRIORITIES]; + u8 dscp2prio_map[NBL_DSCP_MAX]; +}; + +struct nbl_chan_param_set_pfc_buffer_size { + u8 eth_id; + u8 prio; + int xoff; + int xon; +}; + +struct nbl_chan_param_get_pfc_buffer_size { + u8 eth_id; + u8 prio; +}; + +struct nbl_chan_param_get_pfc_buffer_size_resp { + int xoff; + int xon; +}; + +struct nbl_chan_param_set_rate_limit { + enum nbl_traffic_type type; + u32 rate; +}; + +struct nbl_chan_param_pf_name { + u16 vsi_id; + char dev_name[IFNAMSIZ]; +}; + +struct nbl_chan_param_set_tc_wgt { + u16 vsi_id; + u8 num_tc; + u8 weight[NBL_MAX_TC_NUM]; +}; + +struct nbl_chan_param_get_mirror_table_id { + u16 vsi_id; + int dir; + bool mirror_en; + u8 mt_id; +}; + +struct nbl_chan_param_mirror { + int dir; + bool mirror_en; + u8 mt_id; +}; + +struct nbl_chan_param_mirror_table { + bool mirror_en; + u8 mt_id; + u16 func_id; +}; + +struct nbl_chan_param_check_flow_spec { + u16 vlan_list_cnt; + u16 unicast_mac_cnt; + u16 multi_mac_cnt; +}; + +struct nbl_chan_param_set_wol { + u8 eth_id; + bool enable; +}; + struct nbl_chan_send_info { - u16 dstid; - u16 msg_type; void *arg; size_t arg_len; void *resp; size_t resp_len; + u16 dstid; + u16 msg_type; u16 ack; + u16 ack_len; }; struct nbl_chan_ack_info { + void *data; + int err; + u32 data_len; u16 dstid; u16 msg_type; u16 msgid; - int err; - void *data; - u32 data_len; }; enum nbl_channel_type { @@ -588,6 +1351,7 @@ struct nbl_channel_ops { int (*send_msg)(void *priv, struct nbl_chan_send_info *chan_send); int (*send_ack)(void *priv, struct nbl_chan_ack_info *chan_ack); int (*register_msg)(void *priv, u16 msg_type, nbl_chan_resp func, void *callback_priv); + void (*unregister_msg)(void *priv, u16 msg_type); int (*cfg_chan_qinfo_map_table)(void *priv, u8 chan_type); bool (*check_queue_exist)(void *priv, u8 chan_type); int (*setup_queue)(void *priv, u8 chan_type); @@ -595,14 +1359,17 @@ struct nbl_channel_ops { int (*set_listener_msgtype)(void *priv, int msgtype); void (*clear_listener_info)(void *priv); int (*teardown_queue)(void *priv, u8 chan_type); - int (*set_queue_interrupt_state)(void *priv, u8 chan_type, bool ready); void (*clean_queue_subtask)(void *priv, u8 chan_type); int (*dump_txq)(void *priv, struct seq_file *m, u8 type); int (*dump_rxq)(void *priv, struct seq_file *m, u8 type); u32 (*get_adminq_tx_buf_size)(void *priv); + int (*init_cmdq)(struct device *dev, void *priv); + int (*deinit_cmdq)(struct device *dev, void *priv, u8 inst_id); + int 
(*send_cmd)(void *priv, const void *hdr, void *cmd);
 	int (*setup_keepalive)(void *priv, u16 dest_id, u8 chan_type);
 	void (*remove_keepalive)(void *priv, u8 chan_type);
 	void (*register_chan_task)(void *priv, u8 chan_type, struct work_struct *task);
+	void (*set_queue_state)(void *priv, enum nbl_chan_state state, u8 chan_type, u8 set);
 };
 
 struct nbl_channel_ops_tbl {
@@ -613,4 +1380,66 @@ struct nbl_channel_ops_tbl {
 int nbl_chan_init_common(void *p, struct nbl_init_param *param);
 void nbl_chan_remove_common(void *p);
 
+enum nbl_cmd_opcode_list {
+	NBL_CMD_OP_WRITE,
+	NBL_CMD_OP_READ,
+	NBL_CMD_OP_SEARCH,
+	NBL_CMD_OP_DELETE,
+};
+
+enum nbl_flow_opcode_list {
+	NBL_OPCODE_QUERY,
+	NBL_OPCODE_ADD,
+	NBL_OPCODE_UPDATE,
+	NBL_OPCODE_DELETE,
+};
+
+/* command header structure */
+struct nbl_cmd_hdr {
+	u8 block;
+	u8 module;
+	u8 table;
+	u16 opcode;
+};
+
+struct nbl_cmd_content {
+	u32 in_length;
+	u32 out_length;
+	u64 in_params;
+	u64 out_params;
+	u16 entries;
+	u32 idx;
+	u64 in;
+	u64 out;
+	void *in_va;
+	void *out_va;
+	u32 wait;
+};
+
+#define NBL_CMDQ_MAX_OP_CODE 16
+/* register block, module and table info */
+enum nbl_flow_opcode {
+	NBL_FEM_KTAT_WRITE,
+	NBL_FEM_KTAT_READ,
+	NBL_FEM_KTAT_SEARCH,
+	NBL_FEM_HT_WRITE,
+	NBL_FEM_HT_READ,
+	NBL_ACL_TCAM_WRITE,
+	NBL_ACL_TCAM_READ,
+	NBL_ACL_TCAM_QUERY,
+	NBL_ACL_FLOWID_READ,
+	NBL_ACL_STATID_READ,
+};
+
+#define NBL_BLOCK_PPE 0
+#define NBL_BLOCK_DP 1
+#define NBL_BLOCK_IFC 2
+#define NBL_MODULE_FEM 0
+#define NBL_MODULE_ACL 1
+#define NBL_TABLE_FEM_KTAT 0
+#define NBL_TABLE_FEM_HT 1
+#define NBL_TABLE_ACL_TCAM 0
+#define NBL_TABLE_ACL_FLOWID 1
+#define NBL_TABLE_ACL_STATID 2
+
 #endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h
index 12b8fead495ff74fb8033c69bf1ee0b211517114..581482f7708bf74fa1d7eff8ffb8440a52ce5635 100644
--- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h
@@ -168,6 +168,31 @@ do { \
 		dev_dbg(NBL_COMMON_TO_DEV(_common), fmt, ##__VA_ARGS__); \
 } while (0)
 
+static void __maybe_unused nbl_printk(struct device *dev, int level, const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	if (WARN_ONCE(level < LOGLEVEL_EMERG || level > LOGLEVEL_DEBUG,
+		      "Level %d is out of range, set to default level\n", level))
+		level = LOGLEVEL_DEFAULT;
+
+	va_start(args, format);
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	dev_printk_emit(level, dev, "%s %s: %pV", dev_driver_string(dev), dev_name(dev),
+			&vaf);
+	va_end(args);
+}
+
+/* supports explicit log levels, e.g. LOGLEVEL_EMERG/LOGLEVEL_CRIT */
+#define nbl_log(common, level, format, ...) \
+do { \
+	typeof(common) _common = (common); \
+	nbl_printk(NBL_COMMON_TO_DEV(_common), level, format, ##__VA_ARGS__); \
+} while (0)
+
 #define NBL_COMMON_TO_PDEV(common) ((common)->pdev)
 #define NBL_COMMON_TO_DEV(common) ((common)->dev)
 #define NBL_COMMON_TO_DMA_DEV(common) ((common)->dma_dev)
@@ -176,19 +201,27 @@ do { \
 #define NBL_COMMON_TO_ETH_MODE(common) ((common)->eth_mode)
 #define NBL_COMMON_TO_DEBUG_LVL(common) ((common)->debug_lvl)
 #define NBL_COMMON_TO_VF_CAP(common) ((common)->is_vf)
+#define NBL_COMMON_TO_OCP_CAP(common) ((common)->is_ocp)
 #define NBL_COMMON_TO_PCI_USING_DAC(common) ((common)->pci_using_dac)
 #define NBL_COMMON_TO_MGT_PF(common) ((common)->mgt_pf)
 #define NBL_COMMON_TO_PCI_FUNC_ID(common) ((common)->function)
 #define NBL_COMMON_TO_BOARD_ID(common) ((common)->board_id)
+#define NBL_COMMON_TO_LOGIC_ETH_ID(common) ((common)->logic_eth_id)
+#define NBL_COMMON_TO_ETH_MAX_SPEED(common) ((common)->eth_max_speed)
 
 #define NBL_ONE_ETHERNET_PORT (1)
 #define NBL_TWO_ETHERNET_PORT (2)
 #define NBL_FOUR_ETHERNET_PORT (4)
+#define NBL_DEFAULT_VSI_ID_GAP (1024)
 #define NBL_TWO_ETHERNET_VSI_ID_GAP (512)
 #define NBL_FOUR_ETHERNET_VSI_ID_GAP (256)
-#define NBL_VSI_ID_GAP(mode) ((mode) == NBL_FOUR_ETHERNET_PORT ? \
-			      NBL_FOUR_ETHERNET_VSI_ID_GAP : \
-			      NBL_TWO_ETHERNET_VSI_ID_GAP)
+
+#define NBL_VSI_ID_GAP(m) \
+({ \
+	typeof(m) _m = (m); \
+	_m == NBL_FOUR_ETHERNET_PORT ? NBL_FOUR_ETHERNET_VSI_ID_GAP : \
+	(_m == NBL_TWO_ETHERNET_PORT ? NBL_TWO_ETHERNET_VSI_ID_GAP : NBL_DEFAULT_VSI_ID_GAP); \
+})
 
 #define NBL_BOOTIS_ECPU_ETH0_FUNCTION (2)
 #define NBL_BOOTIS_ECPU_ETH1_FUNCTION (3)
@@ -200,8 +233,9 @@ do { \
 #define NBL_SKB_FILL_VSI_ID_OFF (32)
 #define NBL_SKB_FILL_EXT_HDR_OFF (34)
+#define NBL_INVALID_QUEUE_ID (0xFFFF)
 
-#define NBL_INDEX_SIZE_MAX (64 * 1024) /* index max sise */
+#define NBL_INDEX_SIZE_MAX (512 * 1024) /* index max size */
 
 #define NBL_INDEX_TBL_KEY_INIT(key, dev_arg, start_index_arg, index_size_arg, key_size_arg) \
 do { \
@@ -216,33 +250,41 @@ struct nbl_common_info {
 	struct pci_dev *pdev;
 	struct device *dev;
 	struct device *dma_dev;
+	struct devlink_port *devlink_port;
 	u32 debug_lvl;
 	u32 msg_enable;
 	u16 vsi_id;
 	u8 eth_id;
+	u8 logic_eth_id;
 	u8 eth_mode;
 	u8 is_vf;
 	u8 function;
 	u8 devid;
 	u8 bus;
+	/* only valid for ctrldev */
+	u8 hw_bus;
 	u16 mgt_pf;
 	u8 board_id;
 	bool pci_using_dac;
 	u8 tc_inst_id; /* for tc flow and cmdq */
+	u8 is_ocp;
 	enum nbl_product_type product_type;
+
+	u32 eth_max_speed;
+	bool wol_ena;
 };
 
-struct nbl_netdev_rep_attr {
+struct nbl_netdev_name_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct device *dev,
-			struct nbl_netdev_rep_attr *attr, char *buf);
+			struct nbl_netdev_name_attr *attr, char *buf);
 	ssize_t (*store)(struct device *dev,
-			 struct nbl_netdev_rep_attr *attr, const char *buf, size_t len);
-	int rep_id;
+			 struct nbl_netdev_name_attr *attr, const char *buf, size_t len);
+	char net_dev_name[IFNAMSIZ];
 };
 
 struct nbl_index_tbl_key {
@@ -252,6 +294,50 @@
 	u32 key_size;
 };
 
+struct nbl_index_key_extra {
+	u32 index_num;
+	/* begin_idx % begin_idx_multiple == 0; e.g. 2 forces an even base index */
+	u32 begin_idx_multiple;
+	/* true: look up an existing node only; index_num and begin_idx_multiple are ignored */
+	bool not_alloc_new_node;
+};
+
+#define NBL_INDEX_EXTRA_KEY_INIT(key, idx_num_arg, multiple_arg, not_alloc_arg) \
+do { \
+	typeof(key) __key = key; \
+	__key->index_num = idx_num_arg; \
+	__key->begin_idx_multiple = multiple_arg; \
+	__key->not_alloc_new_node = not_alloc_arg; \
+} while (0)
+
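/*
 * A minimal usage sketch for the index-table helpers above, not part of the
 * exported interface: the function name and the u32 key are illustrative, the
 * table is assumed to come from nbl_common_init_index_table(), and
 * nbl_common_get_index() is assumed to return the allocated base index (or a
 * negative value on failure). It requests two consecutive indexes whose base
 * index is even, matching the begin_idx_multiple semantics described in
 * struct nbl_index_key_extra, and reports failure through nbl_log().
 */
static int nbl_example_get_even_index_pair(struct nbl_common_info *common,
					   void *idx_tbl, u32 flow_key)
{
	struct nbl_index_key_extra extra;
	int base_idx;

	/* two indexes, base index aligned to a multiple of 2, allocation allowed */
	NBL_INDEX_EXTRA_KEY_INIT(&extra, 2, 2, false);

	base_idx = nbl_common_get_index(idx_tbl, &flow_key, &extra);
	if (base_idx < 0)
		nbl_log(common, LOGLEVEL_ERR,
			"no even-aligned index pair for key %u", flow_key);

	return base_idx;
}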
+struct nbl_index_tbl_del_key { + void *action_priv; + void (*action_func)(void *priv, int index, void *data); +}; + +#define NBL_INDEX_TBL_DEL_KEY_INIT(key, priv_arg, act_func_arg) \ +do { \ + typeof(key) __key = key; \ + __key->action_priv = priv_arg; \ + __key->action_func = act_func_arg; \ +} while (0) + +struct nbl_index_tbl_scan_key { + bool del; + u8 resv[3]; + void *action_priv; + void (*action_func)(void *priv, int index, void *data); +}; + +#define NBL_INDEX_TBL_SCAN_KEY_INIT(key, del_arg, priv_arg, act_func_arg) \ +do { \ + typeof(key) __key = key; \ + __key->del = del_arg; \ + memset(__key->resv, 0, sizeof(__key->resv)); \ + __key->action_priv = priv_arg; \ + __key->action_func = act_func_arg; \ +} while (0) + struct nbl_hash_tbl_key { struct device *dev; u16 key_size; @@ -403,7 +489,7 @@ do { \ void nbl_convert_mac(u8 *mac, u8 *reverse_mac); void nbl_common_queue_work(struct work_struct *task, bool ctrl_task, bool singlethread); -void nbl_common_queue_work_rdma(struct work_struct *task); +void nbl_common_queue_work_rdma(struct work_struct *task, bool singlethread); void nbl_common_queue_delayed_work(struct delayed_work *task, u32 msec, bool ctrl_task, bool singlethread); void nbl_common_queue_delayed_work_keepalive(struct delayed_work *task, u32 msec); @@ -419,22 +505,35 @@ int nbl_common_create_wq(void); void nbl_debugfs_func_init(void *p, struct nbl_init_param *param); void nbl_debugfs_func_remove(void *p); +int nbl_dma_iommu_change_translate(struct nbl_common_info *common); +void nbl_dma_iommu_exit_translate(struct nbl_common_info *common); bool nbl_dma_iommu_status(struct pci_dev *pdev); -bool nbl_dma_remap_status(struct pci_dev *pdev); -void nbl_net_addr_rep_attr(struct nbl_netdev_rep_attr *rep_attr, int rep_id); +bool nbl_dma_remap_status(struct pci_dev *pdev, u64 *dma_limit); u32 nbl_common_pf_id_subtraction_mgtpf_id(struct nbl_common_info *common, u32 pf_id); void *nbl_common_init_index_table(struct nbl_index_tbl_key *key); -void nbl_common_remove_index_table(void *priv); -int nbl_common_get_index(void *priv, void *key, u32 key_size); -void nbl_common_free_index(void *priv, void *key, u32 key_size); +void nbl_common_remove_index_table(void *priv, struct nbl_index_tbl_del_key *key); +void nbl_common_scan_index_table(void *priv, struct nbl_index_tbl_scan_key *key); +int nbl_common_get_index(void *priv, void *key, struct nbl_index_key_extra *extra_key); +int nbl_common_get_index_with_data(void *priv, void *key, struct nbl_index_key_extra *extra_key, + void *data, u32 data_size, void **output_data); +int nbl_common_alloc_index(void *priv, void *key, struct nbl_index_key_extra *extra_key, + void *data, u32 data_size, void **output_data); +void nbl_common_free_index(void *priv, void *key); +int nbl_common_find_available_idx(unsigned long *addr, u32 size, u32 idx_num, u32 multiple); /* ---- EVENT-NOTIFIER ---- */ enum nbl_event_type { - NBL_EVENT_LAG_UPDATE = 0, + NBL_EVENT_RDMA_BOND_UPDATE = 0, NBL_EVENT_OFFLOAD_STATUS_CHANGED, NBL_EVENT_LINK_STATE_UPDATE, - NBL_EVENT_DEV_MODE_SWITCH, - NBL_EVENT_RDMA_ADEV_UPDATE, + NBL_EVENT_ACL_STATE_UPDATE, + NBL_EVENT_NETDEV_STATE_CHANGE, + NBL_EVENT_RESET_EVENT, + NBL_EVENT_QUEUE_ALLOC, + NBL_EVENT_CHANGE_MTU, + NBL_EVENT_MIRROR_OUTPUTPORT, + NBL_EVENT_MIRROR_OUTPUTPORT_DEVLAYER, /* for dev layer */ + NBL_EVENT_MIRROR_SELECTPORT, NBL_EVENT_MAX, }; @@ -443,11 +542,44 @@ struct nbl_event_callback { void *callback_data; }; -struct nbl_event_dev_mode_switch_data { - int op; - int ret; +enum nbl_rdma_subevent_type { + 
NBL_SUBEVENT_CREATE_ADEV = 1, + NBL_SUBEVENT_RELEASE_ADEV, + NBL_SUBEVENT_CREATE_BOND_ADEV, + NBL_SUBEVENT_RELEASE_BOND_ADEV, + NBL_SUBEVENT_UPDATE_BOND_MEMBER, + NBL_SUBEVENT_UPDATE_MTU, + NBL_SUBEVENT_MAX, }; +struct nbl_event_param { + enum nbl_rdma_subevent_type subevent; + struct nbl_lag_member_list_param param; + int mtu; +}; + +struct nbl_event_offload_status_data { + u16 pf_vsi_id; + bool status; +}; + +enum nbl_dev_mode_switch_op { + NBL_DEV_KERNEL_TO_USER, + NBL_DEV_USER_TO_KERNEL, +}; + +struct nbl_event_acl_state_update_data { + bool is_offload; +}; + +struct nbl_event_queue_update_data { + u16 func_id; + u16 ring_num; + u16 *map; +}; + +typedef int (*handle_tlv)(u16 type, u16 length, u8 *val, void *data); + void nbl_event_notify(enum nbl_event_type type, void *event_data, u16 src_vsi_id, u16 board_id); int nbl_event_register(enum nbl_event_type type, struct nbl_event_callback *callback, u16 src_vsi_id, u16 board_id); @@ -458,7 +590,7 @@ void nbl_event_remove(void); void *nbl_common_init_hash_table(struct nbl_hash_tbl_key *key); void nbl_common_remove_hash_table(void *priv, struct nbl_hash_tbl_del_key *key); -int nbl_common_alloc_hash_node(void *priv, void *key, void *data); +int nbl_common_alloc_hash_node(void *priv, void *key, void *data, void **out_data); void *nbl_common_get_hash_node(void *priv, void *key); void nbl_common_free_hash_node(void *priv, void *key); void nbl_common_scan_hash_node(void *priv, struct nbl_hash_tbl_scan_key *key); @@ -471,4 +603,6 @@ void *nbl_common_get_hash_xy_node(void *priv, void *x_key, void *y_key); void nbl_common_free_hash_xy_node(void *priv, void *x_key, void *y_key); u16 nbl_common_scan_hash_xy_node(void *priv, struct nbl_hash_xy_tbl_scan_key *key); u16 nbl_common_get_hash_xy_node_num(void *priv); + +void nbl_flow_direct_parse_tlv_data(u8 *tlv, u32 length, handle_tlv callback, void *data); #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h index 4e42b2b15ef23ae64dedc499a25eb5f29672bde5..ed6717e11bf72c65358f9cc2c87f2accd7168354 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h @@ -24,10 +24,18 @@ int nbl_dev_init(void *p, struct nbl_init_param *param); void nbl_dev_remove(void *p); int nbl_dev_start(void *p, struct nbl_init_param *param); void nbl_dev_stop(void *p); +int nbl_dev_init_emp_class(void); +void nbl_dev_destroy_emp_class(void); void nbl_dev_user_module_init(void); void nbl_dev_user_module_destroy(void); +int nbl_dev_create_rep(void *p, int num_vfs); +int nbl_dev_destroy_rep(void *p); +int nbl_dev_setup_vf_config(void *p, int num_vfs); +void nbl_dev_remove_vf_config(void *p); +void nbl_dev_register_dev_name(void *p); +void nbl_dev_get_dev_name(void *p, char *dev_name); int nbl_dev_resume(void *p); int nbl_dev_suspend(void *p); #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h index 9107cb22b80f7d6dde93399730236dd68fa5a13e..65573ded91bb3f8ca56ee068d857b1e74bc3de14 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h @@ -33,25 +33,25 @@ struct nbl_dispatch_ops { int (*enable_adminq_irq)(void *p, u16 vector_id, bool enable_msix); u16 (*get_global_vector)(void *priv, u16 vsi_id, u16 local_vector_id); u16 
(*get_msix_entry_id)(void *priv, u16 vsi_id, u16 local_vector_id); - u32 (*get_chip_temperature)(void *priv); - u32 (*get_chip_temperature_max)(void *priv); - u32 (*get_chip_temperature_crit)(void *priv); - int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_module_temp_type type); + u32 (*get_chip_temperature)(void *priv, enum nbl_hwmon_type type, u32 senser_id); + int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_hwmon_type type); int (*get_mbx_irq_num)(void *priv); int (*get_adminq_irq_num)(void *priv); int (*get_abnormal_irq_num)(void *priv); - int (*alloc_rings)(void *priv, struct net_device *netdev, u16 tx_num, - u16 rx_num, u16 tx_desc_num, u16 rx_desc_num); + int (*alloc_rings)(void *priv, struct net_device *netdev, struct nbl_ring_param *param); void (*remove_rings)(void *priv); dma_addr_t (*start_tx_ring)(void *priv, u8 ring_index); void (*stop_tx_ring)(void *priv, u8 ring_index); dma_addr_t (*start_rx_ring)(void *priv, u8 ring_index, bool use_napi); void (*stop_rx_ring)(void *priv, u8 ring_index); void (*kick_rx_ring)(void *priv, u16 index); + void (*set_rings_xdp_prog)(void *priv, void *prog); + int (*register_xdp_rxq)(void *priv, u8 ring_index); + void (*unregister_xdp_rxq)(void *priv, u8 ring_index); int (*dump_ring)(void *priv, struct seq_file *m, bool is_tx, int index); int (*dump_ring_stats)(void *priv, struct seq_file *m, bool is_tx, int index); - struct napi_struct *(*get_vector_napi)(void *priv, u16 index); + struct nbl_napi_struct *(*get_vector_napi)(void *priv, u16 index); void (*set_vector_info)(void *priv, u8 *irq_enable_base, u32 irq_data, u16 index, bool mask_en); int (*register_net)(void *priv, struct nbl_register_net_param *register_param, @@ -61,6 +61,7 @@ struct nbl_dispatch_ops { int (*alloc_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num); void (*free_txrx_queues)(void *priv, u16 vsi_id); int (*setup_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); + int (*remove_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); void (*remove_all_queues)(void *priv, u16 vsi_id); int (*register_vsi2q)(void *priv, u16 vsi_index, u16 vsi_id, u16 queue_offset, u16 queue_num); @@ -69,13 +70,15 @@ struct nbl_dispatch_ops { int (*setup_rss)(void *priv, u16 vsi_id); void (*remove_rss)(void *priv, u16 vsi_id); int (*cfg_dsch)(void *priv, u16 vsi_id, bool vld); - int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps); + int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set); void (*remove_cqs)(void *priv, u16 vsi_id); + int (*cfg_qdisc_mqprio)(void *priv, struct nbl_tc_qidsc_param *param); void (*clear_queues)(void *priv, u16 vsi_id); + int (*check_offload_status)(void *priv, bool *is_down); + u16 (*get_vsi_global_qid)(void *priv, u16 vsi_id, u16 local_qid); u16 (*get_local_queue_id)(void *priv, u16 vsi_id, u16 global_queue_id); u16 (*get_vsi_global_queue_id)(void *priv, u16 vsi_id, u16 local_qid); - int (*enable_msix_irq)(void *priv, u16 global_vector_id); u8* (*get_msix_irq_enable_info)(void *priv, u16 global_vector_id, u32 *irq_data); int (*set_spoof_check_addr)(void *priv, u16 vsi_id, u8 *mac); int (*set_vf_spoof_check)(void *priv, u16 vsi_id, int vfid, u8 enable); @@ -89,16 +92,40 @@ struct nbl_dispatch_ops { void (*del_lldp_flow)(void *priv, u16 vsi); int (*add_multi_rule)(void *priv, u16 vsi); void (*del_multi_rule)(void *priv, u16 vsi); + int (*cfg_multi_mcast)(void *priv, u16 vsi, u16 enable); int (*setup_multi_group)(void *priv); void (*remove_multi_group)(void *priv); + void 
(*clear_accel_flow)(void *priv, u16 vsi_id); void (*clear_flow)(void *priv, u16 vsi_id); void (*dump_flow)(void *priv, struct seq_file *m); u16 (*get_vsi_id)(void *priv, u16 func_id, u16 type); - void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id); + void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id); int (*set_promisc_mode)(void *priv, u16 vsi_id, u16 mode); + int (*set_mtu)(void *priv, u16 vsi_id, u16 mtu); + int (*get_max_mtu)(void *priv); u32 (*get_tx_headroom)(void *priv); + void (*get_rep_feature)(void *priv, struct nbl_register_net_result *register_result); + void (*get_rep_queue_info)(void *priv, u16 *queue_num, u16 *queue_size); void (*get_user_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + void (*set_eswitch_mode)(void *priv, u16 switch_mode); + u16 (*get_eswitch_mode)(void *priv); + int (*alloc_rep_data)(void *priv, int num_vfs, u16 vf_base_vsi_id); + void (*free_rep_data)(void *priv); + void (*set_rep_netdev_info)(void *priv, void *rep_data); + void (*unset_rep_netdev_info)(void *priv); + struct net_device *(*get_rep_netdev_info)(void *priv, u16 rep_data_index); + int (*disable_phy_flow)(void *priv, u8 eth_id); + int (*enable_phy_flow)(void *priv, u8 eth_id); + void (*init_acl)(void *priv); + void (*uninit_acl)(void *priv); + int (*set_upcall_rule)(void *priv, u8 eth_id, u16 vsi_id); + int (*unset_upcall_rule)(void *priv, u8 eth_id); + void (*set_shaping_dport_vld)(void *priv, u8 eth_id, bool vld); + void (*set_dport_fc_th_vld)(void *priv, u8 eth_id, bool vld); + void (*get_rep_stats)(void *priv, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats, bool is_tx); + u16 (*get_rep_index)(void *priv, u16 vsi_id); void (*get_firmware_version)(void *priv, char *firmware_verion, u8 max_len); int (*get_driver_info)(void *priv, struct nbl_driver_info *driver_info); @@ -106,51 +133,94 @@ struct nbl_dispatch_ops { struct nbl_queue_stats *queue_stats, bool is_tx); int (*get_queue_err_stats)(void *priv, u8 queue_id, struct nbl_queue_err_stats *queue_err_stats, bool is_tx); + int (*get_eth_mac_stats)(void *priv, u32 eth_id, + struct nbl_eth_mac_stats *eth_mac_stats, u32 data_len); + int (*get_rmon_stats)(void *priv, u32 eth_id, + struct nbl_rmon_stats *rmon_stats, u32 data_len); void (*get_net_stats)(void *priv, struct nbl_stats *queue_stats); + int (*get_eth_ctrl_stats)(void *priv, u32 eth_id, + struct nbl_eth_ctrl_stats *eth_ctrl_stats, u32 data_len); + int (*get_pause_stats)(void *priv, u32 eth_id, + struct nbl_pause_stats *pause_stats, u32 data_len); void (*get_private_stat_len)(void *priv, u32 *len); void (*get_private_stat_data)(void *priv, u32 eth_id, u64 *data, u32 data_len); void (*fill_private_stat_strings)(void *priv, u8 *strings); + int (*get_eth_abnormal_stats)(void *priv, u8 eth_id, + struct nbl_eth_abnormal_stats *eth_abnormal_stats); u16 (*get_max_desc_num)(void *priv); u16 (*get_min_desc_num)(void *priv); u16 (*get_tx_desc_num)(void *priv, u32 ring_index); u16 (*get_rx_desc_num)(void *priv, u32 ring_index); void (*set_tx_desc_num)(void *priv, u32 ring_index, u16 desc_num); void (*set_rx_desc_num)(void *priv, u32 ring_index, u16 desc_num); - void (*get_coalesce)(void *priv, u16 vector_id, struct ethtool_coalesce *ec); + void (*get_coalesce)(void *priv, u16 vector_id, struct nbl_chan_param_get_coalesce *ec); void (*set_coalesce)(void *priv, u16 vector_id, u16 num_net_msix, u16 pnum, u16 rate); u16 (*get_intr_suppress_level)(void *priv, u64 rate, u16 last_level); void (*set_intr_suppress_level)(void 
*priv, u16 vector_id, u16 num_net_msix, u16 level); void (*get_rxfh_indir_size)(void *priv, u16 vsi_id, u32 *rxfh_indir_size); void (*get_rxfh_indir)(void *priv, u16 vsi_id, u32 *indir, u32 indir_size); + int (*set_rxfh_indir)(void *priv, u16 vsi_id, const u32 *indir, u32 indir_size); void (*get_rxfh_rss_key_size)(void *priv, u32 *rxfh_rss_key_size); void (*get_rxfh_rss_key)(void *priv, u8 *rss_key, u32 rss_key_size); - void (*get_rxfh_rss_alg_sel)(void *priv, u8 *alg_sel, u8 eth_id); + void (*get_rxfh_rss_alg_sel)(void *priv, u16 vsi_id, u8 *alg_sel); + int (*set_rxfh_rss_alg_sel)(void *priv, u16 vsi_id, u8 alg_sel); int (*get_port_attributes)(void *priv); int (*enable_port)(void *priv, bool enable); + void (*init_port)(void *priv); + int (*cfg_eth_bond_info)(void *priv, struct nbl_lag_member_list_param *param); + int (*get_eth_bond_info)(void *priv, struct nbl_bond_param *param); void (*recv_port_notify)(void *priv); int (*get_port_state)(void *priv, u8 eth_id, struct nbl_port_state *port_state); + int (*get_fec_stats)(void *priv, u8 eth_id, struct nbl_fec_stats *fec_stats); int (*set_port_advertising)(void *priv, struct nbl_port_advertising *port_advertising); int (*get_module_info)(void *priv, u8 eth_id, struct ethtool_modinfo *info); int (*get_module_eeprom)(void *priv, u8 eth_id, struct ethtool_eeprom *eeprom, u8 *data); int (*get_link_state)(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info); + int (*get_link_down_count)(void *priv, u8 eth_id, u64 *link_down_count); + int (*get_link_status_opcode)(void *priv, u8 eth_id, u32 *link_status_opcode); int (*set_eth_mac_addr)(void *priv, u8 *mac, u8 eth_id); int (*process_abnormal_event)(void *priv, struct nbl_abnormal_event_info *abnomal_info); int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); int (*nway_reset)(void *priv, u8 eth_id); + int (*set_wol)(void *priv, u8 eth_id, bool enable); void (*adapt_desc_gother)(void *priv); + void (*set_desc_high_throughput)(void *priv); void (*flr_clear_net)(void *priv, u16 vfid); void (*flr_clear_queues)(void *priv, u16 vfid); + void (*flr_clear_accel_flow)(void *priv, u16 vfid); void (*flr_clear_flows)(void *priv, u16 vfid); void (*flr_clear_interrupt)(void *priv, u16 vfid); + void (*flr_clear_accel)(void *priv, u16 vfid); + void (*flr_clear_rdma)(void *priv, u16 vfid); + u16 (*covert_vfid_to_vsi_id)(void *priv, u16 vfid); void (*unmask_all_interrupts)(void *priv); void (*keep_alive)(void *priv); + void (*cfg_eth_bond_event)(void *priv, bool enable); int (*set_bridge_mode)(void *priv, u16 bmode); + void (*cfg_txrx_vlan)(void *priv, u16 vlan_tci, u16 vlan_proto, u8 vsi_index); + void (*setup_rdma_id)(void *priv); + void (*remove_rdma_id)(void *priv); + void (*register_rdma)(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param); + void (*unregister_rdma)(void *priv, u16 vsi_id); + void (*register_rdma_bond)(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param); + void (*unregister_rdma_bond)(void *priv, u16 lag_id); u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); u64 (*get_real_hw_addr)(void *priv, u16 vsi_id); u16 (*get_function_id)(void *priv, u16 vsi_id); void (*get_real_bdf)(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function); + int (*enable_lag_protocol)(void *priv, u16 eth_id, bool lag_en); + int (*cfg_lag_hash_algorithm)(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type); + int (*cfg_lag_member_fwd)(void *priv, u16 eth_id, u16 lag_id, u8 fwd); + int 
(*cfg_lag_member_list)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_lag_member_up_attr)(void *priv, u16 eth_id, u16 lag_id, bool enable); + int (*cfg_duppkt_info)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_duppkt_mcc)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_bond_shaping)(void *priv, u8 eth_id, bool enable); + void (*cfg_bgid_back_pressure)(void *priv, u8 main_eth_id, u8 other_eth_id, bool enable); bool (*check_fw_heartbeat)(void *priv); bool (*check_fw_reset)(void *priv); @@ -160,14 +230,16 @@ struct nbl_dispatch_ops { int (*flash_image)(void *priv, u32 module, const u8 *data, size_t len); int (*flash_activate)(void *priv); void (*get_phy_caps)(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps); - void (*get_phy_state)(void *priv, u8 eth_id, struct nbl_phy_state *phy_state); int (*set_sfp_state)(void *priv, u8 eth_id, u8 state); int (*set_eth_loopback)(void *priv, u8 enable); struct sk_buff *(*clean_rx_lb_test)(void *priv, u32 ring_index); int (*passthrough_fw_cmd)(void *priv, struct nbl_passthrough_fw_cmd_param *param, struct nbl_passthrough_fw_cmd_param *result); int (*update_ring_num)(void *priv); - int (*set_ring_num)(void *priv, struct nbl_fw_cmd_ring_num_param *param); + int (*update_rdma_cap)(void *priv); + int (*update_rdma_mem_type)(void *priv); + u16 (*get_rdma_cap_num)(void *priv); + int (*set_ring_num)(void *priv, struct nbl_fw_cmd_net_ring_num_param *param); u32 (*check_active_vf)(void *priv); int (*get_board_id)(void *priv); @@ -179,9 +251,58 @@ struct nbl_dispatch_ops { int (*emp_console_write)(void *priv, char *buf, size_t count); bool (*get_product_flex_cap)(void *priv, enum nbl_flex_cap_type cap_type); bool (*get_product_fix_cap)(void *priv, enum nbl_fix_cap_type cap_type); + int (*alloc_ktls_tx_index)(void *priv, u16 vsi); + void (*free_ktls_tx_index)(void *priv, u32 index); + void (*cfg_ktls_tx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + int (*alloc_ktls_rx_index)(void *priv, u16 vsi); + void (*free_ktls_rx_index)(void *priv, u32 index); + void (*cfg_ktls_rx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + void (*cfg_ktls_rx_record)(void *priv, u32 index, u32 tcp_sn, u64 rec_num, bool init); + int (*add_ktls_rx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ktls_rx_flow)(void *priv, u32 index); + + int (*alloc_ipsec_tx_index)(void *priv, struct nbl_ipsec_cfg_info *cfg_info); + void (*free_ipsec_tx_index)(void *priv, u32 index); + int (*alloc_ipsec_rx_index)(void *priv, struct nbl_ipsec_cfg_info *cfg_info); + void (*free_ipsec_rx_index)(void *priv, u32 index); + void (*cfg_ipsec_tx_sad)(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry); + void (*cfg_ipsec_rx_sad)(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry); + int (*add_ipsec_tx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ipsec_tx_flow)(void *priv, u32 index); + int (*add_ipsec_rx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ipsec_rx_flow)(void *priv, u32 index); + bool (*check_ipsec_status)(void *priv); + u32 (*get_dipsec_lft_info)(void *priv); + void (*handle_dipsec_soft_expire)(void *priv, u32 index); + void (*handle_dipsec_hard_expire)(void *priv, u32 index); + u32 (*get_uipsec_lft_info)(void *priv); + void (*handle_uipsec_soft_expire)(void *priv, u32 index); + void (*handle_uipsec_hard_expire)(void *priv, u32 index); + void (*get_board_info)(void *priv, struct nbl_board_port_info *board_info); void 
(*dummy_func)(void *priv); + void (*configure_virtio_dev_msix)(void *priv, u16 vector); + void (*configure_rdma_msix_off)(void *priv, u16 vector); + void (*configure_virtio_dev_ready)(void *priv); + + int (*switchdev_init_cmdq)(void *priv); + int (*switchdev_deinit_cmdq)(void *priv); + int (*add_tc_flow)(void *priv, struct nbl_tc_flow_param *param); + int (*del_tc_flow)(void *priv, struct nbl_tc_flow_param *param); + int (*flow_index_lookup)(void *priv, struct nbl_flow_index_key key); + + bool (*tc_tun_encap_lookup)(void *priv, struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param); + int (*tc_tun_encap_del)(void *priv, struct nbl_encap_key *key); + int (*tc_tun_encap_add)(void *priv, struct nbl_rule_action *action); + + int (*set_tc_flow_info)(void *priv); + int (*unset_tc_flow_info)(void *priv); + int (*get_tc_flow_info)(void *priv); + int (*query_tc_stats)(void *priv, struct nbl_stats_param *param); + + u32 (*get_p4_version)(void *priv); int (*get_p4_info)(void *priv, char *verify_code); int (*load_p4)(void *priv, struct nbl_load_p4_param *param); int (*load_p4_default)(void *priv); @@ -189,11 +310,84 @@ struct nbl_dispatch_ops { int (*set_p4_used)(void *priv, int p4_type); u16 (*get_vf_base_vsi_id)(void *priv, u16 pf_id); + int (*add_nd_upcall_flow)(void *priv, u16 vsi_id, bool for_pmd); + void (*del_nd_upcall_flow)(void *priv); + dma_addr_t (*restore_abnormal_ring)(void *priv, int ring_index, int type); int (*restart_abnormal_ring)(void *priv, int ring_index, int type); int (*restore_hw_queue)(void *priv, u16 vsi_id, u16 local_queue_id, dma_addr_t dma, int type); + int (*stop_abnormal_sw_queue)(void *priv, u16 local_queue_id, int type); + int (*stop_abnormal_hw_queue)(void *priv, u16 vsi_id, u16 local_queue_id, int type); u16 (*get_vf_function_id)(void *priv, u16 vsi_id, int vf_id); + u16 (*get_vf_vsi_id)(void *priv, u16 vsi_id, int vf_id); + bool (*check_vf_is_active)(void *priv, u16 func_id); + int (*check_vf_is_vdpa)(void *priv, u16 func_id, u8 *is_vdpa); + int (*get_vdpa_vf_stats)(void *priv, u16 func_id, struct nbl_vf_stats *vf_stats); + int (*get_uvn_pkt_drop_stats)(void *priv, u16 vsi_id, + u16 num_queues, u32 *uvn_stat_pkt_drop); + int (*get_ustore_pkt_drop_stats)(void *priv); + int (*get_ustore_total_pkt_drop_stats)(void *priv, u8 eth_id, + struct nbl_ustore_stats *ustore_stats); + int (*set_pmd_debug)(void *priv, bool pmd_debug); + + void (*register_func_mac)(void *priv, u8 *mac, u16 func_id); + int (*register_func_trust)(void *priv, u16 func_id, bool trusted, + bool *should_notify); + int (*register_func_vlan)(void *priv, u16 func_id, u16 vlan_tci, + u16 vlan_proto, bool *should_notify); + int (*register_func_rate)(void *priv, u16 func_id, int rate); + int (*register_func_link_forced)(void *priv, u16 func_id, u8 link_forced, + bool *should_notify); + int (*get_link_forced)(void *priv, u16 vsi_id); + int (*set_tx_rate)(void *priv, u16 func_id, int tx_rate, int burst); + int (*set_rx_rate)(void *priv, u16 func_id, int rx_rate, int burst); + + void (*get_driver_version)(void *priv, char *ver, int len); + + void (*register_dev_name)(void *priv, u16 vsi_id, char *name); + void (*get_dev_name)(void *priv, u16 vsi_id, char *name); + + int (*get_mirror_table_id)(void *priv, u16 vsi_id, int dir, + bool mirror_en, u8 *mt_id); + int (*configure_mirror)(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id); + int (*configure_mirror_table)(void *priv, bool mirror_en, u16 func_id, u8 mt_id); + int (*clear_mirror_cfg)(void *priv, u16 func_id); + + int 
(*get_fd_flow)(void *priv, u16 vsi_id, u32 location, + enum nbl_chan_fdir_rule_type rule_type, + struct nbl_chan_param_fdir_replace *cmd); + int (*get_fd_flow_cnt)(void *priv, enum nbl_chan_fdir_rule_type rule_type, u16 vsi_id); + int (*config_fd_flow_state)(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id, u16 state); + int (*get_fd_flow_all)(void *priv, struct nbl_chan_param_get_fd_flow_all *param, + u32 *rule_locs); + int (*get_fd_flow_max)(void *priv); + + int (*replace_fd_flow)(void *priv, struct nbl_chan_param_fdir_replace *info); + int (*remove_fd_flow)(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 loc, u16 vsi_id); + + void (*cfg_fd_update_event)(void *priv, bool enable); + void (*dump_fd_flow)(void *priv, struct seq_file *m); + + void (*get_xdp_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + void (*set_hw_status)(void *priv, enum nbl_hw_status hw_status); + void (*get_active_func_bitmaps)(void *priv, unsigned long *bitmap, int max_func); + int (*configure_qos)(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map); + int (*configure_rdma_bw)(void *priv, u8 eth_id, int rdma_bw); + int (*get_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon); + int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); + int (*set_rate_limit)(void *priv, enum nbl_traffic_type type, u32 rate); + int (*set_tc_wgt)(void *priv, u16 vsi_id, u8 *weight, u8 num_tc); + + u32 (*get_perf_dump_length)(void *priv); + u32 (*get_perf_dump_data)(void *priv, u8 *buffer, u32 size); + void (*cfg_mirror_outputport_event)(void *priv, bool enable); + int (*check_flow_table_spec)(void *priv, u16 vlan_cnt, u16 unicast_cnt, u16 multicast_cnt); + u32 (*get_dvn_desc_req)(void *priv); + void (*set_dvn_desc_req)(void *priv, u32 desc_req); }; struct nbl_dispatch_ops_tbl { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h index d61b17b01b07eddeb191e876851cf1580d2c59f4..2a20d0c972b6721a917a651c94714963ae050c9c 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h @@ -38,19 +38,24 @@ struct nbl_phy_ops { int (*cfg_q2tc_tcid)(void *priv, u16 queue_id, u16 tcid); int (*set_tc_wgt)(void *priv, u16 func_id, u8 *weight, u16 num_tc); int (*set_tc_spwrr)(void *priv, u16 func_id, u8 spwrr); - int (*set_shaping)(void *priv, u16 func_id, u64 total_tx_rate, u8 vld, bool active); + int (*set_shaping)(void *priv, u16 func_id, u64 total_tx_rate, u64 burst, + u8 vld, bool active); void (*active_shaping)(void *priv, u16 func_id); void (*deactive_shaping)(void *priv, u16 func_id); + int (*set_ucar)(void *priv, u16 func_id, u64 total_tx_rate, u64 burst, + u8 vld); int (*cfg_dsch_net_to_group)(void *priv, u16 func_id, u16 group_id, u16 vld); int (*cfg_dsch_group_to_port)(void *priv, u16 group_id, u16 dport, u16 vld); int (*init_epro_rss_key)(void *priv); void (*read_rss_key)(void *priv, u8 *rss_key); void (*read_rss_indir)(void *priv, u16 vsi_id, u32 *rss_indir, u16 rss_ret_base, u16 rss_entry_size); - void (*get_rss_alg_sel)(void *priv, u8 eth_id, u8 *rss_alg_sel); + void (*get_rss_alg_sel)(void *priv, u16 vsi_id, u8 *alg_sel); + int (*set_rss_alg_sel)(void *priv, u16 vsi_id, u8 alg_sel); int (*init_epro_vpt_tbl)(void *priv, u16 vsi_id); int (*set_epro_rss_default)(void *priv, u16 vsi_id); - int (*cfg_epro_rss_ret)(void *priv, u32 index, u8 size_type, u32 q_num, u16 
*queue_list); + int (*cfg_epro_rss_ret)(void *priv, u32 index, u8 size_type, u32 q_num, + u16 *queue_list, const u32 *indir); int (*set_epro_rss_pt)(void *priv, u16 vsi_id, u16 rss_ret_base, u16 rss_entry_size); int (*clear_epro_rss_pt)(void *priv, u16 vsi_id); int (*disable_dvn)(void *priv, u16 queue_id); @@ -65,7 +70,9 @@ struct nbl_phy_ops { struct nbl_queue_err_stats *queue_err_stats); void (*setup_queue_switch)(void *priv, u16 eth_id); void (*init_pfc)(void *priv, u8 ether_ports); - u32 (*get_chip_temperature)(void *priv); + int (*cfg_phy_flow)(void *priv, u16 vsi_id, u16 count, u8 eth_id, bool status); + u32 (*get_chip_temperature)(void *priv, enum nbl_hwmon_type type, u32 senser_id); + int (*cfg_eth_port_priority_replace)(void *priv, u8 eth_id, bool status); int (*cfg_epro_vpt_tbl)(void *priv, u16 vsi_id); void (*set_promisc_mode)(void *priv, u16 vsi_id, u16 eth_id, u16 mode); @@ -77,6 +84,8 @@ struct nbl_phy_ops { void (*get_coalesce)(void *priv, u16 interrupt_id, u16 *pnum, u16 *rate); void (*set_coalesce)(void *priv, u16 interrupt_id, u16 pnum, u16 rate); + void (*write_ped_tbl)(void *priv, u8 *data, u16 idx, enum nbl_flow_ped_type ped_type); + void (*update_mailbox_queue_tail_ptr)(void *priv, u16 tail_ptr, u8 txrx); void (*config_mailbox_rxq)(void *priv, dma_addr_t dma_addr, int size_bwid); void (*config_mailbox_txq)(void *priv, dma_addr_t dma_addr, int size_bwid); @@ -85,7 +94,10 @@ struct nbl_phy_ops { u16 (*get_mailbox_rx_tail_ptr)(void *priv); bool (*check_mailbox_dma_err)(void *priv, bool tx); u32 (*get_host_pf_mask)(void *priv); - u32 (*get_host_pf_fid)(void *priv, u8 func_id); + u32 (*get_host_pf_fid)(void *priv, u16 func_id); + u32 (*get_real_bus)(void *priv); + u64 (*get_pf_bar_addr)(void *priv, u16 func_id); + u64 (*get_vf_bar_addr)(void *priv, u16 func_id); void (*cfg_mailbox_qinfo)(void *priv, u16 func_id, u16 bus, u16 devid, u16 function); void (*enable_mailbox_irq)(void *priv, u16 func_id, bool enable_msix, u16 global_vector_id); void (*enable_abnormal_irq)(void *priv, bool enable_msix, u16 global_vector_id); @@ -106,8 +118,35 @@ struct nbl_phy_ops { int (*set_spoof_check_addr)(void *priv, u16 vsi_id, u8 *mac); int (*set_spoof_check_enable)(void *priv, u16 vsi_id, u8 enable); + int (*set_vsi_mtu)(void *priv, u16 vsi_id, u16 mtu_sel); u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); + int (*enable_lag_protocol)(void *priv, u16 eth_id, void *data); + int (*cfg_lag_hash_algorithm)(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type); + int (*cfg_lag_member_fwd)(void *priv, u16 eth_id, u16 lag_id, u8 fwd); + int (*set_sfp_state)(void *priv, u8 eth_id, u8 state); + int (*cfg_lag_member_list)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_lag_member_up_attr)(void *priv, u16 eth_id, u16 lag_id, bool enable); + bool (*get_lag_fwd)(void *priv, u16 eth_id); + + int (*cfg_bond_shaping)(void *priv, u8 eth_id, u8 speed, bool enable); + void (*cfg_bgid_back_pressure)(void *priv, u8 main_eth_id, u8 other_eth_id, + bool enable, u8 speed); + + void (*clear_acl)(void *priv); + int (*set_fd_udf)(void *priv, u8 lxmode, u8 offset); + int (*clear_fd_udf)(void *priv); + int (*set_fd_tcam_cfg_default)(void *priv); + int (*set_fd_tcam_cfg_lite)(void *priv); + int (*set_fd_tcam_cfg_full)(void *priv); + int (*set_fd_tcam_ram)(void *priv, struct nbl_acl_tcam_param *data, + struct nbl_acl_tcam_param *mask, u16 ram_index, u32 depth_index); + int (*set_fd_action_ram)(void *priv, u32 action, u16 ram_index, u32 depth_index); + void (*set_hw_status)(void 
*priv, enum nbl_hw_status hw_status); + enum nbl_hw_status (*get_hw_status)(void *priv); + int (*set_mtu)(void *priv, u16 mtu_index, u16 mtu); + u16 (*get_mtu_index)(void *priv, u16 vsi_id); /* For leonis */ int (*set_ht)(void *priv, u16 hash, u16 hash_other, u8 ht_table, @@ -116,20 +155,88 @@ struct nbl_phy_ops { int (*search_key)(void *priv, u8 *key, u8 key_type); int (*add_tcam)(void *priv, u32 index, u8 *key, u32 *action, u8 key_type, u8 pp_type); void (*del_tcam)(void *priv, u32 index, u8 key_type, u8 pp_type); - int (*add_mcc)(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 action); + int (*add_mcc)(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mcc_id, u16 action); void (*del_mcc)(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mcc_id); + void (*update_mcc_next_node)(void *priv, u16 mcc_id, u16 next_mcc_id); + int (*add_tnl_encap)(void *priv, const u8 encap_buf[], u16 encap_idx, + union nbl_flow_encap_offset_tbl_u encap_idx_info); + void (*del_tnl_encap)(void *priv, u16 encap_idx); int (*init_fem)(void *priv); + void (*init_acl)(void *priv); + void (*uninit_acl)(void *priv); + int (*set_upcall_rule)(void *priv, u8 idx, u16 vsi_id); + int (*unset_upcall_rule)(void *priv, u8 idx); + void (*set_shaping_dport_vld)(void *priv, u8 eth_id, bool vld); + void (*set_dport_fc_th_vld)(void *priv, u8 eth_id, bool vld); + void (*cfg_ktls_tx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + void (*cfg_ktls_rx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + void (*cfg_ktls_rx_record)(void *priv, u32 index, u32 tcp_sn, u64 rec_num, bool init); + int (*init_acl_stats)(void *priv); + + void (*cfg_dipsec_nat)(void *priv, u16 sport); + void (*cfg_dipsec_sad_iv)(void *priv, u32 index, u64 iv); + void (*cfg_dipsec_sad_esn)(void *priv, u32 index, u32 sn, u32 esn, u8 wrap_en, u8 enable); + void (*cfg_dipsec_sad_lifetime)(void *priv, u32 index, u32 lft_cnt, + u32 lft_diff, u8 limit_enable, u8 limit_type); + void (*cfg_dipsec_sad_crypto)(void *priv, u32 index, u32 *key, u32 salt, + u32 crypto_type, u8 tunnel_mode, u8 icv_len); + void (*cfg_dipsec_sad_encap)(void *priv, u32 index, u8 nat_flag, + u16 dport, u32 spi, u32 *ip_data); + u32 (*read_dipsec_status)(void *priv); + u32 (*reset_dipsec_status)(void *priv); + u32 (*read_dipsec_lft_info)(void *priv); + void (*cfg_dipsec_lft_info)(void *priv, u32 index, u32 lifetime_diff, + u32 flag_wen, u32 msb_wen); + void (*init_dprbac)(void *priv); + void (*cfg_uipsec_nat)(void *priv, u8 nat_flag, u16 dport); + void (*cfg_uipsec_sad_esn)(void *priv, u32 index, u32 sn, u32 esn, u8 overlap, u8 enable); + void (*cfg_uipsec_sad_lifetime)(void *priv, u32 index, u32 lft_cnt, + u32 lft_diff, u8 limit_enable, u8 limit_type); + void (*cfg_uipsec_sad_crypto)(void *priv, u32 index, u32 *key, u32 salt, + u32 crypto_type, u8 tunnel_mode, u8 icv_len); + void (*cfg_uipsec_sad_window)(void *priv, u32 index, u8 window_en, u8 option); + void (*cfg_uipsec_em_tcam)(void *priv, u16 tcam_index, u32 *data); + void (*cfg_uipsec_em_ad)(void *priv, u16 tcam_index, u32 index); + void (*clear_uipsec_tcam_ad)(void *priv, u16 tcam_index); + void (*cfg_uipsec_em_ht)(void *priv, u32 index, u16 ht_table, u16 ht_index, + u16 ht_other_index, u16 ht_bucket); + void (*cfg_uipsec_em_kt)(void *priv, u32 index, u32 *data); + void (*clear_uipsec_ht_kt)(void *priv, u32 index, u16 ht_table, + u16 ht_index, u16 ht_bucket); + u32 (*read_uipsec_status)(void *priv); + u32 (*reset_uipsec_status)(void *priv); + u32 (*read_uipsec_lft_info)(void *priv); + void 
(*cfg_uipsec_lft_info)(void *priv, u32 index, u32 lifetime_diff, + u32 flag_wen, u32 msb_wen); + void (*init_uprbac)(void *priv); - unsigned long (*get_fw_ping)(void *priv); - void (*set_fw_ping)(void *priv, unsigned long ping); - unsigned long (*get_fw_pong)(void *priv); - void (*set_fw_pong)(void *priv, unsigned long pong); + u32 (*get_fw_ping)(void *priv); + void (*set_fw_ping)(void *priv, u32 ping); + u32 (*get_fw_pong)(void *priv); + void (*set_fw_pong)(void *priv, u32 pong); + + int (*init_vdpaq)(void *priv, u16 func_id, u16 bdf, u64 pa, u32 size); + void (*destroy_vdpaq)(void *priv); void (*get_reg_dump)(void *priv, u32 *data, u32 len); int (*get_reg_dump_len)(void *priv); int (*process_abnormal_event)(void *priv, struct nbl_abnormal_event_info *abnomal_info); u32 (*get_uvn_desc_entry_stats)(void *priv); void (*set_uvn_desc_wr_timeout)(void *priv, u16 timeout); + void (*set_tc_kgen_cvlan_zero)(void *priv); + void (*unset_tc_kgen_cvlan)(void *priv); + void (*set_ped_tab_vsi_type)(void *priv, u32 port_id, u16 eth_proto); + void (*load_p4)(void *priv, u32 addr, u32 size, u8 *data); + void (*configure_qos)(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map); + void (*configure_rdma_bw)(void *priv, u8 eth_id, int rdma_bw); + void (*get_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon); + int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); + void (*set_rate_limit)(void *priv, u16 func_id, enum nbl_traffic_type type, u32 rate); + int (*get_dstat_vsi_stat)(void *priv, u16 vsi_id, u64 *fwd_pkt, u64 *fwd_byte); + int (*get_ustat_vsi_stat)(void *priv, u16 vsi_id, u64 *fwd_pkt, u64 *fwd_byte); + int (*get_uvn_pkt_drop_stats)(void *priv, u16 global_queue_id, u32 *uvn_stat_pkt_drop); + int (*get_ustore_pkt_drop_stats)(void *priv, u8 eth_id, + struct nbl_ustore_stats *ustore_stats); int (*setup_loopback)(void *priv, u32 eth_id, u32 enable); int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); @@ -138,6 +245,44 @@ struct nbl_phy_ops { u32 (*get_fw_eth_num)(void *priv); u32 (*get_fw_eth_map)(void *priv); void (*get_board_info)(void *priv, struct nbl_board_port_info *board); + u32 (*get_quirks)(void *priv); + + /* for userspace */ + int (*init_offload_fwd)(void *priv, u16 vsi_id); + int (*init_cmdq)(void *priv, void *data, u16 func_id); + int (*reset_cmdq)(void *priv); + int (*destroy_cmdq)(void *priv); + void (*update_cmdq_tail)(void *priv, u32 doorbell); + int (*init_rep)(void *priv, u16 vsi_id, u8 inner_type, + u8 outer_type, u8 rep_type); + int (*init_flow)(void *priv, void *data); + int (*deinit_flow)(void *priv); + int (*offload_flow_rule)(void *priv, void *data); + int (*get_flow_acl_switch)(void *priv, u8 *acl_enable); + void (*get_line_rate_info)(void *priv, void *data, void *result); + void (*set_eth_stats_snapshot)(void *priv, u32 eth_id, u8 snapshot); + void (*get_eth_ip_reg)(void *priv, u32 eth_id, u64 addr_off, u32 *data); + int (*set_eth_fec_mode)(void *priv, u32 eth_id, enum nbl_port_mode mode); + void (*clear_profile_table_action)(void *priv); + void (*ipro_chksum_err_ctrl)(void *priv, u8 status); + void (*get_common_cfg)(void *priv, u32 offset, void *buf, u32 len); + void (*set_common_cfg)(void *priv, u32 offset, void *buf, u32 len); + void (*get_device_cfg)(void *priv, u32 offset, void *buf, u32 len); + void (*set_device_cfg)(void *priv, u32 offset, void *buf, u32 len); + bool (*get_rdma_capability)(void *priv); + + u32 (*get_perf_dump_length)(void *priv); + u32 (*get_perf_dump_data)(void 
*priv, u8 *buffer, u32 size); + + int (*get_mirror_table_id)(void *priv, u16 vsi_id, int dir, + bool mirror_en, u8 *mt_id); + int (*configure_mirror)(void *priv, u16 vsi_id, bool mirror_en, int dir, + u8 mt_id); + int (*configure_mirror_table)(void *priv, bool mirror_en, + u16 mirror_vsi_id, u16 mirror_queue_id, u8 mt_id); + int (*clear_mirror_cfg)(void *priv, u16 vsi_id); + u32 (*get_dvn_desc_req)(void *priv); + void (*set_dvn_desc_req)(void *priv, u32 desc_req); }; struct nbl_phy_ops_tbl { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h index 6a0cc88776c5acaf4840c73568ac360fefd23fc5..34c5831656b85598e343ab65b0601d65e960bf8b 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h @@ -17,6 +17,7 @@ struct nbl_resource_pt_ops { netdev_tx_t (*rep_xmit)(struct sk_buff *skb, struct net_device *netdev); netdev_tx_t (*self_test_xmit)(struct sk_buff *skb, struct net_device *netdev); int (*napi_poll)(struct napi_struct *napi, int budget); + int (*xdp_xmit)(struct net_device *netdev, int n, struct xdp_frame **frame, u32 flags); }; struct nbl_resource_ops { @@ -32,16 +33,13 @@ struct nbl_resource_ops { int (*enable_adminq_irq)(void *p, u16 vector_id, bool enable_msix); u16 (*get_global_vector)(void *priv, u16 vsi_id, u16 local_vector_id); u16 (*get_msix_entry_id)(void *priv, u16 vsi_id, u16 local_vector_id); - u32 (*get_chip_temperature)(void *priv); - u32 (*get_chip_temperature_max)(void *priv); - u32 (*get_chip_temperature_crit)(void *priv); - int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_module_temp_type type); + u32 (*get_chip_temperature)(void *priv, enum nbl_hwmon_type type, u32 senser_id); + int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_hwmon_type type); int (*get_mbx_irq_num)(void *priv); int (*get_adminq_irq_num)(void *priv); int (*get_abnormal_irq_num)(void *priv); - int (*alloc_rings)(void *priv, struct net_device *netdev, u16 tx_num, - u16 rx_num, u16 tx_desc_num, u16 rx_desc_num); + int (*alloc_rings)(void *priv, struct net_device *netdev, struct nbl_ring_param *param); void (*remove_rings)(void *priv); dma_addr_t (*start_tx_ring)(void *priv, u8 ring_index); void (*stop_tx_ring)(void *priv, u8 ring_index); @@ -51,7 +49,10 @@ struct nbl_resource_ops { void (*kick_rx_ring)(void *priv, u16 index); int (*dump_ring)(void *priv, struct seq_file *m, bool is_tx, int index); int (*dump_ring_stats)(void *priv, struct seq_file *m, bool is_tx, int index); - struct napi_struct *(*get_vector_napi)(void *priv, u16 index); + void (*set_rings_xdp_prog)(void *priv, void *prog); + int (*register_xdp_rxq)(void *priv, u8 ring_index); + void (*unregister_xdp_rxq)(void *priv, u8 ring_index); + struct nbl_napi_struct *(*get_vector_napi)(void *priv, u16 index); void (*set_vector_info)(void *priv, u8 *irq_enable_base, u32 irq_data, u16 index, bool mask_en); void (*register_vsi_ring)(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num); @@ -68,14 +69,17 @@ struct nbl_resource_ops { int (*setup_rss)(void *priv, u16 vsi_id); void (*remove_rss)(void *priv, u16 vsi_id); int (*setup_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); + int (*remove_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); void (*remove_all_queues)(void *priv, u16 vsi_id); int (*cfg_dsch)(void *priv, u16 vsi_id, bool vld); - int (*setup_cqs)(void *priv, u16 vsi_id, 
u16 real_qps); + int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set); void (*remove_cqs)(void *priv, u16 vsi_id); + int (*cfg_qdisc_mqprio)(void *priv, struct nbl_tc_qidsc_param *param); void (*clear_queues)(void *priv, u16 vsi_id); + int (*check_offload_status)(void *priv, bool *is_down); u16 (*get_local_queue_id)(void *priv, u16 vsi_id, u16 global_queue_id); + u16 (*get_global_queue_id)(void *priv, u16 vsi_id, u16 local_queue_id); - int (*enable_msix_irq)(void *priv, u16 global_vector_id); u8* (*get_msix_irq_enable_info)(void *priv, u16 global_vector_id, u32 *irq_data); int (*set_spoof_check_addr)(void *priv, u16 vsi_id, u8 *mac); @@ -90,25 +94,57 @@ struct nbl_resource_ops { void (*del_lldp_flow)(void *priv, u16 vsi); int (*add_multi_rule)(void *priv, u16 vsi); void (*del_multi_rule)(void *priv, u16 vsi); + int (*add_multi_mcast)(void *priv, u16 vsi); + void (*del_multi_mcast)(void *priv, u16 vsi); int (*setup_multi_group)(void *priv); void (*remove_multi_group)(void *priv); + void (*clear_accel_flow)(void *priv, u16 vsi_id); void (*clear_flow)(void *priv, u16 vsi_id); void (*dump_flow)(void *priv, struct seq_file *m); u16 (*get_vsi_id)(void *priv, u16 func_id, u16 type); - void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id); + void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id); int (*set_promisc_mode)(void *priv, u16 vsi_id, u16 mode); u32 (*get_tx_headroom)(void *priv); + void (*get_rep_feature)(void *priv, struct nbl_register_net_result *register_result); + void (*get_rep_queue_info)(void *priv, u16 *queue_num, u16 *queue_size); void (*get_user_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + int (*set_mtu)(void *priv, u16 vsi_id, u16 mtu); + int (*get_max_mtu)(void *priv); + void (*set_eswitch_mode)(void *priv, u16 switch_mode); + u16 (*get_eswitch_mode)(void *priv); + int (*alloc_rep_data)(void *priv, int num_vfs, u16 vf_base_vsi_id); + void (*free_rep_data)(void *priv); + void (*set_rep_netdev_info)(void *priv, void *rep_data); + void (*unset_rep_netdev_info)(void *priv); + struct net_device *(*get_rep_netdev_info)(void *priv, u16 rep_data_index); + int (*disable_phy_flow)(void *priv, u8 eth_id); + int (*enable_phy_flow)(void *priv, u8 eth_id); + void (*init_acl)(void *priv); + void (*uninit_acl)(void *priv); + int (*set_upcall_rule)(void *priv, u8 eth_id, u16 vsi_id); + int (*unset_upcall_rule)(void *priv, u8 eth_id); + void (*set_shaping_dport_vld)(void *priv, u8 eth_id, bool vld); + void (*set_dport_fc_th_vld)(void *priv, u8 eth_id, bool vld); + void (*get_rep_stats)(void *priv, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats, bool is_tx); + u16 (*get_rep_index)(void *priv, u16 vsi_id); void (*get_queue_stats)(void *priv, u8 queue_id, struct nbl_queue_stats *queue_stats, bool is_tx); int (*get_queue_err_stats)(void *priv, u16 func_id, u8 queue_id, struct nbl_queue_err_stats *queue_err_stats, bool is_tx); void (*get_net_stats)(void *priv, struct nbl_stats *queue_stats); + int (*get_eth_ctrl_stats)(void *priv, u32 eth_id, + struct nbl_eth_ctrl_stats *eth_ctrl_stats); void (*get_private_stat_len)(void *priv, u32 *len); - void (*get_private_stat_data)(void *priv, u32 eth_id, u64 *data); + void (*get_private_stat_data)(void *priv, u32 eth_id, u64 *data, u32 data_len); + int (*get_pause_stats)(void *priv, u32 eth_id, struct nbl_pause_stats *pause_stats); + int (*get_eth_mac_stats)(void *priv, u32 eth_id, struct nbl_eth_mac_stats *eth_mac_stats); + int (*get_rmon_stats)(void *priv, u32 
eth_id, struct nbl_rmon_stats *rmon_stats); void (*fill_private_stat_strings)(void *priv, u8 *strings); + int (*get_eth_abnormal_stats)(void *priv, u32 eth_id, + struct nbl_eth_abnormal_stats *eth_abnormal_stats); u16 (*get_max_desc_num)(void); u16 (*get_min_desc_num)(void); u16 (*get_tx_desc_num)(void *priv, u32 ring_index); @@ -116,7 +152,7 @@ struct nbl_resource_ops { void (*set_tx_desc_num)(void *priv, u32 ring_index, u16 desc_num); void (*set_rx_desc_num)(void *priv, u32 ring_index, u16 desc_num); void (*get_coalesce)(void *priv, u16 func_id, u16 vector_id, - struct ethtool_coalesce *ec); + struct nbl_chan_param_get_coalesce *ec); void (*set_coalesce)(void *priv, u16 func_id, u16 vector_id, u16 num_net_msix, u16 pnum, u16 rate); u16 (*get_intr_suppress_level)(void *priv, u64 rate, u16 last_level); @@ -124,39 +160,85 @@ struct nbl_resource_ops { u16 num_net_msix, u16 level); void (*get_rxfh_indir_size)(void *priv, u16 vsi_id, u32 *rxfh_indir_size); void (*get_rxfh_indir)(void *priv, u16 vsi_id, u32 *indir); + int (*set_rxfh_indir)(void *priv, u16 vsi_id, const u32 *indir, u32 indir_size); void (*get_rxfh_rss_key_size)(void *priv, u32 *rxfh_rss_key_size); void (*get_rxfh_rss_key)(void *priv, u8 *rss_key); - void (*get_rss_alg_sel)(void *priv, u8 *alg_sel, u8 eth_id); + void (*get_rss_alg_sel)(void *priv, u16 vsi_id, u8 *alg_sel); + int (*set_rss_alg_sel)(void *priv, u16 vsi_id, u8 alg_sel); int (*get_firmware_version)(void *priv, char *firmware_verion); int (*get_driver_info)(void *priv, struct nbl_driver_info *driver_info); int (*nway_reset)(void *priv, u8 eth_id); + void (*cfg_txrx_vlan)(void *priv, u16 vlan_tci, u16 vlan_proto, u8 vsi_index); + void (*setup_rdma_id)(void *priv); + void (*remove_rdma_id)(void *priv); + void (*register_rdma)(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param); + void (*unregister_rdma)(void *priv, u16 vsi_id); + void (*register_rdma_bond)(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param); + void (*unregister_rdma_bond)(void *priv, u16 lag_id); u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); u64 (*get_real_hw_addr)(void *priv, u16 vsi_id); u16 (*get_function_id)(void *priv, u16 vsi_id); void (*get_real_bdf)(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function); + int (*enable_lag_protocol)(void *priv, u16 eth_id, bool lag_en); + int (*cfg_lag_hash_algorithm)(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type); + int (*cfg_lag_member_fwd)(void *priv, u16 eth_id, u16 lag_id, u8 fwd); + int (*cfg_lag_member_list)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_lag_member_up_attr)(void *priv, u16 eth_id, u16 lag_id, bool enable); + int (*cfg_duppkt_info)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_duppkt_mcc)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_bond_shaping)(void *priv, u8 eth_id, bool enable); + void (*cfg_bgid_back_pressure)(void *priv, u8 main_eth_id, u8 other_eth_id, bool enable); + + int (*init_port)(void *priv); int (*get_port_attributes)(void *priv); int (*update_ring_num)(void *priv); - int (*set_ring_num)(void *priv, struct nbl_fw_cmd_ring_num_param *param); + int (*update_rdma_cap)(void *priv); + int (*update_rdma_mem_type)(void *priv); + u16 (*get_rdma_cap_num)(void *priv); + int (*set_ring_num)(void *priv, struct nbl_fw_cmd_net_ring_num_param *param); int (*enable_port)(void *priv, bool enable); + int (*cfg_eth_bond_info)(void *priv, struct nbl_lag_member_list_param *param); + int 
(*get_eth_bond_info)(void *priv, struct nbl_bond_param *param); + void (*cfg_eth_bond_event)(void *priv, bool enable); void (*recv_port_notify)(void *priv, void *data); int (*get_port_state)(void *priv, u8 eth_id, struct nbl_port_state *port_state); + int (*get_fec_stats)(void *priv, u32 eth_id, struct nbl_fec_stats *fec_stats); int (*set_port_advertising)(void *priv, struct nbl_port_advertising *port_advertising); int (*get_module_info)(void *priv, u8 eth_id, struct ethtool_modinfo *info); int (*get_module_eeprom)(void *priv, u8 eth_id, struct ethtool_eeprom *eeprom, u8 *data); int (*get_link_state)(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info); + int (*get_link_down_count)(void *priv, u8 eth_id, u64 *link_down_count); + int (*get_link_status_opcode)(void *priv, u8 eth_id, u32 *link_status_opcode); int (*set_eth_mac_addr)(void *priv, u8 *mac, u8 eth_id); int (*process_abnormal_event)(void *priv, struct nbl_abnormal_event_info *abnomal_info); int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); + int (*set_wol)(void *priv, u8 eth_id, bool enable); void (*adapt_desc_gother)(void *priv); + void (*set_desc_high_throughput)(void *priv); void (*flr_clear_net)(void *priv, u16 vfid); void (*flr_clear_queues)(void *priv, u16 vfid); + void (*flr_clear_accel_flow)(void *priv, u16 vfid); void (*flr_clear_flows)(void *priv, u16 vfid); void (*flr_clear_interrupt)(void *priv, u16 vfid); + void (*flr_clear_accel)(void *priv, u16 vfid); + void (*flr_clear_rdma)(void *priv, u16 vfid); + u16 (*covert_vfid_to_vsi_id)(void *priv, u16 vfid); void (*unmask_all_interrupts)(void *priv); int (*set_bridge_mode)(void *priv, u16 func_id, u16 bmode); u16 (*get_vf_function_id)(void *priv, u16 vsi_id, int vf_id); + u16 (*get_vf_vsi_id)(void *priv, u16 vsi_id, int vf_id); + bool (*check_vf_is_active)(void *priv, u16 func_id); + int (*check_vf_is_vdpa)(void *priv, u16 func_id, u8 *is_vdpa); + int (*get_vdpa_vf_stats)(void *priv, u16 func_id, struct nbl_vf_stats *vf_stats); + int (*get_uvn_pkt_drop_stats)(void *priv, u16 vsi_id, + u16 num_queues, u32 *uvn_stat_pkt_drop); + int (*get_ustore_pkt_drop_stats)(void *priv); + int (*get_ustore_total_pkt_drop_stats)(void *priv, u8 eth_id, + struct nbl_ustore_stats *ustore_stats); bool (*check_fw_heartbeat)(void *priv); bool (*check_fw_reset)(void *priv); @@ -166,7 +248,6 @@ struct nbl_resource_ops { int (*flash_image)(void *priv, u32 module, const u8 *data, size_t len); int (*flash_activate)(void *priv); void (*get_phy_caps)(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps); - void (*get_phy_state)(void *priv, u8 eth_id, struct nbl_phy_state *phy_state); int (*set_sfp_state)(void *priv, u8 eth_id, u8 state); int (*setup_loopback)(void *priv, u32 eth_id, u32 enable); struct sk_buff *(*clean_rx_lb_test)(void *priv, u32 ring_index); @@ -181,14 +262,122 @@ struct nbl_resource_ops { bool (*get_product_flex_cap)(void *priv, enum nbl_flex_cap_type cap_type); bool (*get_product_fix_cap)(void *priv, enum nbl_fix_cap_type cap_type); + int (*alloc_ktls_tx_index)(void *priv, u16 vsi); + void (*free_ktls_tx_index)(void *priv, u32 index); + void (*cfg_ktls_tx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + int (*alloc_ktls_rx_index)(void *priv, u16 vsi); + void (*free_ktls_rx_index)(void *priv, u32 index); + void (*cfg_ktls_rx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + void (*cfg_ktls_rx_record)(void *priv, u32 index, u32 tcp_sn, u64 rec_num, bool init); + int 
(*add_ktls_rx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ktls_rx_flow)(void *priv, u32 index); + + int (*alloc_ipsec_tx_index)(void *priv, struct nbl_ipsec_cfg_info *cfg_info); + void (*free_ipsec_tx_index)(void *priv, u32 index); + int (*alloc_ipsec_rx_index)(void *priv, struct nbl_ipsec_cfg_info *cfg_info); + void (*free_ipsec_rx_index)(void *priv, u32 index); + void (*cfg_ipsec_tx_sad)(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry); + void (*cfg_ipsec_rx_sad)(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry); + int (*add_ipsec_tx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ipsec_tx_flow)(void *priv, u32 index); + int (*add_ipsec_rx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ipsec_rx_flow)(void *priv, u32 index); + bool (*check_ipsec_status)(void *priv); + u32 (*get_dipsec_lft_info)(void *priv); + void (*handle_dipsec_soft_expire)(void *priv, u32 index); + void (*handle_dipsec_hard_expire)(void *priv, u32 index); + u32 (*get_uipsec_lft_info)(void *priv); + void (*handle_uipsec_soft_expire)(void *priv, u32 index); + void (*handle_uipsec_hard_expire)(void *priv, u32 index); dma_addr_t (*restore_abnormal_ring)(void *priv, int ring_index, int type); int (*restart_abnormal_ring)(void *priv, int ring_index, int type); int (*restore_hw_queue)(void *priv, u16 vsi_id, u16 local_queue_id, dma_addr_t dma, int type); + int (*stop_abnormal_sw_queue)(void *priv, u16 local_queue_id, int type); + int (*stop_abnormal_hw_queue)(void *priv, u16 vsi_id, u16 local_queue_id, int type); + + void (*register_func_mac)(void *priv, u8 *mac, u16 func_id); + int (*register_func_link_forced)(void *priv, u16 func_id, u8 link_forced, + bool *should_notify); + int (*register_func_trust)(void *priv, u16 func_id, + bool trust, bool *should_notify); + int (*register_func_vlan)(void *priv, u16 func_id, + u16 vlan_tci, u16 vlan_proto, bool *should_notify); + int (*register_func_rate)(void *priv, u16 func_id, int rate); + int (*get_link_forced)(void *priv, u16 vsi_id); + int (*set_tx_rate)(void *priv, u16 func_id, int tx_rate, int burst); + int (*set_rx_rate)(void *priv, u16 func_id, int rx_rate, int burst); + + void (*get_driver_version)(void *priv, char *ver, int len); + + int (*get_fd_flow)(void *priv, u16 vsi_id, u32 location, + enum nbl_chan_fdir_rule_type rule_type, + struct nbl_chan_param_fdir_replace *cmd); + int (*get_fd_flow_cnt)(void *priv, enum nbl_chan_fdir_rule_type rule_type, u16 vsi_id); + int (*config_fd_flow_state)(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id, u16 state); + int (*get_fd_flow_all)(void *priv, struct nbl_chan_param_get_fd_flow_all *param, + u32 *rule_locs); + int (*get_fd_flow_max)(void *priv); + int (*replace_fd_flow)(void *priv, struct nbl_chan_param_fdir_replace *info); + int (*remove_fd_flow)(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 loc, u16 vsi_id); + void (*dump_fd_flow)(void *priv, struct seq_file *m); + void (*cfg_fd_update_event)(void *priv, bool enable); + + /* for vdpa driver */ + int (*cfg_queue_log)(void *priv, u16 vsi_id, u16 qps, bool vld); + u16 (*get_queue_ctx)(void *priv, u16 vsi_id, u16 qid); + int (*init_vdpaq)(void *priv, u16 func_id, u64 pa, u32 size); + void (*destroy_vdpaq)(void *priv); + int (*get_upcall_port)(void *priv, u16 *bdf); + + /* for pmd driver */ + void (*register_net_rep)(void *priv, u16 pf_id, u16 vf_id, + struct nbl_register_net_rep_result *result); + void (*unregister_net_rep)(void *priv, u16 vsi_id); + void (*register_eth_rep)(void 
*priv, u8 eth_id); + void (*unregister_eth_rep)(void *priv, u8 eth_id); + u16 (*get_vsi_global_queue_id)(void *priv, u16 vsi_id, u16 local_qid); + void (*get_line_rate_info)(void *priv, void *data, void *result); + int (*register_upcall_port)(void *priv, u16 func_id); + void (*unregister_upcall_port)(void *priv, u16 func_id); + void (*set_offload_status)(void *priv, u16 func_id); + void (*init_offload_fwd)(void *priv, u16 vsi_id); + int (*add_nd_upcall_flow)(void *priv, u16 vsi_id, bool mode); + void (*del_nd_upcall_flow)(void *priv); + void (*init_cmdq)(void *priv, void *data, u16 func_id); + void (*reset_cmdq)(void *priv); + void (*destroy_cmdq)(void *priv); + void (*init_rep)(void *priv, u16 vsi_id, u8 inner_type, + u8 outer_type, u8 rep_type); + void (*init_flow)(void *priv, void *param); + void (*deinit_flow)(void *priv); + void (*offload_flow_rule)(void *priv, void *data); + void (*get_flow_acl_switch)(void *priv, u8 *acl_enable); void (*get_board_info)(void *priv, struct nbl_board_port_info *board_info); + void (*configure_rdma_msix_off)(void *priv, u16 vector); + + int (*switchdev_init_cmdq)(void *priv); + int (*switchdev_deinit_cmdq)(void *priv, u8 index); + int (*add_tc_flow)(void *priv, struct nbl_tc_flow_param *param); + int (*del_tc_flow)(void *priv, struct nbl_tc_flow_param *param); + int (*flow_index_lookup)(void *priv, struct nbl_flow_index_key key); + + bool (*tc_tun_encap_lookup)(void *priv, struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param); + int (*tc_tun_encap_del)(void *priv, struct nbl_encap_key *key); + int (*tc_tun_encap_add)(void *priv, struct nbl_rule_action *action); + + int (*set_tc_flow_info)(void *priv); + int (*unset_tc_flow_info)(void *priv); + int (*get_tc_flow_info)(void *priv); + int (*query_tc_stats)(void *priv, struct nbl_stats_param *param); + + u32 (*get_p4_version)(void *priv); int (*get_p4_info)(void *priv, char *verify_code); int (*load_p4)(void *priv, struct nbl_load_p4_param *param); int (*load_p4_default)(void *priv); @@ -196,7 +385,36 @@ struct nbl_resource_ops { int (*set_p4_used)(void *priv, int p4_type); u16 (*get_vf_base_vsi_id)(void *priv, u16 pf_id); - u16 (*get_vsi_global_queue_id)(void *priv, u16 vsi_id, u16 local_qid); + + int (*set_pmd_debug)(void *priv, bool pmd_debug); + + void (*get_xdp_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + void (*set_hw_status)(void *priv, enum nbl_hw_status hw_status); + void (*get_active_func_bitmaps)(void *priv, unsigned long *bitmap, int max_func); + int (*configure_qos)(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map); + int (*configure_rdma_bw)(void *priv, u8 eth_id, int rdma_bw); + int (*set_eth_pfc)(void *priv, u8 eth_id, u8 *pfc); + int (*get_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon); + int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); + int (*set_rate_limit)(void *priv, u16 func_id, enum nbl_traffic_type type, u32 rate); + int (*set_tc_wgt)(void *priv, u16 vsi_id, u8 *weight, u8 num_tc); + + u32 (*get_perf_dump_length)(void *priv); + u32 (*get_perf_dump_data)(void *priv, u8 *buffer, u32 size); + + void (*register_dev_name)(void *priv, u16 vsi_id, char *name); + void (*get_dev_name)(void *priv, u16 vsi_id, char *name); + + int (*get_mirror_table_id)(void *priv, u16 vsi_id, int dir, bool mirror_en, + u8 *mt_id); + int (*configure_mirror)(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id); + int (*configure_mirror_table)(void *priv, bool mirror_en, u16 func_id, u8 mt_id); + 
int (*clear_mirror_cfg)(void *priv, u16 func_id); + void (*cfg_mirror_outputport_event)(void *priv, bool enable); + int (*check_flow_table_spec)(void *priv, u16 vlan_cnt, u16 unicast_cnt, u16 multicast_cnt); + u32 (*get_dvn_desc_req)(void *priv); + void (*set_dvn_desc_req)(void *priv, u32 desc_req); }; struct nbl_resource_ops_tbl { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h index 76a1887098af713836e114684a7c7b37cee666a9..37d706154e58c9c8f874af0cb0076e00e48db284 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h @@ -13,6 +13,7 @@ #define NBL_SERV_OPS_TBL_TO_PRIV(serv_ops_tbl) ((serv_ops_tbl)->priv) struct nbl_service_ops { + int (*clear_mirrior_table)(void *p); int (*init_chip)(void *p); int (*destroy_chip)(void *p); int (*init_p4)(void *priv); @@ -29,56 +30,82 @@ struct nbl_service_ops { void (*get_common_irq_num)(void *priv, struct nbl_common_irq_num *irq_num); void (*get_ctrl_irq_num)(void *priv, struct nbl_ctrl_irq_num *irq_num); int (*get_port_attributes)(void *p); - int (*update_ring_num)(void *priv); + int (*update_template_config)(void *priv); int (*enable_port)(void *p, bool enable); + void (*init_port)(void *priv); void (*set_netdev_carrier_state)(void *p, struct net_device *netdev, u8 link_state); int (*vsi_open)(void *priv, struct net_device *netdev, u16 vsi_index, u16 real_qps, bool use_napi); int (*vsi_stop)(void *priv, u16 vsi_index); - int (*switch_traffic_default_dest)(void *priv, u16 from_vsi, u16 to_vsi); + int (*switch_traffic_default_dest)(void *priv, int op); + int (*config_fd_flow_state)(void *priv, enum nbl_chan_fdir_rule_type type, u32 state); int (*netdev_open)(struct net_device *netdev); int (*netdev_stop)(struct net_device *netdev); - netdev_tx_t (*start_xmit)(struct sk_buff *skb, struct net_device *netdev); int (*change_mtu)(struct net_device *netdev, int new_mtu); + int (*change_rep_mtu)(struct net_device *netdev, int new_mtu); void (*get_stats64)(struct net_device *netdev, struct rtnl_link_stats64 *stats); void (*set_rx_mode)(struct net_device *dev); void (*change_rx_flags)(struct net_device *dev, int flag); int (*set_mac)(struct net_device *dev, void *p); int (*rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); int (*rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); + int (*set_features)(struct net_device *dev, netdev_features_t features); netdev_features_t (*features_check)(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); - void (*tx_timeout)(struct net_device *netdev, u32 txqueue); - + int (*setup_tc)(struct net_device *dev, enum tc_setup_type type, void *type_data); int (*get_phys_port_name)(struct net_device *dev, char *name, size_t len); int (*get_port_parent_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); + int (*set_vf_spoofchk)(struct net_device *netdev, int vf_id, bool ena); + int (*set_vf_link_state)(struct net_device *dev, int vf_id, int link_state); + int (*set_vf_mac)(struct net_device *netdev, int vf_id, u8 *mac); + int (*set_vf_rate)(struct net_device *netdev, int vf_id, int min_rate, int max_rate); + int (*set_vf_vlan)(struct net_device *dev, int vf_id, u16 vlan, u8 pri, __be16 proto); + int (*get_vf_config)(struct net_device *dev, int vf_id, struct ifla_vf_info *ivi); + int (*get_vf_stats)(struct net_device *dev, int vf_id, struct ifla_vf_stats *vf_stats); + void 
(*tx_timeout)(struct net_device *netdev, u32 txqueue); + int (*bridge_setlink)(struct net_device *netdev, struct nlmsghdr *nlh, + u16 flags, struct netlink_ext_ack *extack); + int (*bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, int nlflags); + u16 (*select_queue)(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev); + void (*get_eth_ctrl_stats)(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *eth_ctrl_stats); + void (*get_eth_mac_stats)(struct net_device *netdev, + struct ethtool_eth_mac_stats *eth_mac_stats); + void (*get_fec_stats)(struct net_device *netdev, struct ethtool_fec_stats *fec_stats); + int (*set_vf_trust)(struct net_device *netdev, int vf_id, bool trusted); int (*register_net)(void *priv, struct nbl_register_net_param *register_param, struct nbl_register_net_result *register_result); int (*unregister_net)(void *priv); int (*setup_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num, u16 net_vector_id); void (*remove_txrx_queues)(void *priv, u16 vsi_id); - int (*register_vsi_info)(void *priv, u16 vsi_index, u16 vsi_id, - u16 queue_offset, u16 queue_num); + int (*register_vsi_info)(void *priv, struct nbl_vsi_param *vsi_param); + int (*init_tx_rate)(void *priv, u16 vsi_id); int (*setup_q2vsi)(void *priv, u16 vsi_id); void (*remove_q2vsi)(void *priv, u16 vsi_id); int (*setup_rss)(void *priv, u16 vsi_id); void (*remove_rss)(void *priv, u16 vsi_id); - u32 (*get_chip_temperature)(void *priv); - u32 (*get_chip_temperature_max)(void *priv); - u32 (*get_chip_temperature_crit)(void *priv); - int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_module_temp_type type); + int (*setup_rss_indir)(void *priv, u16 vsi_id); + int (*check_offload_status)(void *priv); + u32 (*get_chip_temperature)(void *priv, enum nbl_hwmon_type type, u32 senser_id); + int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_hwmon_type type); - int (*alloc_rings)(void *priv, struct net_device *dev, - u16 tx_num, u16 rx_num, u16 desc_num); + int (*alloc_rings)(void *priv, struct net_device *dev, struct nbl_ring_param *param); + void (*cpu_affinity_init)(void *priv, u16 rings_num); void (*free_rings)(void *priv); int (*enable_napis)(void *priv, u16 vsi_index); void (*disable_napis)(void *priv, u16 vsi_index); void (*set_mask_en)(void *priv, bool enable); - int (*start_net_flow)(void *priv, struct net_device *dev, u16 vsi_id); + int (*start_net_flow)(void *priv, struct net_device *dev, u16 vsi_id, u16 vid, + bool trusted); void (*stop_net_flow)(void *priv, u16 vsi_id); + void (*clear_flow)(void *priv, u16 vsi_id); + int (*set_promisc_mode)(void *priv, u16 vsi_id, u16 mode); + int (*cfg_multi_mcast)(void *priv, u16 vsi, u16 enable); int (*set_lldp_flow)(void *priv, u16 vsi_id); void (*remove_lldp_flow)(void *priv, u16 vsi_id); int (*start_mgt_flow)(void *priv); @@ -87,18 +114,71 @@ struct nbl_service_ops { int (*set_spoof_check_addr)(void *priv, u8 *mac); u16 (*get_vsi_id)(void *priv, u16 func_id, u16 type); - void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id); + void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id); void (*debugfs_init)(void *priv); void (*debugfs_netops_create)(void *priv, u16 tx_queue_num, u16 rx_queue_num); void (*debugfs_ctrlops_create)(void *priv); void (*debugfs_exit)(void *priv); - int (*setup_net_resource_mgt)(void *priv, struct net_device *dev); + int (*setup_net_resource_mgt)(void *priv, struct net_device *dev, + u16 vlan_proto, u16 vlan_tci, 
u32 rate); void (*remove_net_resource_mgt)(void *priv); - int (*enable_lag_protocol)(void *priv, u16 vsi_id, bool lag_en); + int (*init_hw_stats)(void *priv); + int (*remove_hw_stats)(void *priv); + int (*get_rx_dropped)(void *priv, u64 *rx_dropped); + int (*enable_lag_protocol)(void *priv, u16 eth_id, bool lag_en); + int (*cfg_lag_hash_algorithm)(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type); + int (*cfg_lag_member_fwd)(void *priv, u16 eth_id, u16 lag_id, u8 fwd); + int (*cfg_lag_member_list)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_lag_member_up_attr)(void *priv, u16 eth_id, u16 lag_id, bool enable); + int (*cfg_bond_shaping)(void *priv, u8 eth_id, bool enable); + void (*cfg_bgid_back_pressure)(void *priv, u8 main_eth_id, u8 other_eth_id, bool enable); void (*set_sfp_state)(void *priv, struct net_device *netdev, u8 eth_id, bool open, bool is_force); int (*get_board_id)(void *priv); + void (*cfg_eth_bond_event)(void *priv, bool enable); + void (*get_board_info)(void *priv, struct nbl_board_port_info *board_info); + + /* rep associated */ + int (*rep_netdev_open)(struct net_device *netdev); + int (*rep_netdev_stop)(struct net_device *netdev); + netdev_tx_t (*rep_start_xmit)(struct sk_buff *skb, struct net_device *netdev); + void (*rep_get_stats64)(struct net_device *netdev, struct rtnl_link_stats64 *stats); + void (*rep_set_rx_mode)(struct net_device *dev); + int (*rep_set_mac)(struct net_device *dev, void *p); + int (*rep_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); + int (*rep_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); + int (*rep_setup_tc)(struct net_device *dev, enum tc_setup_type type, void *type_data); + int (*rep_get_phys_port_name)(struct net_device *dev, char *name, size_t len); + int (*rep_get_port_parent_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); + void (*get_rep_feature)(void *priv, struct nbl_register_net_result *register_result); + void (*get_rep_queue_num)(void *priv, u8 *base_queue_id, u8 *rep_queue_num); + int (*alloc_rep_queue_mgt)(void *priv, struct net_device *netdev); + void (*get_rep_queue_info)(void *priv, u16 *queue_num, u16 *queue_size); void (*get_user_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + int (*free_rep_queue_mgt)(void *priv); + void (*set_eswitch_mode)(void *priv, u16 switch_mode); + u16 (*get_eswitch_mode)(void *priv); + int (*alloc_rep_data)(void *priv, int num_vfs, u16 vf_base_vsi_id); + void (*free_rep_data)(void *priv); + void (*set_rep_netdev_info)(void *priv, void *rep_data); + void (*unset_rep_netdev_info)(void *priv); + int (*disable_phy_flow)(void *priv, u8 eth_id); + int (*enable_phy_flow)(void *priv, u8 eth_id); + void (*init_acl)(void *priv); + void (*uninit_acl)(void *priv); + int (*set_upcall_rule)(void *priv, u8 eth_id, u16 vsi_id); + int (*unset_upcall_rule)(void *priv, u8 eth_id); + int (*switchdev_init_cmdq)(void *priv); + int (*switchdev_deinit_cmdq)(void *priv); + int (*set_tc_flow_info)(void *priv); + int (*unset_tc_flow_info)(void *priv); + int (*get_tc_flow_info)(void *priv); + int (*register_indr_dev_tc_offload)(void *priv, struct net_device *netdev); + void (*unregister_indr_dev_tc_offload)(void *priv, struct net_device *netdev); + void (*set_lag_info)(void *priv, struct net_device *bond_netdev, u8 lag_id); + void (*unset_lag_info)(void *priv); + void (*set_netdev_ops)(void *priv, struct net_device_ops *net_device_ops, bool is_pf); /* ethtool */ void (*get_drvinfo)(struct net_device *netdev, struct 
ethtool_drvinfo *drvinfo); @@ -114,6 +194,9 @@ struct nbl_service_ops { void (*get_channels)(struct net_device *netdev, struct ethtool_channels *channels); int (*set_channels)(struct net_device *netdev, struct ethtool_channels *channels); u32 (*get_link)(struct net_device *netdev); + int (*get_link_ext_state)(struct net_device *netdev, + struct ethtool_link_ext_state_info *link_ext_state_info); + void (*get_link_ext_stats)(struct net_device *netdev, struct ethtool_link_ext_stats *stats); int (*get_ksettings)(struct net_device *netdev, struct ethtool_link_ksettings *cmd); int (*set_ksettings)(struct net_device *netdev, const struct ethtool_link_ksettings *cmd); void (*get_ringparam)(struct net_device *netdev, struct ethtool_ringparam *ringparam, @@ -123,6 +206,10 @@ struct nbl_service_ops { struct kernel_ethtool_ringparam *k_ringparam, struct netlink_ext_ack *extack); + int (*flash_device)(struct net_device *netdev, struct ethtool_flash *flash); + int (*get_dump_flag)(struct net_device *netdev, struct ethtool_dump *dump); + int (*get_dump_data)(struct net_device *netdev, struct ethtool_dump *dump, void *buffer); + int (*set_dump)(struct net_device *netdev, struct ethtool_dump *dump); int (*get_coalesce)(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_ec, struct netlink_ext_ack *extack); @@ -131,9 +218,11 @@ struct nbl_service_ops { struct netlink_ext_ack *extack); int (*get_rxnfc)(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs); + int (*set_rxnfc)(struct net_device *netdev, struct ethtool_rxnfc *cmd); u32 (*get_rxfh_indir_size)(struct net_device *netdev); u32 (*get_rxfh_key_size)(struct net_device *netdev); int (*get_rxfh)(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); + int (*set_rxfh)(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc); u32 (*get_msglevel)(struct net_device *netdev); void (*set_msglevel)(struct net_device *netdev, u32 msglevel); int (*get_regs_len)(struct net_device *netdev); @@ -146,6 +235,9 @@ struct nbl_service_ops { void (*self_test)(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data); u32 (*get_priv_flags)(struct net_device *netdev); int (*set_priv_flags)(struct net_device *netdev, u32 priv_flags); + void (*get_pause_stats)(struct net_device *netdev, struct ethtool_pause_stats *pause_stats); + void (*get_rmon_stats)(struct net_device *netdev, struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **range); int (*set_pause_param)(struct net_device *netdev, struct ethtool_pauseparam *param); void (*get_pause_param)(struct net_device *netdev, struct ethtool_pauseparam *param); int (*set_fec_param)(struct net_device *netdev, struct ethtool_fecparam *fec); @@ -153,7 +245,21 @@ struct nbl_service_ops { int (*get_ts_info)(struct net_device *netdev, struct ethtool_ts_info *ts_info); int (*set_phys_id)(struct net_device *netdev, enum ethtool_phys_id_state state); int (*nway_reset)(struct net_device *netdev); + void (*get_rep_strings)(struct net_device *netdev, u32 stringset, u8 *data); + int (*get_rep_sset_count)(struct net_device *netdev, int sset); + void (*get_rep_ethtool_stats)(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); + void (*get_wol)(struct net_device *netdev, struct ethtool_wolinfo *wol); + int (*set_wol)(struct net_device *netdev, struct ethtool_wolinfo *wol); + u16 (*get_rdma_cap_num)(void *priv); + void (*setup_rdma_id)(void *priv); + void (*remove_rdma_id)(void *priv); + void 
(*register_rdma)(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param); + void (*unregister_rdma)(void *priv, u16 vsi_id); + void (*register_rdma_bond)(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param); + void (*unregister_rdma_bond)(void *priv, u16 lag_id); u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); u64 (*get_real_hw_addr)(void *priv, u16 vsi_id); u16 (*get_function_id)(void *priv, u16 vsi_id); @@ -162,6 +268,7 @@ struct nbl_service_ops { int (*process_abnormal_event)(void *priv); void (*adapt_desc_gother)(void *priv); void (*process_flr)(void *priv, u16 vfid); + u16 (*covert_vfid_to_vsi_id)(void *priv, u16 vfid); void (*recovery_abnormal)(void *priv); void (*keep_alive)(void *priv); @@ -172,15 +279,87 @@ struct nbl_service_ops { struct netlink_ext_ack *extack); u32 (*get_adminq_tx_buf_size)(void *priv); + int (*emp_console_write)(void *priv, char *buf, size_t count); bool (*check_fw_heartbeat)(void *priv); bool (*check_fw_reset)(void *priv); bool (*get_product_flex_cap)(void *priv, enum nbl_flex_cap_type cap_type); bool (*get_product_fix_cap)(void *priv, enum nbl_fix_cap_type cap_type); + int (*add_tls_dev)(struct net_device *netdev, struct sock *sk, + enum tls_offload_ctx_dir direction, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn); + void (*del_tls_dev)(struct net_device *netdev, struct tls_context *tls_ctx, + enum tls_offload_ctx_dir direction); + int (*resync_tls_dev)(struct net_device *netdev, struct sock *sk, + u32 tcp_seq, u8 *rec_num, + enum tls_offload_ctx_dir direction); + int (*add_xdo_dev_state)(struct xfrm_state *x, struct netlink_ext_ack *extack); + void (*delete_xdo_dev_state)(struct xfrm_state *x); + void (*free_xdo_dev_state)(struct xfrm_state *x); + bool (*xdo_dev_offload_ok)(struct sk_buff *skb, struct xfrm_state *x); + void (*xdo_dev_state_advance_esn)(struct xfrm_state *x); + bool (*check_ipsec_status)(void *priv); + void (*handle_ipsec_event)(void *priv); + void (*configure_virtio_dev_msix)(void *priv, u16 vector); + void (*configure_rdma_msix_off)(void *priv, u16 vector); + void (*configure_virtio_dev_ready)(void *priv); int (*setup_st)(void *priv, void *st_table_param); void (*remove_st)(void *priv, void *st_table_param); u16 (*get_vf_base_vsi_id)(void *priv, u16 func_id); + int (*setup_vf_config)(void *priv, int num_vfs, bool is_flush); + void (*remove_vf_config)(void *priv); + void (*register_dev_name)(void *priv, u16 vsi_id, char *name); + void (*get_dev_name)(void *priv, u16 vsi_id, char *name); + + void (*get_mirror_table_id)(void *priv, u16 vsi_id, int dir, bool mirror_en, + u8 *mt_id); + int (*configure_mirror)(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id); + int (*configure_mirror_table)(void *priv, bool mirror_en, u16 func_id, u8 mt_id); + int (*clear_mirror_cfg)(void *priv, u16 func_id); + + int (*setup_vf_resource)(void *priv, int num_vfs); + void (*remove_vf_resource)(void *priv); + void (*cfg_fd_update_event)(void *priv, bool enable); + + void (*get_xdp_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + int (*set_xdp)(struct net_device *netdev, struct netdev_bpf *xdp); + void (*set_hw_status)(void *priv, enum nbl_hw_status hw_status); + void (*get_active_func_bitmaps)(void *priv, unsigned long *bitmap, int max_func); + void (*get_rdma_bw)(void *priv, int *rdma_bw); + void (*get_rdma_rate)(void *priv, int *rdma_rate); + void (*get_net_rate)(void *priv, int *net_rate); + int (*configure_rdma_bw)(void *priv, u8 eth_id, 
int rdma_bw); + int (*configure_pfc)(void *priv, u8 eth_id, u8 *pfc); + int (*configure_trust)(void *priv, u8 eth_id, u8 trust); + int (*configure_dscp2prio)(void *priv, u8 eth_id, const char *buf, size_t count); + int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); + int (*set_rate_limit)(void *priv, enum nbl_traffic_type type, u32 rate); + ssize_t (*trust_mode_show)(void *priv, u8 eth_id, char *buf); + ssize_t (*pfc_show)(void *priv, u8 eth_id, char *buf); + ssize_t (*dscp2prio_show)(void *priv, u8 eth_id, char *buf); + ssize_t (*pfc_buffer_size_show)(void *priv, u8 eth_id, char *buf); + + /* dcb nl ops */ + int (*ieee_setets)(struct net_device *netdev, struct ieee_ets *ets); + int (*ieee_getets)(struct net_device *netdev, struct ieee_ets *ets); + int (*ieee_setpfc)(struct net_device *netdev, struct ieee_pfc *pfc); + int (*ieee_getpfc)(struct net_device *netdev, struct ieee_pfc *pfc); + int (*ieee_setapp)(struct net_device *netdev, struct dcb_app *app); + int (*ieee_delapp)(struct net_device *netdev, struct dcb_app *app); + void (*dcbnl_getpfccfg)(struct net_device *netdev, int prio, u8 *setting); + void (*dcbnl_setpfccfg)(struct net_device *netdev, int prio, u8 set); + int (*dcbnl_getnumtcs)(struct net_device *netdev, int tcid, u8 *num); + u8 (*ieee_getdcbx)(struct net_device *netdev); + u8 (*ieee_setdcbx)(struct net_device *netdev, u8 mode); + u8 (*dcbnl_getstate)(struct net_device *netdev); + u8 (*dcbnl_setstate)(struct net_device *netdev, u8 state); + u8 (*dcbnl_getpfcstate)(struct net_device *netdev); + u8 (*dcbnl_getcap)(struct net_device *netdev, int capid, u8 *cap); + u16 (*get_vf_function_id)(void *priv, int vf_id); + void (*cfg_mirror_outputport_event)(void *priv, bool enable); }; struct nbl_service_ops_tbl { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h index 42c381f97afed6493dac307ed112beb5322c5b9a..07343f33220bb7b0d3cf42cb3e1763324e052cf8 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h @@ -20,12 +20,16 @@ #include #include #include +#include +#include #include #include #include #include #include +#include #include +#include #include #include #include @@ -35,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -43,9 +48,12 @@ #include #include #include +#include +#include /* ------ Basic definitions ------- */ #define NBL_DRIVER_NAME "nbl_core" +#define NBL_REP_DRIVER_NAME "nbl_rep" /* "product NO-V NO.R NO.B NO.SP NO" * product NO define: * 1 reserve for develop branch @@ -55,7 +63,7 @@ */ #define NBL_DRIVER_VERSION "1-1.1.100.0" -#define NBL_DRIVER_DEV_MAX 8 +#define NBL_DRIVER_DEV_MAX 24 #define NBL_PAIR_ID_GET_TX(id) ((id) * 2 + 1) #define NBL_PAIR_ID_GET_RX(id) ((id) * 2) @@ -68,6 +76,130 @@ #define NBL_FLOW_INDEX_BYTE_LEN 8 +#define NBL_RATE_MBPS_100G (100000) +#define NBL_RATE_MBPS_25G (25000) +#define NBL_RATE_MBPS_10G (10000) + +#define NBL_NEXT_ID(id, max) ({ typeof(id) _id = (id); ((_id) == (max) ? 
0 : (_id) + 1); }) +#define NBL_IPV6_U32LEN 4 + +/* macro for counter */ +#define NBL_FLOW_COUNT_NUM 8 +#define NBL_COUNTER_MAX_STAT_ID 2048 +/* counter_id + stat_id */ +#define NBL_COUNTER_MAX_ID 128 * 1024 + +#define NBL_TC_MCC_MEMBER_MAX 16 + +#define NBL_IP_VERSION_V4 4 +#define NBL_IP_VERSION_V6 6 +#define NBL_MAX_FUNC (520) +#define NBL_MAX_MTU 15 + +#define NBL_FLOW_TABLE_IPV4_DEFAULT_MASK 0xFFFFFFFF +#define NBL_FLOW_TABLE_L4_PORT_DEFAULT_MASK 0xFFFF +#define NBL_TC_MAX_PED_H_IDX 512 + +#define NBL_TC_PEDIT_SET_NODE_RES_PRO(node) ((node).pedit_proto = 1) +#define NBL_TC_PEDIT_GET_NODE_RES_PRO(node) ((node).pedit_proto) + +#define NBL_TC_PEDIT_INC_NODE_RES_EDITS(node) ((node).pedits++) +#define NBL_TC_PEDIT_DEC_NODE_RES_EDITS(node, dec) ((node).pedits -= dec) + +/* key element: key flag bitmap */ +#define NBL_FLOW_KEY_TABLE_IDX_FLAG (BIT_ULL(0)) +#define NBL_FLOW_KEY_INPORT8_FLAG (BIT_ULL(1)) +#define NBL_FLOW_KEY_INPORT4_FLAG (BIT_ULL(39)) +#define NBL_FLOW_KEY_INPORT2_FLAG (BIT_ULL(40)) // error +#define NBL_FLOW_KEY_INPORT2L_FLAG (BIT_ULL(41)) // error +#define NBL_FLOW_KEY_T_DIPV4_FLAG (BIT_ULL(2)) +#define NBL_FLOW_KEY_T_DIPV6_FLAG (BIT_ULL(3)) +#define NBL_FLOW_KEY_T_OPT_DATA_FLAG (BIT_ULL(4)) +#define NBL_FLOW_KEY_T_VNI_FLAG (BIT_ULL(5)) +#define NBL_FLOW_KEY_T_DSTMAC_FLAG (BIT_ULL(6)) // error +#define NBL_FLOW_KEY_T_SRCMAC_FLAG (BIT_ULL(7)) // error +#define NBL_FLOW_KEY_T_SVLAN_FLAG (BIT_ULL(8)) // error +#define NBL_FLOW_KEY_T_CVLAN_FLAG (BIT_ULL(9)) // error +#define NBL_FLOW_KEY_T_ETHERTYPE_FLAG (BIT_ULL(10)) // error +#define NBL_FLOW_KEY_T_SRCPORT_FLAG (BIT_ULL(11)) +#define NBL_FLOW_KEY_T_DSTPORT_FLAG (BIT_ULL(12)) +#define NBL_FLOW_KEY_T_NPROTO_FLAG (BIT_ULL(13)) // delete +#define NBL_FLOW_KEY_T_OPT_CLASS_FLAG (BIT_ULL(14)) +#define NBL_FLOW_KEY_T_PROTOCOL_FLAG (BIT_ULL(15)) +#define NBL_FLOW_KEY_T_TCPSTAT_FLAG (BIT_ULL(16)) // delete +#define NBL_FLOW_KEY_T_TOS_FLAG (BIT_ULL(17)) +#define NBL_FLOW_KEY_T_TTL_FLAG (BIT_ULL(18)) +#define NBL_FLOW_KEY_SIPV4_FLAG (BIT_ULL(19)) +#define NBL_FLOW_KEY_SIPV6_FLAG (BIT_ULL(20)) +#define NBL_FLOW_KEY_DIPV4_FLAG (BIT_ULL(21)) +#define NBL_FLOW_KEY_DIPV6_FLAG (BIT_ULL(22)) +#define NBL_FLOW_KEY_DSTMAC_FLAG (BIT_ULL(23)) +#define NBL_FLOW_KEY_SRCMAC_FLAG (BIT_ULL(24)) +#define NBL_FLOW_KEY_SVLAN_FLAG (BIT_ULL(25)) +#define NBL_FLOW_KEY_CVLAN_FLAG (BIT_ULL(26)) +#define NBL_FLOW_KEY_ETHERTYPE_FLAG (BIT_ULL(27)) +#define NBL_FLOW_KEY_SRCPORT_FLAG (BIT_ULL(28)) +#define NBL_FLOW_KEY_ICMP_TYPE_FLAG (BIT_ULL(28)) +#define NBL_FLOW_KEY_DSTPORT_FLAG (BIT_ULL(29)) +#define NBL_FLOW_KEY_ICMP_CODE_FLAG (BIT_ULL(29)) +#define NBL_FLOW_KEY_ARP_OP_FLAG (BIT_ULL(30)) // error +#define NBL_FLOW_KEY_ICMPV6_TYPE_FLAG (BIT_ULL(31)) // error +#define NBL_FLOW_KEY_PROTOCOL_FLAG (BIT_ULL(32)) +#define NBL_FLOW_KEY_TCPSTAT_FLAG (BIT_ULL(33)) +#define NBL_FLOW_KEY_TOS_FLAG (BIT_ULL(34)) +#define NBL_FLOW_KEY_DSCP_FLAG (BIT_ULL(34)) +#define NBL_FLOW_KEY_TTL_FLAG (BIT_ULL(35)) +#define NBL_FLOW_KEY_HOPLIMIT_FLAG (BIT_ULL(35)) +#define NBL_FLOW_KEY_RDMA_ACK_SEQ_FLAG (BIT_ULL(36)) // error +#define NBL_FLOW_KEY_RDMA_QPN_FLAG (BIT_ULL(37)) // error +#define NBL_FLOW_KEY_RDMA_OP_FLAG (BIT_ULL(38)) // error +#define NBL_FLOW_KEY_EXEHASH_FLAG (BIT_ULL(43)) +#define NBL_FLOW_KEY_DPHASH_FLAG (BIT_ULL(44)) +#define NBL_FLOW_KEY_RECIRC_FLAG (BIT_ULL(63)) + +/* action flag */ +#define NBL_FLOW_ACTION_METADATA_FLAG (BIT_ULL(1)) +#define NBL_FLOW_ACTION_DROP (BIT_ULL(2)) +#define NBL_FLOW_ACTION_REDIRECT (BIT_ULL(3)) +#define NBL_FLOW_ACTION_MIRRED (BIT_ULL(4)) 
+#define NBL_FLOW_ACTION_TUNNEL_ENCAP (BIT_ULL(5)) +#define NBL_FLOW_ACTION_TUNNEL_DECAP (BIT_ULL(6)) +#define NBL_FLOW_ACTION_COUNTER (BIT_ULL(7)) +#define NBL_FLOW_ACTION_SET_IPV4_SRC_IP (BIT_ULL(8)) +#define NBL_FLOW_ACTION_SET_IPV4_DST_IP (BIT_ULL(9)) +#define NBL_FLOW_ACTION_SET_IPV6_SRC_IP (BIT_ULL(10)) +#define NBL_FLOW_ACTION_SET_IPV6_DST_IP (BIT_ULL(11)) +#define NBL_FLOW_ACTION_SET_SRC_MAC (BIT_ULL(12)) +#define NBL_FLOW_ACTION_SET_DST_MAC (BIT_ULL(13)) +#define NBL_FLOW_ACTION_SET_SRC_PORT (BIT_ULL(14)) +#define NBL_FLOW_ACTION_SET_DST_PORT (BIT_ULL(15)) +#define NBL_FLOW_ACTION_SET_TTL (BIT_ULL(16)) +#define NBL_FLOW_ACTION_SET_IPV4_DSCP (BIT_ULL(17)) +#define NBL_FLOW_ACTION_SET_IPV6_DSCP (BIT_ULL(18)) +#define NBL_FLOW_ACTION_RSS (BIT_ULL(19)) +#define NBL_FLOW_ACTION_QUEUE (BIT_ULL(20)) +#define NBL_FLOW_ACTION_MARK (BIT_ULL(21)) +#define NBL_FLOW_ACTION_PUSH_INNER_VLAN (BIT_ULL(22)) +#define NBL_FLOW_ACTION_PUSH_OUTER_VLAN (BIT_ULL(23)) +#define NBL_FLOW_ACTION_POP_INNER_VLAN (BIT_ULL(24)) +#define NBL_FLOW_ACTION_POP_OUTER_VLAN (BIT_ULL(25)) +#define NBL_FLOW_ACTION_REPLACE_INNER_VLAN (BIT_ULL(26)) +#define NBL_FLOW_ACTION_REPLACE_SINGLE_INNER_VLAN (BIT_ULL(27)) +#define NBL_FLOW_ACTION_REPLACE_OUTER_VLAN (BIT_ULL(28)) +#define NBL_FLOW_ACTION_PHY_PORT (BIT_ULL(29)) +#define NBL_FLOW_ACTION_PORT_ID (BIT_ULL(30)) +#define NBL_FLOW_ACTION_INGRESS (BIT_ULL(31)) +#define NBL_FLOW_ACTION_EGRESS (BIT_ULL(32)) +#define NBL_FLOW_ACTION_IPV4 (BIT_ULL(33)) +#define NBL_FLOW_ACTION_IPV6 (BIT_ULL(34)) +#define NBL_FLOW_ACTION_CAR (BIT_ULL(35)) +#define NBL_FLOW_ACTION_MCC (BIT_ULL(36)) +#define NBL_FLOW_ACTION_MIRRED_ENCAP (BIT_ULL(37)) +#define NBL_FLOW_ACTION_META_RECIRC (BIT_ULL(38)) +#define NBL_FLOW_ACTION_STAT (BIT_ULL(39)) +#define NBL_ACTION_FLAG_OFFSET_MAX (BIT_ULL(40)) +extern struct list_head lag_resource_head; +extern struct mutex nbl_lag_mutex; #define SET_DEV_MIN_MTU(netdev, mtu) ((netdev)->min_mtu = (mtu)) #define SET_DEV_MAX_MTU(netdev, mtu) ((netdev)->max_mtu = (mtu)) @@ -77,21 +209,35 @@ /* Used for macros to pass checkpatch */ #define NBL_NAME(x) x +#define NBL_SET_INTR_COALESCE(param, tx_usecs, tx_max_frames, rx_usecs, rx_max_frames) \ +do { \ + typeof(param) __param = param; \ + __param->tx_coalesce_usecs = tx_usecs; \ + __param->tx_max_coalesced_frames = tx_max_frames; \ + __param->rx_coalesce_usecs = rx_usecs; \ + __param->rx_max_coalesced_frames = rx_max_frames; \ +} while (0) + enum nbl_product_type { NBL_LEONIS_TYPE, NBL_PRODUCT_MAX, }; enum nbl_flex_cap_type { + NBL_DUMP_FLOW_CAP, + NBL_DUMP_FD_CAP, NBL_SECURITY_ACCEL_CAP, NBL_FLEX_CAP_NBITS }; enum nbl_fix_cap_type { + NBL_TASK_OFFLOAD_NETWORK_CAP, NBL_TASK_FW_HB_CAP, NBL_TASK_FW_RESET_CAP, NBL_TASK_CLEAN_ADMINDQ_CAP, NBL_TASK_CLEAN_MAILBOX_CAP, + NBL_TASK_IPSEC_AGE_CAP, + NBL_ETH_SUPPORT_NRZ_RS_FEC_544, NBL_RESTOOL_CAP, NBL_HWMON_TEMP_CAP, NBL_ITR_DYNAMIC, @@ -100,7 +246,16 @@ enum nbl_fix_cap_type { NBL_PROCESS_FLR_CAP, NBL_RECOVERY_ABNORMAL_STATUS, NBL_TASK_KEEP_ALIVE, - NBL_DUMP_FLOW_CAP, + NBL_PMD_DEBUG, + NBL_XDP_CAP, + NBL_TASK_RESET_CAP, + NBL_TASK_RESET_CTRL_CAP, + NBL_QOS_SYSFS_CAP, + NBL_MIRROR_SYSFS_CAP, + NBL_HIGH_THROUGHPUT_CAP, + NBL_TASK_HEALTH_REPORT_TEMP_CAP, + NBL_TASK_HEALTH_REPORT_REBOOT_CAP, + NBL_DVN_DESC_REQ_SYSFS_CAP, NBL_FIX_CAP_NBITS }; @@ -112,7 +267,8 @@ enum nbl_sfp_module_state { enum { NBL_VSI_DATA = 0,/* default vsi in kernel or independent dpdk */ NBL_VSI_CTRL, - NBL_VSI_USER,/* dpdk used vsi in coexist dpdk */ + NBL_VSI_USER, /* dpdk used vsi in coexist dpdk */ + 
NBL_VSI_XDP,
	NBL_VSI_MAX,
};

@@ -126,6 +282,17 @@ enum {
	NBL_RX,
};

+enum nbl_hw_status {
+	NBL_HW_NOMAL,
+	NBL_HW_FATAL_ERR, /* most HW modules do not work normally, excluding pcie/emp */
+	NBL_HW_STATUS_MAX,
+};
+
+enum nbl_reset_event {
+	NBL_HW_FATAL_ERR_EVENT, /* most HW modules do not work normally, excluding pcie/emp */
+	NBL_HW_MAX_EVENT
+};
+
 /* ------ Params that go through multiple layers ------ */
 struct nbl_driver_info {
 #define NBL_DRIVER_VERSION_LEN_MAX (32)
@@ -142,13 +309,14 @@ struct nbl_func_caps {
	u32 support_lag:1;
	u32 has_grc:1;
	u32 has_factory_ctrl:1;
-	u32 need_pmd_debug:1;
+	u32 is_ocp:1;
	u32 rsv:23;
};

struct nbl_init_param {
	struct nbl_func_caps caps;
	enum nbl_product_type product_type;
+	bool is_rep;
	bool pci_using_dac;
};

@@ -169,6 +337,23 @@ struct nbl_txrx_queue_param {
	u16 rxcsum;
};

+struct nbl_tc_qidsc_info {
+	u16 count;
+	u16 offset;
+	u32 pad;
+	u64 max_tx_rate;
+};
+
+#define NBL_MAX_TC_NUM (8)
+struct nbl_tc_qidsc_param {
+	struct nbl_tc_qidsc_info info[NBL_MAX_TC_NUM];
+	bool enable;
+	u16 num_tc;
+	u16 origin_qps;
+	u16 vsi_id;
+	u8 gravity;
+};
+
 struct nbl_qid_map_table {
	u32 local_qid;
	u32 notify_addr_l;
@@ -235,28 +420,6 @@ struct nbl_queue_cfg_param {
	u16 half_offload_en;
};

-struct nbl_register_net_param {
-	u16 pf_bdf;
-	u64 vf_bar_start;
-	u64 vf_bar_size;
-	u16 total_vfs;
-	u16 offset;
-	u16 stride;
-	u64 pf_bar_start;
-};
-
-struct nbl_register_net_result {
-	u16 tx_queue_num;
-	u16 rx_queue_num;
-	u16 queue_size;
-	u16 rdma_enable;
-	u64 hw_features;
-	u64 features;
-	u16 max_mtu;
-	u16 queue_offset;
-	u8 mac[ETH_ALEN];
-};
-
 struct nbl_msix_info_param {
	u16 msix_num;
	struct msix_entry *msix_entries;
@@ -268,6 +431,12 @@ struct nbl_queue_stats {
	u64 descs;
};

+struct nbl_rep_stats {
+	u64 packets;
+	u64 bytes;
+	u64 dropped;
+};
+
 struct nbl_tx_queue_stats {
	u64 tso_packets;
	u64 tso_bytes;
@@ -279,6 +448,9 @@ struct nbl_tx_queue_stats {
	u64 tx_skb_free;
	u64 tx_desc_addr_err_cnt;
	u64 tx_desc_len_err_cnt;
+	u64 tls_encrypted_packets;
+	u64 tls_encrypted_bytes;
+	u64 tls_ooo_packets;
};

 struct nbl_rx_queue_stats {
@@ -293,6 +465,12 @@ struct nbl_rx_queue_stats {
	u64 rx_cache_empty;
	u64 rx_cache_busy;
	u64 rx_cache_waive;
+	u64 tls_decrypted_packets;
+	u64 tls_resync_req_num;
+	u64 xdp_tx_packets;
+	u64 xdp_redirect_packets;
+	u64 xdp_oversize_packets;
+	u64 xdp_drop_packets;
};

 struct nbl_stats {
@@ -306,6 +484,15 @@ struct nbl_stats {
	u64 tx_dma_busy;
	u64 tx_multicast_packets;
	u64 tx_unicast_packets;
+	u64 xdp_tx_packets;
+	u64 xdp_redirect_packets;
+	u64 xdp_oversize_packets;
+	u64 xdp_drop_packets;
+	u64 tls_encrypted_packets;
+	u64 tls_encrypted_bytes;
+	u64 tls_ooo_packets;
+	u64 tls_decrypted_packets;
+	u64 tls_resync_req_num;
	u64 rx_multicast_packets;
	u64 rx_unicast_packets;
	u64 tx_skb_free;
@@ -324,19 +511,47 @@ struct nbl_stats {
	u64 rx_bytes;
};

-struct nbl_queue_err_stats {
-	u16 dvn_pkt_drop_cnt;
-	u32 uvn_stat_pkt_drop;
-};
-
 struct nbl_priv_stats {
	u64 total_dvn_pkt_drop_cnt;
	u64 total_uvn_stat_pkt_drop;
};

-struct nbl_fc_info {
-	u32 rx_pause;
-	u32 tx_pause;
+struct nbl_vf_stats {
+	u64 rx_packets;
+	u64 tx_packets;
+	u64 rx_bytes;
+	u64 tx_bytes;
+	u64 broadcast;
+	u64 multicast;
+	u64 rx_dropped;
+	u64 tx_dropped;
+};
+
+struct nbl_ustore_stats {
+	u64 rx_drop_packets;
+	u64 rx_trun_packets;
+};
+
+struct nbl_hw_stats {
+	u64 *total_uvn_stat_pkt_drop;
+	struct nbl_ustore_stats start_ustore_stats;
+};
+
+struct nbl_eth_abnormal_stats {
+	/* detailed rx_errors: */
+	u64 rx_length_errors;
+	u64 rx_over_errors;
+	u64 rx_crc_errors;
+	u64
rx_frame_errors; + u64 rx_fifo_errors; + u64 rx_missed_errors; + + /* detailed tx_errors */ + u64 tx_aborted_errors; + u64 tx_carrier_errors; + u64 tx_fifo_errors; + u64 tx_heartbeat_errors; + u64 tx_window_errors; }; struct nbl_notify_param { @@ -344,6 +559,32 @@ struct nbl_notify_param { u16 tail_ptr; }; +#define NBL_LAG_MAX_PORTS 2 +#define NBL_LAG_VALID_PORTS 2 +#define NBL_LAG_MAX_NUM 2 +#define NBL_LAG_MAX_RESOURCE_NUM NBL_DRIVER_DEV_MAX + +struct nbl_lag_member { + struct netdev_lag_lower_state_info lower_state; + struct notifier_block notify_block; + struct netdev_net_notifier netdevice_nn; + struct list_head mem_list_node; + struct net_device *netdev; + bool is_bond_adev; + u16 vsi_id; + u8 lag_id; + u8 eth_id; + u8 logic_eth_id; + u8 bonded; +}; + +struct nbl_enable_lag_param { + bool enable; + u16 pa_ext_type_tbl_id; + u16 flow_tbl_id; + u16 upcall_queue; +}; + enum nbl_eth_speed { LINK_SPEED_100M = 0, LINK_SPEED_1000M = 1, @@ -355,56 +596,106 @@ enum nbl_eth_speed { LINK_SPEED_200G = 7 }; -struct nbl_phy_caps { - u32 speed; /* enum nbl_eth_speed */ - u32 fec_ability; - u32 pause_param; /* bit0 tx, bit1 rx */ +#define NBL_KTLS_IV_LEN 8 +#define NBL_KTLS_REC_LEN 8 + +struct nbl_ktls_offload_context_tx { + u32 index; + u32 expected_tcp; + u8 iv[NBL_KTLS_IV_LEN]; + u8 rec_num[NBL_KTLS_REC_LEN]; + bool ctx_post_pending; + struct tls_offload_context_tx *tx_ctx; }; -struct nbl_phy_state { - u32 current_speed; - u32 fec_mode; - struct nbl_fc_info fc; - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); - __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); +struct nbl_ktls_offload_context_rx { + u32 index; + u32 tcp_seq; + u8 rec_num[NBL_KTLS_REC_LEN]; }; -struct nbl_common_irq_num { - int mbx_irq_num; +struct aes_gcm_keymat { + u8 crypto_type; + u32 salt; + u32 icv_len; +#define NBL_IPSEC_KEY_LEN 8 + u32 aes_key[NBL_IPSEC_KEY_LEN]; + u64 seq_iv; }; -struct nbl_ctrl_irq_num { - int adminq_irq_num; - int abnormal_irq_num; +struct nbl_accel_esp_xfrm_attrs { + u8 is_ipv6; + u8 nat_flag; + u8 tunnel_mode; + u16 sport; + u16 dport; + u32 spi; + xfrm_address_t saddr; + xfrm_address_t daddr; + struct aes_gcm_keymat aes_gcm; +}; + +struct nbl_ipsec_esn_state { + u32 sn; + u32 esn; + u8 wrap_en : 1; + u8 overlap : 1; + u8 enable : 1; + u8 window_en : 1; + u8 option : 2; }; -#define NBL_PORT_KEY_ILLEGAL 0x0 -#define NBL_PORT_KEY_CAPABILITIES 0x1 -#define NBL_PORT_KEY_ENABLE 0x2 /* BIT(0): NBL_PORT_FLAG_ENABLE_NOTIFY */ -#define NBL_PORT_KEY_DISABLE 0x3 -#define NBL_PORT_KEY_ADVERT 0x4 -#define NBL_PORT_KEY_LOOPBACK 0x5 /* 0: disable eth loopback, 1: enable eth loopback */ -#define NBL_PORT_KEY_MODULE_SWITCH 0x6 /* 0: sfp off, 1: sfp on */ -#define NBL_PORT_KEY_MAC_ADDRESS 0x7 -#define NBL_PORT_KRY_LED_BLINK 0x8 +struct nbl_sa_search_key { + u16 family; + u32 mark; + __be32 spi; + xfrm_address_t daddr; +}; -enum { - NBL_PORT_SUBOP_READ = 1, - NBL_PORT_SUBOP_WRITE = 2, +struct nbl_ipsec_cfg_info { + struct nbl_sa_search_key sa_key; + bool vld; + + u32 lft_cnt; + u32 lft_diff; + u32 hard_round; + u32 soft_round; + u32 hard_remain; + u32 soft_remain; + + u16 vsi; + u8 limit_type; + u8 limit_enable; + u64 hard_limit; + u64 soft_limit; +}; + +struct nbl_ipsec_sa_entry { + struct nbl_ipsec_cfg_info cfg_info; + struct nbl_ipsec_esn_state esn_state; + struct nbl_accel_esp_xfrm_attrs attrs; + u32 index; }; -#define NBL_PORT_FLAG_ENABLE_NOTIFY BIT(0) -#define NBL_PORT_ENABLE_LOOPBACK 1 -#define NBL_PORT_DISABLE_LOOPBCK 0 -#define NBL_PORT_SFP_ON 1 -#define NBL_PORT_SFP_OFF 0 -#define NBL_PORT_KEY_KEY_SHIFT 56 -#define 
NBL_PORT_KEY_DATA_MASK 0xFFFFFFFFFFFF +union nbl_ipsec_lft_info { + u32 data; + struct { + u32 soft_sad_index : 11; + u32 soft_vld :1; + u32 rsv1 : 4; + u32 hard_sad_index : 11; + u32 hard_vld :1; + u32 rsv2 : 4; + }; +}; -struct nbl_port_key { - u32 id; /* port id */ - u32 subop; /* 1: read, 2: write */ - u64 data[]; /* [47:0]: data, [55:48]: rsvd, [63:56]: key */ +struct nbl_common_irq_num { + int mbx_irq_num; +}; + +struct nbl_ctrl_irq_num { + int adminq_irq_num; + int abnormal_irq_num; }; enum nbl_flow_ctrl { @@ -455,24 +746,9 @@ enum nbl_led_reg_ctrl { NBL_LED_REG_INACTIVE, }; -/* emp to ctrl dev notify */ -struct nbl_port_notify { - u32 id; - u32 speed; /* in 10 Mbps units */ - u8 link_state:1; /* 0:down, 1:up */ - u8 module_inplace:1; /* 0: not inplace, 1:inplace */ - u8 revd0:6; - u8 flow_ctrl; /* enum nbl_flow_ctrl */ - u8 fec; /* enum nbl_port_fec */ - u8 active_lanes; - u8 rsvd1[4]; - u64 advertising; /* enum nbl_port_cap */ - u64 lp_advertising; /* enum nbl_port_cap */ -}; - #define NBL_PORT_CAP_AUTONEG_MASK (BIT(NBL_PORT_CAP_AUTONEG)) #define NBL_PORT_CAP_FEC_MASK \ - (BIT(NBL_PORT_CAP_FEC_NONE) | BIT(NBL_PORT_CAP_FEC_RS) | BIT(NBL_PORT_CAP_FEC_BASER)) + (BIT(NBL_PORT_CAP_FEC_OFF) | BIT(NBL_PORT_CAP_FEC_RS) | BIT(NBL_PORT_CAP_FEC_BASER)) #define NBL_PORT_CAP_PAUSE_MASK (BIT(NBL_PORT_CAP_TX_PAUSE) | BIT(NBL_PORT_CAP_RX_PAUSE)) #define NBL_PORT_CAP_SPEED_1G_MASK\ (BIT(NBL_PORT_CAP_1000BASE_T) | BIT(NBL_PORT_CAP_1000BASE_X)) @@ -511,6 +787,7 @@ enum nbl_port_cap { NBL_PORT_CAP_RX_PAUSE, NBL_PORT_CAP_AUTONEG, NBL_PORT_CAP_FEC_NONE, + NBL_PORT_CAP_FEC_OFF = NBL_PORT_CAP_FEC_NONE, NBL_PORT_CAP_FEC_RS, NBL_PORT_CAP_FEC_BASER, NBL_PORT_CAP_1000BASE_T, @@ -539,6 +816,7 @@ enum nbl_port_cap { NBL_PORT_CAP_100GBASE_SR2_PAM4, NBL_PORT_CAP_100GBASE_CR2_PAM4, NBL_PORT_CAP_100G_AUI2_PAM4, + NBL_PORT_CAP_FEC_AUTONEG, NBL_PORT_CAP_MAX }; @@ -549,32 +827,23 @@ enum nbl_fw_port_speed { NBL_FW_PORT_SPEED_100G, }; -struct nbl_eth_link_info { - u8 link_status; - u32 link_speed; -}; - -struct nbl_port_state { - u64 port_caps; - u64 port_advertising; - u64 port_lp_advertising; - u32 link_speed; - u8 active_fc; - u8 active_fec; /* enum nbl_port_fec */ - u8 link_state; - u8 module_inplace; - u8 port_type; /* enum nbl_port_type */ - u8 port_max_rate; /* enum nbl_port_max_rate */ - u8 fw_port_max_speed; /* enum nbl_fw_port_speed */ -}; +static inline u32 nbl_port_speed_to_speed(enum nbl_fw_port_speed port_speed) +{ + switch (port_speed) { + case NBL_FW_PORT_SPEED_10G: + return SPEED_10000; + case NBL_FW_PORT_SPEED_25G: + return SPEED_25000; + case NBL_FW_PORT_SPEED_50G: + return SPEED_50000; + case NBL_FW_PORT_SPEED_100G: + return SPEED_100000; + default: + return SPEED_25000; + } -struct nbl_port_advertising { - u8 eth_id; - u64 speed_advert; - u8 active_fc; - u8 active_fec; /* enum nbl_port_fec */ - u8 autoneg; -}; + return SPEED_25000; +} #define PASSTHROUGH_FW_CMD_DATA_LEN (3072) struct nbl_passthrough_fw_cmd_param { @@ -585,13 +854,47 @@ struct nbl_passthrough_fw_cmd_param { u8 data[PASSTHROUGH_FW_CMD_DATA_LEN]; }; -#define NBL_RING_NUM_CMD_LEN (520) -struct nbl_fw_cmd_ring_num_param { +#define NBL_NET_RING_NUM_CMD_LEN (520) +struct nbl_fw_cmd_net_ring_num_param { u16 pf_def_max_net_qp_num; u16 vf_def_max_net_qp_num; - u16 net_max_qp_num[NBL_RING_NUM_CMD_LEN]; + u16 net_max_qp_num[NBL_NET_RING_NUM_CMD_LEN]; }; +#define NBL_RDMA_CAP_CMD_LEN (65) +struct nbl_fw_cmd_rdma_cap_param { + u32 valid; + u8 rdma_func_bitmaps[NBL_RDMA_CAP_CMD_LEN]; + u8 rsv[7]; +}; + +#define NBL_RDMA_MEM_TYPE_MAX (2) +struct 
nbl_fw_cmd_rdma_mem_type_param { + u32 mem_type; +}; + +#define NBL_VF_NUM_CMD_LEN (8) +struct nbl_fw_cmd_vf_num_param { + u32 valid; + u16 vf_max_num[NBL_VF_NUM_CMD_LEN]; +}; + +#define NBL_ST_INFO_NAME_LEN (64) +#define NBL_ST_INFO_NETDEV_MAX (8) +#define NBL_ST_INFO_RESERVED_LEN (376) +struct nbl_st_info_param { + u8 version; + u8 bus; + u8 devid; + u8 function; + u16 domain; + u16 rsv0; + char driver_name[NBL_ST_INFO_NAME_LEN]; + char driver_ver[NBL_ST_INFO_NAME_LEN]; + char netdev_name[NBL_ST_INFO_NETDEV_MAX][NBL_ST_INFO_NAME_LEN]; + u8 rsv[NBL_ST_INFO_RESERVED_LEN]; +} __packed; + static inline u64 nbl_speed_to_link_mode(unsigned int speed, u8 autoneg) { u64 link_mode = 0; @@ -638,6 +941,10 @@ static inline int nbl_##_struct##_size_is_not_equal_to_define(void) \ return check[0]; \ } +#define nbl_list_entry_is_head(pos, head, member) \ + (&pos->member == (head)) + + /** * list_is_first -- tests whether @ list is the first entry in list @head * @list: the entry to test @@ -669,15 +976,335 @@ static inline int nbl_list_empty(const struct list_head *head) return READ_ONCE(head->next) == head; } +/** + * nbl_read_poll_timeout - Periodically poll an address until a condition is + * met or a timeout occurs + * @op: accessor function (takes @args as its arguments) + * @val: Variable to read the value into + * @cond: Break condition (usually involving @val) + * @sleep_us: Maximum time to sleep between reads in us (0 + * tight-loops). Should be less than ~20ms since usleep_range + * is used (see Documentation/timers/timers-howto.rst). + * @timeout_us: Timeout in us, 0 means never timeout + * @sleep_before_read: if it is true, sleep @sleep_us before read. + * @args: arguments for @op poll + * + * Returns 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @args is stored in @val. Must not + * be called from atomic context if sleep_us or timeout_us are used. + * + * When available, you'll probably want to use one of the specialized + * macros defined below rather than this macro directly. + */ +#define nbl_read_poll_timeout(op, val, cond, sleep_us, timeout_us, \ + sleep_before_read, args...) \ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __sleep_us = (sleep_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + might_sleep_if((__sleep_us) != 0); \ + if (sleep_before_read && __sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + for (;;) { \ + (val) = op(args); \ + if (cond) \ + break; \ + if (__timeout_us && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = op(args); \ + break; \ + } \ + if (__sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) + #define NBL_OPS_CALL(func, para) \ ({ typeof(func) _func = (func); \ (!_func) ? 
0 : _func para; })

-enum nbl_module_temp_type {
-	NBL_MODULE_TEMP,
-	NBL_MODULE_TEMP_MAX,
-	NBL_MODULE_TEMP_CRIT,
-	NBL_MODULE_TEMP_TYPE_MAX,
+enum {
+	NBL_TC_PORT_TYPE_INVALID = 0,
+	NBL_TC_PORT_TYPE_VSI,
+	NBL_TC_PORT_TYPE_ETH,
+	NBL_TC_PORT_TYPE_BOND,
+};
+
+struct nbl_tc_port {
+	u32 id:24;
+	u32 type:8;
+};
+
+enum nbl_cmd_status {
+	NBL_CMDQ_SUCCESS = 0,
+	/* failed establishing cmd */
+	NBL_CMDQ_PARAM_ERR = -1,
+	NBL_CMDQ_NOT_SUPP = -3,
+	NBL_CMDQ_NO_MEMORY = -4,
+	NBL_CMDQ_NOT_READY = -5,
+	NBL_CMDQ_UNDONE = -6,
+	/* failed sending cmd */
+	NBL_CMDQ_CQ_ERR = -100,
+	NBL_CMDQ_CQ_FULL = -102,
+	NBL_CMDQ_CQ_NOT_READY = -103,
+	NBL_CMDQ_CQ_ERR_PARAMS = -104,
+	NBL_CMDQ_CQ_ERR_BUFFER = -105,
+	/* failed executing cmd */
+	NBL_CMDQ_FAILED = -200,
+	NBL_CMDQ_NOBUF_ERR = -201,
+	NBL_CMDQ_TIMEOUT_ERR = -202,
+	NBL_CMDQ_NOHIT_ERR = -203,
+	NBL_CMDQ_RESEND_FAIL = -204,
+	NBL_CMDQ_RESET_FAIL = -205,
+	NBL_CMDQ_NEED_RESEND = -206,
+	NBL_CMDQ_NEED_RESET = -207,
+};
+
+struct nbl_fdir_l2 {
+	u8 dst_mac[ETH_ALEN]; /* dest MAC address */
+	u8 src_mac[ETH_ALEN]; /* src MAC address */
+	u16 ether_type; /* for NON_IP_L2 */
+};
+
+struct nbl_fdir_l4 {
+	u16 dst_port;
+	u16 src_port;
+	u8 tcp_flag;
+};
+
+struct nbl_fdir_l3 {
+	union {
+		u32 addr;
+		u8 v6_addr[NBL_IPV6_ADDR_LEN_AS_U8];
+	} src_ip, dst_ip;
+
+	u8 ip_ver;
+	u8 tos;
+	u8 ttl;
+	u8 proto;
+};
+
+struct nbl_tc_fdir_tnl {
+	u32 flags;
+	u32 vni;
+};
+
+struct nbl_port_mcc {
+	u16 dport_id:12;
+	u16 port_type:4;
+};
+
+#define NBL_VLAN_TYPE_ETH_BASE 1027
+#define NBL_VLAN_TPID_VALUE 0x8100
+#define NBL_QINQ_TPID_VALUE 0x88A8
+struct nbl_vlan {
+	u16 vlan_tag;
+	u16 eth_proto;
+	u32 port_id;
+	u8 port_type;
+};
+
+/* encap info */
+#define NBL_FLOW_ACTION_ENCAP_TOTAL_LEN 128
+#define NBL_FLOW_ACTION_ENCAP_OFFSET_LEN 9
+#define NBL_FLOW_ACTION_ENCAP_HALF_LEN 45
+#define NBL_FLOW_ACTION_ENCAP_MAX_LEN 90
+
+struct nbl_encap_key {
+	struct ip_tunnel_key ip_tun_key;
+	void *tc_tunnel;
+};
+
+struct nbl_encap_entry {
+	struct nbl_encap_key key;
+	unsigned char hw_dst[ETH_ALEN];
+
+	struct net_device *out_dev;
+	u8 encap_buf[NBL_FLOW_ACTION_ENCAP_TOTAL_LEN];
+	u16 encap_size;
+	u16 encap_idx;
+	u32 vni;
+	u32 ref_cnt;
+};
+
+union nbl_flow_encap_offset_tbl_u {
+	struct nbl_flow_encap_offset_tbl {
+		u16 phid3_offset:7;
+		u16 phid2_offset:7;
+		u16 l4_ck_mod:3;
+		u16 l3_ck_en:1;
+		u16 len_offset1:7;
+		u16 len_en1:1;
+		u16 len_offset0:7;
+		u16 len_en0:1;
+		u16 dscp_offset:10;
+		u16 vlan_offset:7;
+		u16 vni_offset:7;
+		u16 sport_offset:7;
+		u16 tnl_len:7;
+	} __packed info;
+#define NBL_FLOW_ENCAP_OFFSET_TBL_WIDTH (sizeof(struct nbl_flow_encap_offset_tbl) \
+					 / sizeof(u32))
+	u32 data[NBL_FLOW_ENCAP_OFFSET_TBL_WIDTH];
+} __packed;
+
+struct nbl_tc_pedit_headers {
+	struct ethhdr eth;
+	struct iphdr ip4;
+	struct ipv6hdr ip6;
+	struct tcphdr tcp;
+	struct udphdr udp;
+};
+
+enum nbl_flow_ped_type {
+	/* pedit type: defaults to the src dir when the type is ip or mac */
+	NBL_FLOW_PED_UMAC_TYPE = 0,
+	NBL_FLOW_PED_DMAC_TYPE,
+	NBL_FLOW_PED_UIP_TYPE,
+	NBL_FLOW_PED_DIP_TYPE,
+
+	/* mac & ip pedits have both src and dst; _D_TYPE marks the dst dir */
+	NBL_FLOW_PED_UMAC_D_TYPE,
+	NBL_FLOW_PED_DMAC_D_TYPE,
+	NBL_FLOW_PED_UIP_D_TYPE,
+	NBL_FLOW_PED_DIP_D_TYPE,
+
+	NBL_FLOW_PED_RES_MAX,
+	/* the following types do not need to store resources */
+	NBL_FLOW_PED_UIP6_TYPE,
+	NBL_FLOW_PED_DIP6_TYPE,
+	NBL_FLOW_PED_RECORD_MAX,
+};
+
+struct nbl_tc_pedit_node_res {
+	void *pedit_node[NBL_FLOW_PED_RES_MAX];
+	u32 pedits:30;
+	u32 pedit_val:1;
+	/* 0 tcp, 1 udp */
+	u32 pedit_proto:1;
+};
+
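[Reviewer note: the val/mask pair of struct nbl_tc_pedit_headers is the usual accumulation target when tc FLOW_ACTION_MANGLE entries are translated into hardware pedit nodes. A minimal sketch of that folding step follows, assuming the driver walks flow_action entries the way other offload drivers do; nbl_tc_pedit_fold() and its bounds check are illustrative only and not part of this patch.]

/* assumes <net/flow_offload.h> for struct flow_action_entry */
static int nbl_tc_pedit_fold(struct nbl_tc_pedit_headers *val,
			     struct nbl_tc_pedit_headers *mask,
			     const struct flow_action_entry *act)
{
	/* pedit masks carry the bits to *keep*, so invert for the write mask */
	u32 write_mask = ~act->mangle.mask;
	u32 v = act->mangle.val & write_mask;
	u32 offset = act->mangle.offset;
	u8 *vbase, *mbase;
	size_t hdr_len;

	switch (act->mangle.htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		vbase = (u8 *)&val->eth;
		mbase = (u8 *)&mask->eth;
		hdr_len = sizeof(val->eth);
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		vbase = (u8 *)&val->ip4;
		mbase = (u8 *)&mask->ip4;
		hdr_len = sizeof(val->ip4);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (offset + sizeof(u32) > hdr_len)
		return -EINVAL;

	/* accumulate: several mangle entries may touch the same header word */
	*(u32 *)(vbase + offset) |= v;
	*(u32 *)(mbase + offset) |= write_mask;
	return 0;
}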
+struct nbl_tc_pedit_info { + struct nbl_tc_pedit_headers val; + struct nbl_tc_pedit_headers mask; + struct nbl_tc_pedit_node_res pedit_node; +}; + +struct nbl_rule_action { + u64 flag; /* action flag, eg:set ipv4 src/redirect */ + u32 drop_flag:1; /* drop or forward */ + u32 counter_id:31; + + u32 port_id:15; + u32 port_type:8; + u32 action_cnt:5; /* different action type total cnt */ + u32 next_stg_sel:4; + + u32 vni; + u16 encap_size; + u16 encap_idx:15; + u16 encap_parse_ok:1; + + u32 encap_out_dev_ifindex:14; + u32 encap_in_hw:1; + u32 dscp:8; + u32 lag_id:4; + u32 mcc_cnt:5; + + struct nbl_port_mcc port_mcc[NBL_TC_MCC_MEMBER_MAX]; + struct nbl_vlan vlan; + struct ip_tunnel_info *tunnel; + struct nbl_encap_key encap_key; + union nbl_flow_encap_offset_tbl_u encap_idx_info; + u8 encap_buf[NBL_FLOW_ACTION_ENCAP_TOTAL_LEN]; + struct net_device *in_port; + struct net_device *tc_tun_encap_out_dev; + struct nbl_tc_pedit_info tc_pedit_info; +}; + +struct nbl_fdir_fltr { + struct nbl_fdir_l2 l2_data_outer; + struct nbl_fdir_l2 l2_mask_outer; + struct nbl_fdir_l2 l2_data; + struct nbl_fdir_l2 l2_mask; + + struct nbl_fdir_l3 ip; + struct nbl_fdir_l3 ip_mask; + struct nbl_fdir_l3 ip_outer; + struct nbl_fdir_l3 ip_mask_outer; + + struct nbl_fdir_l4 l4; + struct nbl_fdir_l4 l4_mask; + struct nbl_fdir_l4 l4_outer; + struct nbl_fdir_l4 l4_mask_outer; + + struct nbl_tc_fdir_tnl tnl; + struct nbl_tc_fdir_tnl tnl_mask; + + u16 svlan_type; + u16 svlan_tag; + u16 cvlan_type; + u16 cvlan_tag; + u16 svlan_mask; + u16 cvlan_mask; + u32 tnl_flag:1; + u32 tnl_cnt:1; + u32 vlan_cnt:2; + u32 metadata : 16; + u32 acl_flow:1; + u32 dir:1; + u32 rsv:1; + + u8 lag_id; + u16 port; + bool is_cvlan; +}; + +/** + * struct nbl_flow_pattern_conf: + * input : storage key info from pattern + * input_set : storage key flag in order to get ptype + */ +struct nbl_flow_pattern_conf { + struct nbl_fdir_fltr input; + struct net_device *input_dev; + u8 flow_send; + u8 graph_idx; + u16 pp_flag; + u64 key_flag; +}; + +struct nbl_flow_index_key { + union { + u64 cookie; + u8 data[NBL_FLOW_INDEX_BYTE_LEN]; + }; +}; + +struct nbl_tc_flow_param { + struct nbl_tc_port in; + struct nbl_tc_port out; + struct nbl_tc_port mirror_out; + struct nbl_flow_pattern_conf filter; + struct nbl_rule_action act; + struct nbl_flow_index_key key; + struct ip_tunnel_info *tunnel; + bool encap; + struct nbl_common_info *common; + struct nbl_service_mgt *serv_mgt; +}; + +struct nbl_stats_param { + struct flow_cls_offload *f; +}; + +enum nbl_hwmon_type { + NBL_HWMON_TEMP_INPUT, + NBL_HWMON_TEMP_MAX, + NBL_HWMON_TEMP_CRIT, + NBL_HWMON_TEMP_HIGHEST, + NBL_HWMON_TEMP_TYPE_MAX, }; struct nbl_load_p4_param { @@ -692,10 +1319,19 @@ struct nbl_load_p4_param { bool end; }; -struct nbl_board_port_info { - u8 eth_num; - u8 eth_speed; - u8 rsv[6]; +#define NBL_ACL_TCAM_KEY_LEN 5 +#define NBL_ACL_TCAM_KEY_MAX 16 + +struct nbl_acl_tcam_key_param { + u8 data[NBL_ACL_TCAM_KEY_LEN]; +} __packed; + +struct nbl_acl_tcam_param { + union nbl_acl_tcam_info { + struct nbl_acl_tcam_key_param key[NBL_ACL_TCAM_KEY_MAX]; + u8 data[NBL_ACL_TCAM_KEY_LEN * NBL_ACL_TCAM_KEY_MAX]; + } info; + u8 len; }; enum { @@ -790,4 +1426,54 @@ struct nbl_abnormal_event_info { u32 other_abnormal_info; }; +enum nbl_performance_mode { + NBL_QUIRKS_NO_TOE, + NBL_QUIRKS_UVN_PREFETCH_ALIGN, +}; + +extern int performance_mode; +extern int adaptive_rxbuf_len_disable; + +struct nbl_vsi_param { + u16 vsi_id; + u16 queue_offset; + u16 queue_num; + u8 index; +}; + +struct nbl_ring_param { + u16 tx_ring_num; + 
	u16 rx_ring_num;
+	u16 xdp_ring_offset; /* xdp-vsi queues share the data-vsi queues' vectors */
+	u16 queue_size;
+};
+
+enum nbl_trust_mode {
+	NBL_TRUST_MODE_8021P,
+	NBL_TRUST_MODE_DSCP
+};
+
+#define NBL_VSI_MAX_ID 1024
+
+struct nbl_mtu_entry {
+	u32 ref_count;
+	u16 mtu_value;
+};
+
+#define NBL_MAX_PFC_PRIORITIES (8)
+#define NBL_DSCP_MAX (64)
+#define NBL_TC_MAX_BW (100)
+#define NBL_MAX_TC_NUM (8)
+#define NBL_MAX_BW (100)
+
+enum nbl_traffic_type {
+	NBL_TRAFFIC_RDMA_TYPE,
+	NBL_TRAFFIC_NET_TYPE,
+};
+
+struct nbl_napi_struct {
+	struct napi_struct napi;
+	atomic_t is_irq;
+};
+
 #endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c
index 7ca401c59c82ee3a575e9a2d2203dd492c3241e9..813fea2c0fdef872c16f496478307b0150baf288 100644
--- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c
@@ -10,6 +10,9 @@
 static struct nbl_software_tool_table nbl_st_table;
 static struct dentry *nbl_debugfs_root;

+/* global cmdq and tc flow related structures */
+static struct nbl_tc_insts_info g_tc_insts[NBL_TC_FLOW_INST_COUNT] = { { 0 } };
+
 static struct nbl_product_base_ops nbl_product_base_ops[NBL_PRODUCT_MAX] = {
	{
		.phy_init = nbl_phy_init_leonis,
@@ -20,6 +23,10 @@ static struct nbl_product_base_ops nbl_product_base_ops[NBL_PRODUCT_MAX] = {
		.chan_remove = nbl_chan_remove_common,
	},
};
+static char *nblst_cdevnode(const struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "nblst/%s", dev_name(dev));
+}

 int nbl_core_start(struct nbl_adapter *adapter, struct nbl_init_param *param)
 {
@@ -63,10 +70,12 @@ struct nbl_adapter *nbl_core_init(struct pci_dev *pdev, struct nbl_init_param *p
	NBL_COMMON_TO_DMA_DEV(common) = &pdev->dev;
	NBL_COMMON_TO_DEBUG_LVL(common) |= NBL_DEBUG_ALL;
	NBL_COMMON_TO_VF_CAP(common) = param->caps.is_vf;
+	NBL_COMMON_TO_OCP_CAP(common) = param->caps.is_ocp;
	NBL_COMMON_TO_PCI_USING_DAC(common) = param->pci_using_dac;
	NBL_COMMON_TO_PCI_FUNC_ID(common) = PCI_FUNC(pdev->devfn);
	common->devid = PCI_SLOT(pdev->devfn);
	common->bus = pdev->bus->number;
+	common->tc_inst_id = NBL_TC_FLOW_INST_COUNT;
	common->product_type = param->product_type;

	memcpy(&adapter->init_param, param, sizeof(adapter->init_param));
@@ -120,6 +129,7 @@ struct nbl_adapter *nbl_core_init(struct pci_dev *pdev, struct nbl_init_param *p
 void nbl_core_remove(struct nbl_adapter *adapter)
 {
	struct device *dev;
+	struct nbl_product_base_ops *product_base_ops;

	if (!adapter)
@@ -138,6 +148,63 @@ void nbl_core_remove(struct nbl_adapter *adapter)
	devm_kfree(dev, adapter);
 }

+void nbl_tc_set_cmdq_info(int (*send_cmdq)(void *, const void *, void *),
+			  void *priv, u8 index)
+{
+	g_tc_insts[index].send_cmdq = send_cmdq;
+	g_tc_insts[index].chan_mgt = priv;
+}
+
+void nbl_tc_unset_cmdq_info(u8 index)
+{
+	g_tc_insts[index].send_cmdq = NULL;
+	g_tc_insts[index].chan_mgt = NULL;
+	g_tc_insts[index].locked = 0;
+}
+
+void nbl_tc_set_flow_info(void *priv, u8 index)
+{
+	g_tc_insts[index].tc_flow_mgt = priv;
+}
+
+void nbl_tc_unset_flow_info(u8 index)
+{
+	g_tc_insts[index].tc_flow_mgt = NULL;
+}
+
+void *nbl_tc_get_flow_info(u8 index)
+{
+	return g_tc_insts[index].tc_flow_mgt;
+}
+
+u8 nbl_tc_alloc_inst_id(void)
+{
+	u8 inst_id = 0;
+
+	spin_lock(&nbl_tc_flow_inst_lock);
+	for (inst_id = 0; inst_id < NBL_TC_FLOW_INST_COUNT; inst_id++)
+		if (!g_tc_insts[inst_id].locked) {
+			g_tc_insts[inst_id].locked = 1;
+			spin_unlock(&nbl_tc_flow_inst_lock);
+			return inst_id;
+		}
+
+	/* return invalid index */
+
+u8 nbl_tc_alloc_inst_id(void)
+{
+	u8 inst_id = 0;
+
+	spin_lock(&nbl_tc_flow_inst_lock);
+	for (inst_id = 0; inst_id < NBL_TC_FLOW_INST_COUNT; inst_id++) {
+		if (!g_tc_insts[inst_id].locked) {
+			g_tc_insts[inst_id].locked = 1;
+			spin_unlock(&nbl_tc_flow_inst_lock);
+			return inst_id;
+		}
+	}
+
+	/* all slots taken, return the invalid index */
+	spin_unlock(&nbl_tc_flow_inst_lock);
+	return NBL_TC_FLOW_INST_COUNT;
+}
+
+int nbl_tc_call_inst_cmdq(u8 inst_id, const void *hdr, void *cmd)
+{
+	void *priv = NULL;
+
+	if (!g_tc_insts[inst_id].chan_mgt || !g_tc_insts[inst_id].send_cmdq)
+		return NBL_CMDQ_NOT_READY;
+
+	priv = g_tc_insts[inst_id].chan_mgt;
+	return g_tc_insts[inst_id].send_cmdq(priv, hdr, cmd);
+}
+
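+/*
+ * Minimal usage sketch of the instance registry (illustrative only;
+ * send_fn, chan_mgt, hdr and cmd are hypothetical caller-side names):
+ *
+ *	u8 id = nbl_tc_alloc_inst_id();
+ *
+ *	if (id < NBL_TC_FLOW_INST_COUNT) {
+ *		nbl_tc_set_cmdq_info(send_fn, chan_mgt, id);
+ *		ret = nbl_tc_call_inst_cmdq(id, hdr, cmd);
+ *		...
+ *		nbl_tc_unset_cmdq_info(id);
+ *	}
+ */
+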
 int nbl_st_init(struct nbl_software_tool_table *st_table)
 {
 	dev_t devid;
@@ -152,6 +219,7 @@ int nbl_st_init(struct nbl_software_tool_table *st_table)
 
 	st_table->cls = class_create("nblst_cls");
 
+	st_table->cls->devnode = nblst_cdevnode;
 	if (IS_ERR(st_table->cls)) {
 		unregister_chrdev(st_table->major, "nblst");
 		unregister_chrdev_region(st_table->devno, NBL_ST_MAX_DEVICE_NUM);
@@ -198,7 +266,9 @@ static void nbl_get_func_param(struct pci_dev *pdev, kernel_ulong_t driver_data,
 	param->caps.support_lag = NBL_CAP_SUPPORT_LAG(driver_data);
 	param->caps.has_user = NBL_CAP_IS_USER(driver_data);
 	param->caps.has_grc = NBL_CAP_IS_GRC(driver_data);
+	param->caps.is_blk = NBL_CAP_IS_BLK(driver_data);
 	param->caps.is_nic = NBL_CAP_IS_NIC(driver_data);
+	param->caps.is_ocp = NBL_CAP_IS_OCP(driver_data);
 	param->caps.has_factory_ctrl = NBL_CAP_IS_FACTORY_CTRL(driver_data);
 
 	if (NBL_CAP_IS_LEONIS(driver_data))
@@ -297,28 +367,63 @@ static void nbl_remove(struct pci_dev *pdev)
 static void nbl_shutdown(struct pci_dev *pdev)
 {
 	struct nbl_adapter *adapter = pci_get_drvdata(pdev);
+	struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+	bool wol_ena = common->wol_ena;
 
 	if (!NBL_COMMON_TO_VF_CAP(NBL_ADAPTER_TO_COMMON(adapter)))
 		nbl_remove(pdev);
 
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wol_ena);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
 	dev_info(&pdev->dev, "nbl shutdown OK\n");
 }
 
 static __maybe_unused int nbl_sriov_configure(struct pci_dev *pdev, int num_vfs)
 {
+	struct nbl_adapter *adapter = pci_get_drvdata(pdev);
 	int err;
 
 	if (!num_vfs) {
 		pci_disable_sriov(pdev);
+		if (!adapter)
+			return 0;
+
+		nbl_dev_remove_vf_config(adapter);
+
+		err = nbl_dev_destroy_rep(adapter);
+		if (err) {
+			dev_err(&pdev->dev, "nbl destroy repr dev failed %d!\n", err);
+			return err;
+		}
 		return 0;
 	}
 
+	/* Register pf_name to the AF first, because vf_name depends on pf_name */
+	nbl_dev_register_dev_name(adapter);
 	err = pci_enable_sriov(pdev, num_vfs);
 	if (err) {
 		dev_err(&pdev->dev, "nbl enable sriov failed %d!\n", err);
 		return err;
 	}
 
+	err = nbl_dev_create_rep(adapter, num_vfs);
+	if (err) {
+		dev_err(&pdev->dev, "nbl create repr dev failed %d!\n", err);
+		pci_disable_sriov(pdev);
+		return err;
+	}
+
+	err = nbl_dev_setup_vf_config(adapter, num_vfs);
+	if (err) {
+		dev_err(&pdev->dev, "nbl setup vf config failed %d!\n", err);
+		pci_disable_sriov(pdev);
+		nbl_dev_destroy_rep(adapter);
+		return err;
+	}
+
 	return num_vfs;
 }
 
@@ -366,19 +471,19 @@ static const struct pci_device_id nbl_id_table[] = {
 	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_OCP), .driver_data =
 		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
 		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
-		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)},
 	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX_OCP), .driver_data =
 		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
 		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
-		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)},
 	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_BASE_T_OCP), .driver_data =
 		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
 		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
-		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT)},
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)},
 	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX_BASE_T_OCP), .driver_data =
 		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
 		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
-		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)},
 	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120), .driver_data =
 		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
 		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
@@ -398,19 +503,22 @@ static const struct pci_device_id nbl_id_table[] = {
 	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_OCP), .driver_data =
 		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
 		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
-		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)},
 	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX_OCP), .driver_data =
 		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
 		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
-		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)},
 	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_BASE_T_OCP), .driver_data =
 		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
 		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
-		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)},
 	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX_BASE_T_OCP), .driver_data =
 		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
 		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
-		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)},
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18100_VF), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_VF_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT)},
 	/* required as sentinel */
 	{ 0, }
 };
@@ -433,13 +541,13 @@ static int __maybe_unused nbl_resume(struct device *dev)
 }
 
 static SIMPLE_DEV_PM_OPS(nbl_pm_ops, nbl_suspend, nbl_resume);
-
 static struct pci_driver nbl_driver = {
 	.name = NBL_DRIVER_NAME,
 	.id_table = nbl_id_table,
 	.probe = nbl_probe,
 	.remove = nbl_remove,
 	.shutdown = nbl_shutdown,
+	.sriov_configure = nbl_sriov_configure,
 	.driver.pm = &nbl_pm_ops,
 };
 
@@ -454,6 +562,10 @@ static int __init nbl_module_init(void)
 		goto wq_create_failed;
 	}
 
+	mutex_init(&nbl_lag_mutex);
+	spin_lock_init(&nbl_tc_flow_inst_lock);
+	INIT_LIST_HEAD(&lag_resource_head);
+
 	nbl_st_init(nbl_get_st_table());
 	nbl_debugfs_init();