diff --git a/0215-net-hns3-add-start-stop-Tx-datapath-request-for-MP.patch b/0215-net-hns3-add-start-stop-Tx-datapath-request-for-MP.patch new file mode 100644 index 0000000000000000000000000000000000000000..386f8e80a59131de813224ce9c1bb6079d78da5e --- /dev/null +++ b/0215-net-hns3-add-start-stop-Tx-datapath-request-for-MP.patch @@ -0,0 +1,188 @@ +From 15c150affb3d486df5e7a4ab55e3ed1cdf8504ef Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Mon, 26 Jul 2021 18:59:39 +0800 +Subject: [PATCH] net/hns3: add start/stop Tx datapath request for MP + +Currently, hns3 PMD has supported start/stop RxTx datapath request message +between the primary and secondary processes. However, there are some cases +only to start/stop Tx datapath. This patch adds start/stop Tx datapath +request for MP. + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.h | 4 +++- + drivers/net/hns3/hns3_mp.c | 50 ++++++++++++++++++++++++++++++++++-------- + drivers/net/hns3/hns3_mp.h | 3 +++ + drivers/net/hns3/hns3_rxtx.c | 4 ++-- + drivers/net/hns3/hns3_rxtx.h | 6 +++++ + 5 files changed, 55 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 8e66d9f..2e48ff6 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -699,7 +699,9 @@ struct hns3_vtag_cfg { + /* Request types for IPC. */ + enum hns3_mp_req_type { + HNS3_MP_REQ_START_RXTX = 1, +- HNS3_MP_REQ_STOP_RXTX, ++ HNS3_MP_REQ_STOP_RXTX = 2, ++ HNS3_MP_REQ_START_TX = 3, ++ HNS3_MP_REQ_STOP_TX = 4, + HNS3_MP_REQ_MAX + }; + +diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c +index a8485f5..cd514ac 100644 +--- a/drivers/net/hns3/hns3_mp.c ++++ b/drivers/net/hns3/hns3_mp.c +@@ -73,6 +73,7 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + struct hns3_mp_param *res = (struct hns3_mp_param *)mp_res.param; + const struct hns3_mp_param *param = + (const struct hns3_mp_param *)mp_msg->param; ++ eth_tx_prep_t prep = NULL; + struct rte_eth_dev *dev; + int ret; + +@@ -87,19 +88,23 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + PMD_INIT_LOG(INFO, "port %u starting datapath", + dev->data->port_id); + hns3_set_rxtx_function(dev); +- rte_mb(); +- mp_init_msg(dev, &mp_res, param->type); +- res->result = 0; +- ret = rte_mp_reply(&mp_res, peer); + break; + case HNS3_MP_REQ_STOP_RXTX: + PMD_INIT_LOG(INFO, "port %u stopping datapath", + dev->data->port_id); + hns3_set_rxtx_function(dev); +- rte_mb(); +- mp_init_msg(dev, &mp_res, param->type); +- res->result = 0; +- ret = rte_mp_reply(&mp_res, peer); ++ break; ++ case HNS3_MP_REQ_START_TX: ++ PMD_INIT_LOG(INFO, "port %u starting Tx datapath", ++ dev->data->port_id); ++ dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep); ++ dev->tx_pkt_prepare = prep; ++ break; ++ case HNS3_MP_REQ_STOP_TX: ++ PMD_INIT_LOG(INFO, "port %u stopping Tx datapath", ++ dev->data->port_id); ++ dev->tx_pkt_burst = hns3_dummy_rxtx_burst; ++ dev->tx_pkt_prepare = NULL; + break; + default: + rte_errno = EINVAL; +@@ -107,9 +112,24 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + dev->data->port_id); + return -rte_errno; + } ++ ++ rte_mb(); ++ mp_init_msg(dev, &mp_res, param->type); ++ res->result = 0; ++ ret = rte_mp_reply(&mp_res, peer); ++ + return ret; + } + ++static bool ++mp_req_type_is_valid(enum hns3_mp_req_type type) ++{ ++ return type == HNS3_MP_REQ_START_RXTX || ++ type == HNS3_MP_REQ_STOP_RXTX || ++ type == 
HNS3_MP_REQ_START_TX || ++ type == HNS3_MP_REQ_STOP_TX; ++} ++ + /* + * Broadcast request of stopping/starting data-path to secondary processes. + * +@@ -132,7 +152,7 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum hns3_mp_req_type type) + + if (rte_eal_process_type() == RTE_PROC_SECONDARY || !hw->secondary_cnt) + return; +- if (type != HNS3_MP_REQ_START_RXTX && type != HNS3_MP_REQ_STOP_RXTX) { ++ if (!mp_req_type_is_valid(type)) { + hns3_err(hw, "port %u unknown request (req_type %d)", + dev->data->port_id, type); + return; +@@ -189,6 +209,18 @@ void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev) + mp_req_on_rxtx(dev, HNS3_MP_REQ_STOP_RXTX); + } + ++void ++hns3_mp_req_stop_tx(struct rte_eth_dev *dev) ++{ ++ mp_req_on_rxtx(dev, HNS3_MP_REQ_STOP_TX); ++} ++ ++void ++hns3_mp_req_start_tx(struct rte_eth_dev *dev) ++{ ++ mp_req_on_rxtx(dev, HNS3_MP_REQ_START_TX); ++} ++ + /* + * Initialize by primary process. + */ +diff --git a/drivers/net/hns3/hns3_mp.h b/drivers/net/hns3/hns3_mp.h +index 1a73598..e0e4aea 100644 +--- a/drivers/net/hns3/hns3_mp.h ++++ b/drivers/net/hns3/hns3_mp.h +@@ -7,6 +7,9 @@ + + void hns3_mp_req_start_rxtx(struct rte_eth_dev *dev); + void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev); ++void hns3_mp_req_start_tx(struct rte_eth_dev *dev); ++void hns3_mp_req_stop_tx(struct rte_eth_dev *dev); ++ + int hns3_mp_init_primary(void); + void hns3_mp_uninit_primary(void); + int hns3_mp_init_secondary(void); +diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c +index d3fbe08..7d8176f 100644 +--- a/drivers/net/hns3/hns3_rxtx.c ++++ b/drivers/net/hns3/hns3_rxtx.c +@@ -4309,7 +4309,7 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev) + #endif + } + +-static eth_tx_burst_t ++eth_tx_burst_t + hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) + { + struct hns3_adapter *hns = dev->data->dev_private; +@@ -4346,7 +4346,7 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) + return hns3_xmit_pkts; + } + +-static uint16_t ++uint16_t + hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) +diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h +index 56c1b80..141de7a 100644 +--- a/drivers/net/hns3/hns3_rxtx.h ++++ b/drivers/net/hns3/hns3_rxtx.h +@@ -729,6 +729,12 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev, + const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev); + void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev); + void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev); ++eth_tx_burst_t hns3_get_tx_function(struct rte_eth_dev *dev, ++ eth_tx_prep_t *prep); ++uint16_t hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused, ++ struct rte_mbuf **pkts __rte_unused, ++ uint16_t pkts_n __rte_unused); ++ + uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id); + void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, + uint8_t gl_idx, uint16_t gl_value); +-- +2.7.4 + diff --git a/0216-net-hns3-support-set-link-up-down-for-PF.patch b/0216-net-hns3-support-set-link-up-down-for-PF.patch new file mode 100644 index 0000000000000000000000000000000000000000..e7601760cd13b9c0de7fcbb1d42f03cc3ce8eebd --- /dev/null +++ b/0216-net-hns3-support-set-link-up-down-for-PF.patch @@ -0,0 +1,287 @@ +From 0a3e6a5d6e2ab6eec0a16db1d5d5f5d0b75bcf8b Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Mon, 26 Jul 2021 18:59:40 +0800 +Subject: [PATCH] net/hns3: support set link up/down for PF + +This patch adds set link up/down feature. 
RxTx datapath and link status +will be disabled when dev_set_link_down() is called, and can be enabled by +dev_start() or dev_set_link_up(). + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 107 ++++++++++++++++++++++++++++++++++++++--- + drivers/net/hns3/hns3_ethdev.h | 11 +++-- + drivers/net/hns3/hns3_rxtx.c | 28 ++++++++++- + drivers/net/hns3/hns3_rxtx.h | 2 + + 4 files changed, 138 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index a374fa7..7d37004 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -103,6 +103,7 @@ static int hns3_restore_fec(struct hns3_hw *hw); + static int hns3_query_dev_fec_info(struct hns3_hw *hw); + static int hns3_do_stop(struct hns3_adapter *hns); + static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds); ++static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable); + + void hns3_ether_format_addr(char *buf, uint16_t size, + const struct rte_ether_addr *ether_addr) +@@ -2924,6 +2925,88 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) + } + + static int ++hns3_dev_set_link_up(struct rte_eth_dev *dev) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ /* ++ * The "tx_pkt_burst" will be restored. But the secondary process does ++ * not support the mechanism for notifying the primary process. ++ */ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ++ hns3_err(hw, "secondary process does not support to set link up."); ++ return -ENOTSUP; ++ } ++ ++ /* ++ * If device isn't started Rx/Tx function is still disabled, setting ++ * link up is not allowed. But it is probably better to return success ++ * to reduce the impact on the upper layer. ++ */ ++ if (hw->adapter_state != HNS3_NIC_STARTED) { ++ hns3_info(hw, "device isn't started, can't set link up."); ++ return 0; ++ } ++ ++ if (!hw->set_link_down) ++ return 0; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_cfg_mac_mode(hw, true); ++ if (ret) { ++ rte_spinlock_unlock(&hw->lock); ++ hns3_err(hw, "failed to set link up, ret = %d", ret); ++ return ret; ++ } ++ ++ hw->set_link_down = false; ++ hns3_start_tx_datapath(dev); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return 0; ++} ++ ++static int ++hns3_dev_set_link_down(struct rte_eth_dev *dev) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ /* ++ * The "tx_pkt_burst" will be set to dummy function. But the secondary ++ * process does not support the mechanism for notifying the primary ++ * process. ++ */ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ++ hns3_err(hw, "secondary process does not support to set link down."); ++ return -ENOTSUP; ++ } ++ ++ /* ++ * If device isn't started or the API has been called, link status is ++ * down, return success. 
++ */ ++ if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down) ++ return 0; ++ ++ rte_spinlock_lock(&hw->lock); ++ hns3_stop_tx_datapath(dev); ++ ret = hns3_cfg_mac_mode(hw, false); ++ if (ret) { ++ hns3_start_tx_datapath(dev); ++ rte_spinlock_unlock(&hw->lock); ++ hns3_err(hw, "failed to set link down, ret = %d", ret); ++ return ret; ++ } ++ ++ hw->set_link_down = true; ++ rte_spinlock_unlock(&hw->lock); ++ ++ return 0; ++} ++ ++static int + hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status) + { + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); +@@ -5576,6 +5659,7 @@ static int + hns3_do_start(struct hns3_adapter *hns, bool reset_queue) + { + struct hns3_hw *hw = &hns->hw; ++ bool link_en; + int ret; + + ret = hns3_update_queue_map_configure(hns); +@@ -5600,7 +5684,8 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue) + return ret; + } + +- ret = hns3_cfg_mac_mode(hw, true); ++ link_en = hw->set_link_down ? false : true; ++ ret = hns3_cfg_mac_mode(hw, link_en); + if (ret) { + PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret); + goto err_config_mac_mode; +@@ -5731,6 +5816,7 @@ hns3_dev_start(struct rte_eth_dev *dev) + { + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; ++ bool old_state = hw->set_link_down; + int ret; + + PMD_INIT_FUNC_TRACE(); +@@ -5740,12 +5826,17 @@ hns3_dev_start(struct rte_eth_dev *dev) + rte_spinlock_lock(&hw->lock); + hw->adapter_state = HNS3_NIC_STARTING; + ++ /* ++ * If the dev_set_link_down() API has been called, the "set_link_down" ++ * flag can be cleared by dev_start() API. In addition, the flag should ++ * also be cleared before calling hns3_do_start() so that MAC can be ++ * enabled in dev_start stage. ++ */ ++ hw->set_link_down = false; + ret = hns3_do_start(hns, true); +- if (ret) { +- hw->adapter_state = HNS3_NIC_CONFIGURED; +- rte_spinlock_unlock(&hw->lock); +- return ret; +- } ++ if (ret) ++ goto do_start_fail; ++ + ret = hns3_map_rx_interrupt(dev); + if (ret) + goto map_rx_inter_err; +@@ -5801,6 +5892,8 @@ hns3_dev_start(struct rte_eth_dev *dev) + hns3_stop_all_txqs(dev); + map_rx_inter_err: + (void)hns3_do_stop(hns); ++do_start_fail: ++ hw->set_link_down = old_state; + hw->adapter_state = HNS3_NIC_CONFIGURED; + rte_spinlock_unlock(&hw->lock); + +@@ -7345,6 +7438,8 @@ static const struct eth_dev_ops hns3_eth_dev_ops = { + .mac_addr_set = hns3_set_default_mac_addr, + .set_mc_addr_list = hns3_set_mc_mac_addr_list, + .link_update = hns3_dev_link_update, ++ .dev_set_link_up = hns3_dev_set_link_up, ++ .dev_set_link_down = hns3_dev_set_link_down, + .rss_hash_update = hns3_dev_rss_hash_update, + .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, + .reta_update = hns3_dev_rss_reta_update, +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 2e48ff6..0e4e426 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -481,6 +481,11 @@ struct hns3_hw { + struct hns3_cmq cmq; + struct hns3_mbx_resp_status mbx_resp; /* mailbox response */ + struct hns3_mac mac; ++ /* ++ * This flag indicates dev_set_link_down() API is called, and is cleared ++ * by dev_set_link_up() or dev_start(). ++ */ ++ bool set_link_down; + unsigned int secondary_cnt; /* Number of secondary processes init'd. */ + struct hns3_tqp_stats tqp_stats; + /* Include Mac stats | Rx stats | Tx stats */ +@@ -699,9 +704,9 @@ struct hns3_vtag_cfg { + /* Request types for IPC. 
*/ + enum hns3_mp_req_type { + HNS3_MP_REQ_START_RXTX = 1, +- HNS3_MP_REQ_STOP_RXTX = 2, +- HNS3_MP_REQ_START_TX = 3, +- HNS3_MP_REQ_STOP_TX = 4, ++ HNS3_MP_REQ_STOP_RXTX, ++ HNS3_MP_REQ_START_TX, ++ HNS3_MP_REQ_STOP_TX, + HNS3_MP_REQ_MAX + }; + +diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c +index 7d8176f..0f222b3 100644 +--- a/drivers/net/hns3/hns3_rxtx.c ++++ b/drivers/net/hns3/hns3_rxtx.c +@@ -20,6 +20,7 @@ + #include "hns3_rxtx.h" + #include "hns3_regs.h" + #include "hns3_logs.h" ++#include "hns3_mp.h" + + #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1) + #define HNS3_RX_RING_PREFETCTH_MASK 3 +@@ -4372,6 +4373,7 @@ hns3_trace_rxtx_function(struct rte_eth_dev *dev) + + void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) + { ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct hns3_adapter *hns = eth_dev->data->dev_private; + eth_tx_prep_t prep = NULL; + +@@ -4379,7 +4381,9 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) + __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) { + eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev); + eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status; +- eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep); ++ eth_dev->tx_pkt_burst = hw->set_link_down ? ++ hns3_dummy_rxtx_burst : ++ hns3_get_tx_function(eth_dev, &prep); + eth_dev->tx_pkt_prepare = prep; + eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status; + hns3_trace_rxtx_function(eth_dev); +@@ -4703,3 +4707,25 @@ hns3_enable_rxd_adv_layout(struct hns3_hw *hw) + if (hns3_dev_rxd_adv_layout_supported(hw)) + hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1); + } ++ ++void ++hns3_stop_tx_datapath(struct rte_eth_dev *dev) ++{ ++ dev->tx_pkt_burst = hns3_dummy_rxtx_burst; ++ dev->tx_pkt_prepare = NULL; ++ rte_wmb(); ++ /* Disable tx datapath on secondary process. */ ++ hns3_mp_req_stop_tx(dev); ++ /* Prevent crashes when queues are still in use. */ ++ rte_delay_ms(dev->data->nb_tx_queues); ++} ++ ++void ++hns3_start_tx_datapath(struct rte_eth_dev *dev) ++{ ++ eth_tx_prep_t prep = NULL; ++ ++ dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep); ++ dev->tx_pkt_prepare = prep; ++ hns3_mp_req_start_tx(dev); ++} +diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h +index 141de7a..cd7c21c 100644 +--- a/drivers/net/hns3/hns3_rxtx.h ++++ b/drivers/net/hns3/hns3_rxtx.h +@@ -766,5 +766,7 @@ void hns3_enable_rxd_adv_layout(struct hns3_hw *hw); + int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset); + int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset); + void hns3_tx_push_init(struct rte_eth_dev *dev); ++void hns3_stop_tx_datapath(struct rte_eth_dev *dev); ++void hns3_start_tx_datapath(struct rte_eth_dev *dev); + + #endif /* _HNS3_RXTX_H_ */ +-- +2.7.4 + diff --git a/0217-net-hns3-fix-queue-flow-action-validation.patch b/0217-net-hns3-fix-queue-flow-action-validation.patch new file mode 100644 index 0000000000000000000000000000000000000000..0c6f67ec8a23fa112196e5c671e502dec93706b4 --- /dev/null +++ b/0217-net-hns3-fix-queue-flow-action-validation.patch @@ -0,0 +1,58 @@ +From 15c37af398c3a22b5f46aff8abfc9166f949567c Mon Sep 17 00:00:00 2001 +From: Chengchang Tang +Date: Mon, 30 Aug 2021 16:26:49 +0800 +Subject: [PATCH] net/hns3: fix queue flow action validation + +The used_rx_queues only takes effect after device is started, and +its value is incorrect before the device is started. 
Therefore, it +is not suitable for flow action to use it to verify the queue index +before the device is started. + +E.g. Enable dedicated queue in bonding device will configure a queue +flow action before start its slave devices. The above problem will +make this reasonable flow action configuration fail. + +This patch use the nb_rx_queues from the configuration phase to +achieve verification. + +Fixes: a951c1ed3ab5 ("net/hns3: support different numbers of Rx and Tx queues") +Fixes: f8e7fcbfd0b8 ("net/hns3: support flow action of queue region") +Cc: stable@dpdk.org + +Signed-off-by: Chengchang Tang +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_flow.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c +index fc77979c5f..841e0b9da3 100644 +--- a/drivers/net/hns3/hns3_flow.c ++++ b/drivers/net/hns3/hns3_flow.c +@@ -275,10 +275,10 @@ hns3_handle_action_queue(struct rte_eth_dev *dev, + struct hns3_hw *hw = &hns->hw; + + queue = (const struct rte_flow_action_queue *)action->conf; +- if (queue->index >= hw->used_rx_queues) { ++ if (queue->index >= hw->data->nb_rx_queues) { + hns3_err(hw, "queue ID(%u) is greater than number of " + "available queue (%u) in driver.", +- queue->index, hw->used_rx_queues); ++ queue->index, hw->data->nb_rx_queues); + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + action, "Invalid queue ID in PF"); +@@ -308,8 +308,8 @@ hns3_handle_action_queue_region(struct rte_eth_dev *dev, + + if ((!rte_is_power_of_2(conf->queue_num)) || + conf->queue_num > hw->rss_size_max || +- conf->queue[0] >= hw->used_rx_queues || +- conf->queue[0] + conf->queue_num > hw->used_rx_queues) { ++ conf->queue[0] >= hw->data->nb_rx_queues || ++ conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, action, + "Invalid start queue ID and queue num! the start queue " +-- +2.33.0 + diff --git a/0218-net-hns3-fix-taskqueue-pair-reset-command.patch b/0218-net-hns3-fix-taskqueue-pair-reset-command.patch new file mode 100644 index 0000000000000000000000000000000000000000..6576e44815fa5d22f5d72abc36f015bbaf08b92c --- /dev/null +++ b/0218-net-hns3-fix-taskqueue-pair-reset-command.patch @@ -0,0 +1,35 @@ +From 9c5fac6cc28c70fe549e60b3765ddef5a58d76f3 Mon Sep 17 00:00:00 2001 +From: Chengchang Tang +Date: Mon, 30 Aug 2021 16:26:50 +0800 +Subject: [PATCH] net/hns3: fix taskqueue pair reset command + +This new taskqueue pair reset command is used incorrectly, resulting in +the new command not taking effect. + +This patch fixes the incorrect use. 
+ +Fixes: 6911e7c22c61 ("net/hns3: fix long task queue pairs reset time") +Cc: stable@dpdk.org + +Signed-off-by: Chengchang Tang +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_rxtx.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c +index 0f222b37f9..481872e395 100644 +--- a/drivers/net/hns3/hns3_rxtx.c ++++ b/drivers/net/hns3/hns3_rxtx.c +@@ -697,7 +697,7 @@ hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status) + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); + req = (struct hns3_reset_cmd *)desc.data; +- hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_RCB_B, 1); ++ hns3_set_bit(req->fun_reset_rcb, HNS3_CFG_RESET_RCB_B, 1); + + /* + * The start qid should be the global qid of the first tqp of the +-- +2.33.0 + diff --git a/0219-net-hns3-fix-Tx-push-capability.patch b/0219-net-hns3-fix-Tx-push-capability.patch new file mode 100644 index 0000000000000000000000000000000000000000..a89a5f41cf0e8da4a7d42390e99afa5b66b769ee --- /dev/null +++ b/0219-net-hns3-fix-Tx-push-capability.patch @@ -0,0 +1,53 @@ +From 85289d2ec86fa522962d6599521af0a2f604ac52 Mon Sep 17 00:00:00 2001 +From: "Min Hu (Connor)" +Date: Mon, 30 Aug 2021 16:26:51 +0800 +Subject: [PATCH] net/hns3: fix Tx push capability + +This patch fixes Tx push capability to be compatible with Kunpeng 920, +as Tx push is only supported on Kunpeng 930. + +Fixes: 23e317dd1fbf ("net/hns3: support Tx push quick doorbell for performance") +Cc: stable@dpdk.org + +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_cmd.c | 3 +++ + drivers/net/hns3/hns3_cmd.h | 1 + + 2 files changed, 4 insertions(+) + +diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c +index 928f938536..6a1e634684 100644 +--- a/drivers/net/hns3/hns3_cmd.c ++++ b/drivers/net/hns3/hns3_cmd.c +@@ -423,6 +423,7 @@ hns3_get_caps_name(uint32_t caps_id) + } dev_caps[] = { + { HNS3_CAPS_FD_QUEUE_REGION_B, "fd_queue_region" }, + { HNS3_CAPS_PTP_B, "ptp" }, ++ { HNS3_CAPS_TX_PUSH_B, "tx_push" }, + { HNS3_CAPS_PHY_IMP_B, "phy_imp" }, + { HNS3_CAPS_TQP_TXRX_INDEP_B, "tqp_txrx_indep" }, + { HNS3_CAPS_HW_PAD_B, "hw_pad" }, +@@ -492,6 +493,8 @@ hns3_parse_capability(struct hns3_hw *hw, + hns3_warn(hw, "ignore PTP capability due to lack of " + "rxd advanced layout capability."); + } ++ if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B)) ++ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1); + if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B)) + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1); + if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B)) +diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h +index 88683dfaaa..a4683de0aa 100644 +--- a/drivers/net/hns3/hns3_cmd.h ++++ b/drivers/net/hns3/hns3_cmd.h +@@ -315,6 +315,7 @@ enum HNS3_CAPS_BITS { + */ + HNS3_CAPS_FD_QUEUE_REGION_B = 2, + HNS3_CAPS_PTP_B, ++ HNS3_CAPS_TX_PUSH_B = 6, + HNS3_CAPS_PHY_IMP_B = 7, + HNS3_CAPS_TQP_TXRX_INDEP_B, + HNS3_CAPS_HW_PAD_B, +-- +2.33.0 + diff --git a/0220-examples-kni-close-port-before-exit.patch b/0220-examples-kni-close-port-before-exit.patch new file mode 100644 index 0000000000000000000000000000000000000000..8032bdffe0111daddf205279f87ce0a7396f1a8f --- /dev/null +++ b/0220-examples-kni-close-port-before-exit.patch @@ -0,0 +1,30 @@ +From 96fdb80048e279289e012fafe762f5b53e2ecd23 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Tue, 14 Sep 2021 14:02:19 +0800 +Subject: [PATCH 01/17] examples/kni: close port before exit + +This patch 
adds dev_close() step to release network adapter resources +when kni free. + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +Acked-by: Ferruh Yigit +--- + examples/kni/main.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/examples/kni/main.c b/examples/kni/main.c +index fe93b8618..40e1790c4 100644 +--- a/examples/kni/main.c ++++ b/examples/kni/main.c +@@ -1031,6 +1031,7 @@ kni_free_kni(uint16_t port_id) + if (ret != 0) + RTE_LOG(ERR, APP, "Failed to stop port %d: %s\n", + port_id, rte_strerror(-ret)); ++ rte_eth_dev_close(port_id); + + return 0; + } +-- +2.23.0 + diff --git a/0221-net-hns3-fix-residual-MAC-after-setting-default-MAC.patch b/0221-net-hns3-fix-residual-MAC-after-setting-default-MAC.patch new file mode 100644 index 0000000000000000000000000000000000000000..73f59f3a2885fc4c542b92791410af0c1601adec --- /dev/null +++ b/0221-net-hns3-fix-residual-MAC-after-setting-default-MAC.patch @@ -0,0 +1,141 @@ +From 5fcd00f784f9b984bf1f8a084a6be32816585717 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Wed, 22 Sep 2021 11:41:51 +0800 +Subject: [PATCH 02/17] net/hns3: fix residual MAC after setting default MAC + +This problem occurs in the following scenarios: +1) reset is encountered when the adapter is running. +2) set a new default MAC address + +After the above two steps, the old default MAC address should be not +take effect. But the current behavior is contrary to that. This is due +to the change of the "default_addr_setted" in hw->mac from 'true' to +'false' after the reset. As a result, the old MAC address is not removed +when the new default MAC address is set. This variable controls whether +to delete the old default MAC address when setting the default MAC +address. It is only used when the mac_addr_set API is called for the +first time. In fact, when a unicast MAC address is deleted, if the +address isn't in the MAC address table, the driver doesn't return +failure. So this patch remove the redundant and troublesome variables to +resolve this problem. 
+ +Fixes: 7d7f9f80bbfb ("net/hns3: support MAC address related operations") +Cc: stable@dpdk.org + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 38 ++++++++++------------------------ + drivers/net/hns3/hns3_ethdev.h | 1 - + 2 files changed, 11 insertions(+), 28 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 2fb0c466c..d6228601f 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1651,7 +1651,7 @@ hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + + static int + hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, +- uint32_t idx, __rte_unused uint32_t pool) ++ __rte_unused uint32_t idx, __rte_unused uint32_t pool) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +@@ -1682,8 +1682,6 @@ hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + return ret; + } + +- if (idx == 0) +- hw->mac.default_addr_setted = true; + rte_spinlock_unlock(&hw->lock); + + return ret; +@@ -1748,30 +1746,19 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_ether_addr *oaddr; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- bool default_addr_setted; + int ret, ret_val; + +- /* +- * It has been guaranteed that input parameter named mac_addr is valid +- * address in the rte layer of DPDK framework. +- */ +- oaddr = (struct rte_ether_addr *)hw->mac.mac_addr; +- default_addr_setted = hw->mac.default_addr_setted; +- if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr)) +- return 0; +- + rte_spinlock_lock(&hw->lock); +- if (default_addr_setted) { +- ret = hns3_remove_uc_addr_common(hw, oaddr); +- if (ret) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- oaddr); +- hns3_warn(hw, "Remove old uc mac address(%s) fail: %d", +- mac_str, ret); ++ oaddr = (struct rte_ether_addr *)hw->mac.mac_addr; ++ ret = hns3_remove_uc_addr_common(hw, oaddr); ++ if (ret) { ++ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, ++ oaddr); ++ hns3_warn(hw, "Remove old uc mac address(%s) fail: %d", ++ mac_str, ret); + +- rte_spinlock_unlock(&hw->lock); +- return ret; +- } ++ rte_spinlock_unlock(&hw->lock); ++ return ret; + } + + ret = hns3_add_uc_addr_common(hw, mac_addr); +@@ -1790,7 +1777,6 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + + rte_ether_addr_copy(mac_addr, + (struct rte_ether_addr *)hw->mac.mac_addr); +- hw->mac.default_addr_setted = true; + rte_spinlock_unlock(&hw->lock); + + return 0; +@@ -1811,7 +1797,6 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr); + hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d", + mac_str, ret_val); +- hw->mac.default_addr_setted = false; + } + rte_spinlock_unlock(&hw->lock); + +@@ -3470,7 +3455,6 @@ hns3_get_board_configuration(struct hns3_hw *hw) + hw->rss_dis_flag = false; + memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); + hw->mac.phy_addr = cfg.phy_addr; +- hw->mac.default_addr_setted = false; + hw->num_tx_desc = cfg.tqp_desc_num; + hw->num_rx_desc = cfg.tqp_desc_num; + hw->dcb_info.num_pg = 1; +@@ -5928,7 +5912,7 @@ hns3_do_stop(struct hns3_adapter *hns) + return ret; + } + } +- hw->mac.default_addr_setted = false; ++ + return 0; + } + +diff --git a/drivers/net/hns3/hns3_ethdev.h 
b/drivers/net/hns3/hns3_ethdev.h +index ab44894a8..57387e05b 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -188,7 +188,6 @@ enum hns3_media_type { + + struct hns3_mac { + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; +- bool default_addr_setted; /* whether default addr(mac_addr) is set */ + uint8_t media_type; + uint8_t phy_addr; + uint8_t link_duplex : 1; /* ETH_LINK_[HALF/FULL]_DUPLEX */ +-- +2.23.0 + diff --git a/0222-net-hns3-fix-input-parameters-of-MAC-functions.patch b/0222-net-hns3-fix-input-parameters-of-MAC-functions.patch new file mode 100644 index 0000000000000000000000000000000000000000..91c7ef66aaca14cf75b09582f15f3d35e3594ce4 --- /dev/null +++ b/0222-net-hns3-fix-input-parameters-of-MAC-functions.patch @@ -0,0 +1,210 @@ +From bc25acd9ac200067f0f1a68c192076a65e4c76e6 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Wed, 22 Sep 2021 11:41:52 +0800 +Subject: [PATCH 03/17] net/hns3: fix input parameters of MAC functions + +When adding multicast and unicast MAC addresses, three descriptors and +one descriptor are required for querying or adding MAC VLAN table, +respectively. This patch uses the number of descriptors as input +parameter to complete this task to make the function more secure. + +Fixes: 7d7f9f80bbfb ("net/hns3: support MAC address related operations") +Cc: stable@dpdk.org + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_cmd.h | 3 +- + drivers/net/hns3/hns3_ethdev.c | 88 +++++++++++++++++++--------------- + 2 files changed, 51 insertions(+), 40 deletions(-) + +diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h +index a4683de0a..81bc9e9d9 100644 +--- a/drivers/net/hns3/hns3_cmd.h ++++ b/drivers/net/hns3/hns3_cmd.h +@@ -923,7 +923,8 @@ enum hns3_mac_vlan_add_resp_code { + HNS3_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */ + }; + +-#define HNS3_MC_MAC_VLAN_ADD_DESC_NUM 3 ++#define HNS3_MC_MAC_VLAN_OPS_DESC_NUM 3 ++#define HNS3_UC_MAC_VLAN_OPS_DESC_NUM 1 + + #define HNS3_MAC_VLAN_BIT0_EN_B 0 + #define HNS3_MAC_VLAN_BIT1_EN_B 1 +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index d6228601f..02d68e496 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1427,28 +1427,31 @@ hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp, + static int + hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw, + struct hns3_mac_vlan_tbl_entry_cmd *req, +- struct hns3_cmd_desc *desc, bool is_mc) ++ struct hns3_cmd_desc *desc, uint8_t desc_num) + { + uint8_t resp_code; + uint16_t retval; + int ret; ++ int i; + +- hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true); +- if (is_mc) { +- desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); +- memcpy(desc[0].data, req, +- sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); +- hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD, +- true); +- desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); +- hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD, ++ if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) { ++ for (i = 0; i < desc_num - 1; i++) { ++ hns3_cmd_setup_basic_desc(&desc[i], ++ HNS3_OPC_MAC_VLAN_ADD, true); ++ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); ++ if (i == 0) ++ memcpy(desc[i].data, req, ++ sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); ++ } ++ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD, + true); +- ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM); + } else { ++ hns3_cmd_setup_basic_desc(&desc[0], 
HNS3_OPC_MAC_VLAN_ADD, ++ true); + memcpy(desc[0].data, req, + sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); +- ret = hns3_cmd_send(hw, desc, 1); + } ++ ret = hns3_cmd_send(hw, desc, desc_num); + if (ret) { + hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.", + ret); +@@ -1464,38 +1467,40 @@ hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw, + static int + hns3_add_mac_vlan_tbl(struct hns3_hw *hw, + struct hns3_mac_vlan_tbl_entry_cmd *req, +- struct hns3_cmd_desc *mc_desc) ++ struct hns3_cmd_desc *desc, uint8_t desc_num) + { + uint8_t resp_code; + uint16_t retval; + int cfg_status; + int ret; ++ int i; + +- if (mc_desc == NULL) { +- struct hns3_cmd_desc desc; +- +- hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false); +- memcpy(desc.data, req, ++ if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) { ++ hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false); ++ memcpy(desc->data, req, + sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); +- ret = hns3_cmd_send(hw, &desc, 1); +- resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; +- retval = rte_le_to_cpu_16(desc.retval); ++ ret = hns3_cmd_send(hw, desc, desc_num); ++ resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff; ++ retval = rte_le_to_cpu_16(desc->retval); + + cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, + HNS3_MAC_VLAN_ADD); + } else { +- hns3_cmd_reuse_desc(&mc_desc[0], false); +- mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); +- hns3_cmd_reuse_desc(&mc_desc[1], false); +- mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); +- hns3_cmd_reuse_desc(&mc_desc[2], false); +- mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT); +- memcpy(mc_desc[0].data, req, ++ for (i = 0; i < desc_num; i++) { ++ hns3_cmd_reuse_desc(&desc[i], false); ++ if (i == desc_num - 1) ++ desc[i].flag &= ++ rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT); ++ else ++ desc[i].flag |= ++ rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); ++ } ++ memcpy(desc[0].data, req, + sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); +- mc_desc[0].retval = 0; +- ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM); +- resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff; +- retval = rte_le_to_cpu_16(mc_desc[0].retval); ++ desc[0].retval = 0; ++ ret = hns3_cmd_send(hw, desc, desc_num); ++ resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff; ++ retval = rte_le_to_cpu_16(desc[0].retval); + + cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, + HNS3_MAC_VLAN_ADD); +@@ -1540,7 +1545,7 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_mac_vlan_tbl_entry_cmd req; + struct hns3_pf *pf = &hns->pf; +- struct hns3_cmd_desc desc[3]; ++ struct hns3_cmd_desc desc; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + uint16_t egress_port = 0; + uint8_t vf_id; +@@ -1574,10 +1579,12 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + * it if the entry is inexistent. Repeated unicast entry + * is not allowed in the mac vlan table. 
+ */ +- ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false); ++ ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc, ++ HNS3_UC_MAC_VLAN_OPS_DESC_NUM); + if (ret == -ENOENT) { + if (!hns3_is_umv_space_full(hw)) { +- ret = hns3_add_mac_vlan_tbl(hw, &req, NULL); ++ ret = hns3_add_mac_vlan_tbl(hw, &req, &desc, ++ HNS3_UC_MAC_VLAN_OPS_DESC_NUM); + if (!ret) + hns3_update_umv_space(hw, false); + return ret; +@@ -1867,8 +1874,8 @@ hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr) + static int + hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { ++ struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM]; + struct hns3_mac_vlan_tbl_entry_cmd req; +- struct hns3_cmd_desc desc[3]; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + uint8_t vf_id; + int ret; +@@ -1885,7 +1892,8 @@ hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + memset(&req, 0, sizeof(req)); + hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); + hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); +- ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); ++ ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, ++ HNS3_MC_MAC_VLAN_OPS_DESC_NUM); + if (ret) { + /* This mac addr do not exist, add new entry for it */ + memset(desc[0].data, 0, sizeof(desc[0].data)); +@@ -1899,7 +1907,8 @@ hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + */ + vf_id = HNS3_PF_FUNC_ID; + hns3_update_desc_vfid(desc, vf_id, false); +- ret = hns3_add_mac_vlan_tbl(hw, &req, desc); ++ ret = hns3_add_mac_vlan_tbl(hw, &req, desc, ++ HNS3_MC_MAC_VLAN_OPS_DESC_NUM); + if (ret) { + if (ret == -ENOSPC) + hns3_err(hw, "mc mac vlan table is full"); +@@ -1932,7 +1941,8 @@ hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + memset(&req, 0, sizeof(req)); + hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); + hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); +- ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); ++ ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, ++ HNS3_MC_MAC_VLAN_OPS_DESC_NUM); + if (ret == 0) { + /* + * This mac addr exist, remove this handle's VFID for it. +-- +2.23.0 + diff --git a/0223-net-bonding-fix-dedicated-queue-mode-in-vector-burst.patch b/0223-net-bonding-fix-dedicated-queue-mode-in-vector-burst.patch new file mode 100644 index 0000000000000000000000000000000000000000..031c285552c82ee00600324932843daf4ad351ec --- /dev/null +++ b/0223-net-bonding-fix-dedicated-queue-mode-in-vector-burst.patch @@ -0,0 +1,84 @@ +From 19dc6356916c60f282b6d3046f5d2f1d74d48d35 Mon Sep 17 00:00:00 2001 +From: Chengchang Tang +Date: Wed, 22 Sep 2021 15:09:12 +0800 +Subject: [PATCH 10/17] net/bonding: fix dedicated queue mode in vector burst + +If the vector burst mode is selected, the dedicated queue mode will not +take effect on some PMDs because these PMDs may have some limitations +in vector burst mode. For example, the limit on burst size. Currently, +both hns3 and intel I40E require four alignments when receiving packets +in vector mode. As a result, they can't accept packets if burst size +below four. However, in dedicated queue mode, the burst size of periodic +packets processing is one. + +This patch fixes the above problem by modifying the burst size to 32. +This approach also makes the packet processing of the dedicated queue +mode more reasonable. 
Currently, if multiple LACP protocol packets are +received in the hardware queue in a cycle, only one LACP packet will be +processed in this cycle, and the left packets will be processed in the +following cycle. After the modification, all the LACP packets will be +processed at one time, which seems more reasonable and closer to the +behavior of the bonding driver when the dedicated queue is not turned on. + +Fixes: 112891cd27e5 ("net/bonding: add dedicated HW queues for LACP control") +Cc: stable@dpdk.org + +Signed-off-by: Chengchang Tang +Signed-off-by: Min Hu (Connor) +--- + drivers/net/bonding/rte_eth_bond_8023ad.c | 32 ++++++++++++++++------- + 1 file changed, 23 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c +index 67ca0730f..0bcce6652 100644 +--- a/drivers/net/bonding/rte_eth_bond_8023ad.c ++++ b/drivers/net/bonding/rte_eth_bond_8023ad.c +@@ -823,6 +823,27 @@ rx_machine_update(struct bond_dev_private *internals, uint16_t slave_id, + rx_machine(internals, slave_id, NULL); + } + ++static void ++bond_mode_8023ad_dedicated_rxq_process(struct bond_dev_private *internals, ++ uint16_t slave_id) ++{ ++#define DEDICATED_QUEUE_BURST_SIZE 32 ++ struct rte_mbuf *lacp_pkt[DEDICATED_QUEUE_BURST_SIZE]; ++ uint16_t rx_count = rte_eth_rx_burst(slave_id, ++ internals->mode4.dedicated_queues.rx_qid, ++ lacp_pkt, DEDICATED_QUEUE_BURST_SIZE); ++ ++ if (rx_count) { ++ uint16_t i; ++ ++ for (i = 0; i < rx_count; i++) ++ bond_mode_8023ad_handle_slow_pkt(internals, slave_id, ++ lacp_pkt[i]); ++ } else { ++ rx_machine_update(internals, slave_id, NULL); ++ } ++} ++ + static void + bond_mode_8023ad_periodic_cb(void *arg) + { +@@ -911,15 +932,8 @@ bond_mode_8023ad_periodic_cb(void *arg) + + rx_machine_update(internals, slave_id, lacp_pkt); + } else { +- uint16_t rx_count = rte_eth_rx_burst(slave_id, +- internals->mode4.dedicated_queues.rx_qid, +- &lacp_pkt, 1); +- +- if (rx_count == 1) +- bond_mode_8023ad_handle_slow_pkt(internals, +- slave_id, lacp_pkt); +- else +- rx_machine_update(internals, slave_id, NULL); ++ bond_mode_8023ad_dedicated_rxq_process(internals, ++ slave_id); + } + + periodic_machine(internals, slave_id); +-- +2.23.0 + diff --git a/0224-net-bonding-fix-RSS-key-length.patch b/0224-net-bonding-fix-RSS-key-length.patch new file mode 100644 index 0000000000000000000000000000000000000000..fbf9a1ef6772d510b9fd7e4267b400afe1b4804b --- /dev/null +++ b/0224-net-bonding-fix-RSS-key-length.patch @@ -0,0 +1,143 @@ +From 464bfbd345224ddb04399297988c0d99cbe8acc6 Mon Sep 17 00:00:00 2001 +From: Chengchang Tang +Date: Wed, 22 Sep 2021 15:09:13 +0800 +Subject: [PATCH 11/17] net/bonding: fix RSS key length + +Currently the hash_key_size information has not been set. So, apps can +not get the key size from dev_info(), this make some problem. + +e.g, in testpmd, the hash_key_size will be checked before configure +or get the hash key: +testpmd> show port 4 rss-hash +dev_info did not provide a valid hash key size +testpmd> show port 4 rss-hash key +dev_info did not provide a valid hash key size +testpmd> port config 4 rss-hash-key ipv4 (hash key) +dev_info did not provide a valid hash key size + +In this patch, the meaning of rss_key_len has been modified. It only +indicated the length of the configured hash key before. Therefore, +its value depends on the user's configuration. This seems unreasonable. +And now, it indicates the minimum hash key length required by the +bonded device. 
Its value will be the shortest hash key among all slave +drivers. + +Fixes: 734ce47f71e0 ("bonding: support RSS dynamic configuration") +Cc: stable@dpdk.org + +Signed-off-by: Chengchang Tang +Signed-off-by: Min Hu (Connor) +--- + drivers/net/bonding/rte_eth_bond_api.c | 6 ++++ + drivers/net/bonding/rte_eth_bond_pmd.c | 44 ++++++++++++++++---------- + 2 files changed, 33 insertions(+), 17 deletions(-) + +diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c +index 44775f61e..c751a1242 100644 +--- a/drivers/net/bonding/rte_eth_bond_api.c ++++ b/drivers/net/bonding/rte_eth_bond_api.c +@@ -290,6 +290,7 @@ eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals, + struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf; + + internals->reta_size = di->reta_size; ++ internals->rss_key_len = di->hash_key_size; + + /* Inherit Rx offload capabilities from the first slave device */ + internals->rx_offload_capa = di->rx_offload_capa; +@@ -385,6 +386,11 @@ eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals, + */ + if (internals->reta_size > di->reta_size) + internals->reta_size = di->reta_size; ++ if (internals->rss_key_len > di->hash_key_size) { ++ RTE_BOND_LOG(WARNING, "slave has different rss key size, " ++ "configuring rss may fail"); ++ internals->rss_key_len = di->hash_key_size; ++ } + + if (!internals->max_rx_pktlen && + di->max_rx_pktlen < internals->candidate_max_rx_pktlen) +diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c +index 057b1ada5..c21df6d6f 100644 +--- a/drivers/net/bonding/rte_eth_bond_pmd.c ++++ b/drivers/net/bonding/rte_eth_bond_pmd.c +@@ -1705,14 +1705,11 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, + + /* If RSS is enabled for bonding, try to enable it for slaves */ + if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { +- if (internals->rss_key_len != 0) { +- slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = ++ /* rss_key won't be empty if RSS is configured in bonded dev */ ++ slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = + internals->rss_key_len; +- slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = ++ slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = + internals->rss_key; +- } else { +- slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; +- } + + slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = + bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; +@@ -2234,6 +2231,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads; + + dev_info->reta_size = internals->reta_size; ++ dev_info->hash_key_size = internals->rss_key_len; + + return 0; + } +@@ -3023,13 +3021,15 @@ bond_ethdev_rss_hash_update(struct rte_eth_dev *dev, + if (bond_rss_conf.rss_hf != 0) + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf; + +- if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len < +- sizeof(internals->rss_key)) { +- if (bond_rss_conf.rss_key_len == 0) +- bond_rss_conf.rss_key_len = 40; +- internals->rss_key_len = bond_rss_conf.rss_key_len; ++ if (bond_rss_conf.rss_key) { ++ if (bond_rss_conf.rss_key_len < internals->rss_key_len) ++ return -EINVAL; ++ else if (bond_rss_conf.rss_key_len > internals->rss_key_len) ++ RTE_BOND_LOG(WARNING, "rss_key will be truncated"); ++ + memcpy(internals->rss_key, bond_rss_conf.rss_key, + 
internals->rss_key_len); ++ bond_rss_conf.rss_key_len = internals->rss_key_len; + } + + for (i = 0; i < internals->slave_count; i++) { +@@ -3491,14 +3491,24 @@ bond_ethdev_configure(struct rte_eth_dev *dev) + * Fall back to default RSS key if the key is not specified + */ + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) { +- if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) { +- internals->rss_key_len = +- dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len; +- memcpy(internals->rss_key, +- dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key, ++ struct rte_eth_rss_conf *rss_conf = ++ &dev->data->dev_conf.rx_adv_conf.rss_conf; ++ if (rss_conf->rss_key != NULL) { ++ if (internals->rss_key_len > rss_conf->rss_key_len) { ++ RTE_BOND_LOG(ERR, "Invalid rss key length(%u)", ++ rss_conf->rss_key_len); ++ return -EINVAL; ++ } ++ ++ memcpy(internals->rss_key, rss_conf->rss_key, + internals->rss_key_len); + } else { +- internals->rss_key_len = sizeof(default_rss_key); ++ if (internals->rss_key_len > sizeof(default_rss_key)) { ++ RTE_BOND_LOG(ERR, ++ "There is no suitable default hash key"); ++ return -EINVAL; ++ } ++ + memcpy(internals->rss_key, default_rss_key, + internals->rss_key_len); + } +-- +2.23.0 + diff --git a/0225-app-testpmd-add-command-to-show-LACP-bonding-info.patch b/0225-app-testpmd-add-command-to-show-LACP-bonding-info.patch new file mode 100644 index 0000000000000000000000000000000000000000..380433f536c9e006222845a3207588924c0684ee --- /dev/null +++ b/0225-app-testpmd-add-command-to-show-LACP-bonding-info.patch @@ -0,0 +1,247 @@ +From 992fb12f5a2061144190986a1a82c64f9b324e5b Mon Sep 17 00:00:00 2001 +From: Chengchang Tang +Date: Fri, 24 Sep 2021 17:57:20 +0800 +Subject: [PATCH 12/17] app/testpmd: add command to show LACP bonding info + +Add a new cmdline to help diagnostic the bonding mode 4 in testpmd. 
+ +Show the lacp information about the bonded device and its slaves: +show bonding lacp info + +Signed-off-by: Chengchang Tang +Signed-off-by: Min Hu (Connor) +Acked-by: Xiaoyun Li +--- + app/test-pmd/cmdline.c | 184 ++++++++++++++++++++ + doc/guides/testpmd_app_ug/testpmd_funcs.rst | 6 + + 2 files changed, 190 insertions(+) + +diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c +index b69c648bf..5691fab94 100644 +--- a/app/test-pmd/cmdline.c ++++ b/app/test-pmd/cmdline.c +@@ -631,6 +631,9 @@ static void cmd_help_long_parsed(void *parsed_result, + "show bonding config (port_id)\n" + " Show the bonding config for port_id.\n\n" + ++ "show bonding lacp info (port_id)\n" ++ " Show the bonding lacp information for port_id.\n\n" ++ + "set bonding mac_addr (port_id) (address)\n" + " Set the MAC address of a bonded device.\n\n" + +@@ -6040,6 +6043,186 @@ cmdline_parse_inst_t cmd_set_balance_xmit_policy = { + } + }; + ++/* *** SHOW IEEE802.3 BONDING INFORMATION *** */ ++struct cmd_show_bonding_lacp_info_result { ++ cmdline_fixed_string_t show; ++ cmdline_fixed_string_t bonding; ++ cmdline_fixed_string_t lacp; ++ cmdline_fixed_string_t info; ++ portid_t port_id; ++}; ++ ++static void port_param_show(struct port_params *params) ++{ ++ char buf[RTE_ETHER_ADDR_FMT_SIZE]; ++ ++ printf("\t\tsystem priority: %u\n", params->system_priority); ++ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, ¶ms->system); ++ printf("\t\tsystem mac address: %s\n", buf); ++ printf("\t\tport key: %u\n", params->key); ++ printf("\t\tport priority: %u\n", params->port_priority); ++ printf("\t\tport number: %u\n", params->port_number); ++} ++ ++static void lacp_slave_info_show(struct rte_eth_bond_8023ad_slave_info *info) ++{ ++ char a_state[256] = { 0 }; ++ char p_state[256] = { 0 }; ++ int a_len = 0; ++ int p_len = 0; ++ uint32_t i; ++ ++ static const char * const state[] = { ++ "ACTIVE", ++ "TIMEOUT", ++ "AGGREGATION", ++ "SYNCHRONIZATION", ++ "COLLECTING", ++ "DISTRIBUTING", ++ "DEFAULTED", ++ "EXPIRED" ++ }; ++ static const char * const selection[] = { ++ "UNSELECTED", ++ "STANDBY", ++ "SELECTED" ++ }; ++ ++ for (i = 0; i < RTE_DIM(state); i++) { ++ if ((info->actor_state >> i) & 1) ++ a_len += snprintf(&a_state[a_len], ++ RTE_DIM(a_state) - a_len, "%s ", ++ state[i]); ++ ++ if ((info->partner_state >> i) & 1) ++ p_len += snprintf(&p_state[p_len], ++ RTE_DIM(p_state) - p_len, "%s ", ++ state[i]); ++ } ++ printf("\tAggregator port id: %u\n", info->agg_port_id); ++ printf("\tselection: %s\n", selection[info->selected]); ++ printf("\tActor detail info:\n"); ++ port_param_show(&info->actor); ++ printf("\t\tport state: %s\n", a_state); ++ printf("\tPartner detail info:\n"); ++ port_param_show(&info->partner); ++ printf("\t\tport state: %s\n", p_state); ++ printf("\n"); ++} ++ ++static void lacp_conf_show(struct rte_eth_bond_8023ad_conf *conf) ++{ ++ printf("\tfast period: %u ms\n", conf->fast_periodic_ms); ++ printf("\tslow period: %u ms\n", conf->slow_periodic_ms); ++ printf("\tshort timeout: %u ms\n", conf->short_timeout_ms); ++ printf("\tlong timeout: %u ms\n", conf->long_timeout_ms); ++ printf("\taggregate wait timeout: %u ms\n", ++ conf->aggregate_wait_timeout_ms); ++ printf("\ttx period: %u ms\n", conf->tx_period_ms); ++ printf("\trx marker period: %u ms\n", conf->rx_marker_period_ms); ++ printf("\tupdate timeout: %u ms\n", conf->update_timeout_ms); ++ switch (conf->agg_selection) { ++ case AGG_BANDWIDTH: ++ printf("\taggregation mode: bandwidth\n"); ++ break; ++ case AGG_STABLE: ++ printf("\taggregation mode: 
stable\n"); ++ break; ++ case AGG_COUNT: ++ printf("\taggregation mode: count\n"); ++ break; ++ default: ++ printf("\taggregation mode: invalid\n"); ++ break; ++ } ++ ++ printf("\n"); ++} ++ ++static void cmd_show_bonding_lacp_info_parsed(void *parsed_result, ++ __rte_unused struct cmdline *cl, ++ __rte_unused void *data) ++{ ++ struct cmd_show_bonding_lacp_info_result *res = parsed_result; ++ struct rte_eth_bond_8023ad_slave_info slave_info; ++ struct rte_eth_bond_8023ad_conf port_conf; ++ portid_t slaves[RTE_MAX_ETHPORTS]; ++ portid_t port_id = res->port_id; ++ int num_active_slaves; ++ int bonding_mode; ++ int i; ++ int ret; ++ ++ bonding_mode = rte_eth_bond_mode_get(port_id); ++ if (bonding_mode != BONDING_MODE_8023AD) { ++ fprintf(stderr, "\tBonding mode is not mode 4\n"); ++ return; ++ } ++ ++ num_active_slaves = rte_eth_bond_active_slaves_get(port_id, slaves, ++ RTE_MAX_ETHPORTS); ++ if (num_active_slaves < 0) { ++ fprintf(stderr, "\tFailed to get active slave list for port = %u\n", ++ port_id); ++ return; ++ } ++ if (num_active_slaves == 0) ++ fprintf(stderr, "\tIEEE802.3 port %u has no active slave\n", ++ port_id); ++ ++ printf("\tIEEE802.3 port: %u\n", port_id); ++ ret = rte_eth_bond_8023ad_conf_get(port_id, &port_conf); ++ if (ret) { ++ fprintf(stderr, "\tGet bonded device %u info failed\n", ++ port_id); ++ return; ++ } ++ lacp_conf_show(&port_conf); ++ ++ for (i = 0; i < num_active_slaves; i++) { ++ ret = rte_eth_bond_8023ad_slave_info(port_id, slaves[i], ++ &slave_info); ++ if (ret) { ++ fprintf(stderr, "\tGet slave device %u info failed\n", ++ slaves[i]); ++ return; ++ } ++ printf("\tSlave Port: %u\n", slaves[i]); ++ lacp_slave_info_show(&slave_info); ++ } ++} ++ ++cmdline_parse_token_string_t cmd_show_bonding_lacp_info_show = ++TOKEN_STRING_INITIALIZER(struct cmd_show_bonding_lacp_info_result, ++ show, "show"); ++cmdline_parse_token_string_t cmd_show_bonding_lacp_info_bonding = ++TOKEN_STRING_INITIALIZER(struct cmd_show_bonding_lacp_info_result, ++ bonding, "bonding"); ++cmdline_parse_token_string_t cmd_show_bonding_lacp_info_lacp = ++TOKEN_STRING_INITIALIZER(struct cmd_show_bonding_lacp_info_result, ++ bonding, "lacp"); ++cmdline_parse_token_string_t cmd_show_bonding_lacp_info_info = ++TOKEN_STRING_INITIALIZER(struct cmd_show_bonding_lacp_info_result, ++ info, "info"); ++cmdline_parse_token_num_t cmd_show_bonding_lacp_info_port_id = ++TOKEN_NUM_INITIALIZER(struct cmd_show_bonding_lacp_info_result, ++ port_id, RTE_UINT16); ++ ++cmdline_parse_inst_t cmd_show_bonding_lacp_info = { ++ .f = cmd_show_bonding_lacp_info_parsed, ++ .help_str = "show bonding lacp info : " ++ "Show bonding IEEE802.3 information for port_id", ++ .data = NULL, ++ .tokens = { ++ (void *)&cmd_show_bonding_lacp_info_show, ++ (void *)&cmd_show_bonding_lacp_info_bonding, ++ (void *)&cmd_show_bonding_lacp_info_lacp, ++ (void *)&cmd_show_bonding_lacp_info_info, ++ (void *)&cmd_show_bonding_lacp_info_port_id, ++ NULL ++ } ++}; ++ + /* *** SHOW NIC BONDING CONFIGURATION *** */ + struct cmd_show_bonding_config_result { + cmdline_fixed_string_t show; +@@ -17027,6 +17210,7 @@ cmdline_parse_ctx_t main_ctx[] = { + #ifdef RTE_NET_BOND + (cmdline_parse_inst_t *) &cmd_set_bonding_mode, + (cmdline_parse_inst_t *) &cmd_show_bonding_config, ++ (cmdline_parse_inst_t *) &cmd_show_bonding_lacp_info, + (cmdline_parse_inst_t *) &cmd_set_bonding_primary, + (cmdline_parse_inst_t *) &cmd_add_bonding_slave, + (cmdline_parse_inst_t *) &cmd_remove_bonding_slave, +diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst 
b/doc/guides/testpmd_app_ug/testpmd_funcs.rst +index f0e04232a..d5e85b083 100644 +--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst ++++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst +@@ -2609,6 +2609,12 @@ in balance mode with a transmission policy of layer 2+3:: + Active Slaves (3): [1 3 4] + Primary: [3] + ++show bonding lacp info ++~~~~~~~~~~~~~~~~~~~~~~ ++ ++Show information about the Link Bonding device in mode 4 (link-aggregation-802.3ad):: ++ ++ testpmd> show bonding lacp info (port_id) + + Register Functions + ------------------ +-- +2.23.0 + diff --git a/0226-app-testpmd-retain-all-original-dev-conf-when-config.patch b/0226-app-testpmd-retain-all-original-dev-conf-when-config.patch new file mode 100644 index 0000000000000000000000000000000000000000..3dc1ec418b8ecd14375a0b4aa234c6034418ce5f --- /dev/null +++ b/0226-app-testpmd-retain-all-original-dev-conf-when-config.patch @@ -0,0 +1,40 @@ +From 65de76da4d8fc270af6bce73334399d0d3c20fa3 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Mon, 11 Oct 2021 17:12:46 +0800 +Subject: [PATCH 13/17] app/testpmd: retain all original dev conf when config + DCB + +When configuring DCB, testpmd retains the rx_mode/tx_mode configuration in +rte_port->dev_conf. But some configurations, such as the link_speed, were +not saved if they were set before configuring DCB. + +Fixes: 1a572499beb6 ("app/testpmd: setup DCB forwarding based on traffic class") +Cc: stable@dpdk.org + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +Acked-by: Xiaoyun Li +--- + app/test-pmd/testpmd.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c +index 3098df6c5..0eaa4852d 100644 +--- a/app/test-pmd/testpmd.c ++++ b/app/test-pmd/testpmd.c +@@ -3484,10 +3484,8 @@ init_port_dcb_config(portid_t pid, + + rte_port = &ports[pid]; + +- memset(&port_conf, 0, sizeof(struct rte_eth_conf)); +- +- port_conf.rxmode = rte_port->dev_conf.rxmode; +- port_conf.txmode = rte_port->dev_conf.txmode; ++ /* retain the original device configuration. */ ++ memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf)); + + /*set configuration of DCB in vt mode and DCB in non-vt mode*/ + retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); +-- +2.23.0 + diff --git a/0227-net-hns3-remove-similar-macro-function-definitions.patch b/0227-net-hns3-remove-similar-macro-function-definitions.patch new file mode 100644 index 0000000000000000000000000000000000000000..076e76c9c0bf5f0fac994bb2ec28caf3e543a758 --- /dev/null +++ b/0227-net-hns3-remove-similar-macro-function-definitions.patch @@ -0,0 +1,593 @@ +From 637fc8040d4aa52eba9c7c78a5c826f4a13c4da0 Mon Sep 17 00:00:00 2001 +From: Chengchang Tang +Date: Sat, 9 Oct 2021 15:48:05 +0800 +Subject: [PATCH 14/17] net/hns3: remove similar macro function definitions + +For different capabilities, we declare different macro functions to +determine whether the capabilities are supported. + +This patch declare a unified macro function to judge capabilities. 
+ +Signed-off-by: Chengchang Tang +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_cmd.c | 6 ++--- + drivers/net/hns3/hns3_dcb.c | 4 +-- + drivers/net/hns3/hns3_ethdev.c | 24 +++++++++--------- + drivers/net/hns3/hns3_ethdev.h | 41 ++----------------------------- + drivers/net/hns3/hns3_ethdev_vf.c | 6 ++--- + drivers/net/hns3/hns3_flow.c | 2 +- + drivers/net/hns3/hns3_intr.c | 2 +- + drivers/net/hns3/hns3_ptp.c | 18 +++++++------- + drivers/net/hns3/hns3_rxtx.c | 32 ++++++++++++------------ + drivers/net/hns3/hns3_rxtx_vec.c | 4 +-- + drivers/net/hns3/hns3_tm.c | 10 ++++---- + 11 files changed, 56 insertions(+), 93 deletions(-) + +diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c +index cfa943523..6e49108d2 100644 +--- a/drivers/net/hns3/hns3_cmd.c ++++ b/drivers/net/hns3/hns3_cmd.c +@@ -619,7 +619,7 @@ hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result) + static int + hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result) + { +- if (result != 0 && hns3_dev_copper_supported(hw)) { ++ if (result != 0 && hns3_dev_get_support(hw, COPPER)) { + hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.", + result); + return result; +@@ -658,7 +658,7 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) + } + if (revision == PCI_REVISION_ID_HIP09_A) { + struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); +- if (hns3_dev_copper_supported(hw) == 0 || pf->is_tmp_phy) { ++ if (hns3_dev_get_support(hw, COPPER) == 0 || pf->is_tmp_phy) { + PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***"); + pf->is_tmp_phy = true; + hns3_set_bit(hw->capability, +@@ -676,7 +676,7 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) + if (is_init) { + hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1); + hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0); +- if (hns3_dev_copper_supported(hw)) ++ if (hns3_dev_get_support(hw, COPPER)) + hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1); + } + req->compat = rte_cpu_to_le_32(compat); +diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c +index b71e2e9ea..8753c340e 100644 +--- a/drivers/net/hns3/hns3_dcb.c ++++ b/drivers/net/hns3/hns3_dcb.c +@@ -918,7 +918,7 @@ hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw) + if (ret) + return ret; + +- if (!hns3_dev_dcb_supported(hw)) ++ if (!hns3_dev_get_support(hw, DCB)) + return 0; + + ret = hns3_dcb_ets_tc_dwrr_cfg(hw); +@@ -1368,7 +1368,7 @@ hns3_dcb_pause_setup_hw(struct hns3_hw *hw) + } + + /* Only DCB-supported dev supports qset back pressure and pfc cmd */ +- if (!hns3_dev_dcb_supported(hw)) ++ if (!hns3_dev_get_support(hw, DCB)) + return 0; + + ret = hns3_pfc_setup_hw(hw); +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 02d68e496..c5c355d95 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -2408,7 +2408,7 @@ hns3_setup_dcb(struct rte_eth_dev *dev) + struct hns3_hw *hw = &hns->hw; + int ret; + +- if (!hns3_dev_dcb_supported(hw)) { ++ if (!hns3_dev_get_support(hw, DCB)) { + hns3_err(hw, "this port does not support dcb configurations."); + return -EOPNOTSUPP; + } +@@ -2746,14 +2746,14 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + DEV_TX_OFFLOAD_MBUF_FAST_FREE | + hns3_txvlan_cap_get(hw)); + +- if (hns3_dev_outer_udp_cksum_supported(hw)) ++ if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) + info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; + +- if (hns3_dev_indep_txrx_supported(hw)) ++ if (hns3_dev_get_support(hw, 
INDEP_TXRX)) + info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + +- if (hns3_dev_ptp_supported(hw)) ++ if (hns3_dev_get_support(hw, PTP)) + info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; + + info->rx_desc_lim = (struct rte_eth_desc_lim) { +@@ -3418,7 +3418,7 @@ hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) + + switch (media_type) { + case HNS3_MEDIA_TYPE_COPPER: +- if (!hns3_dev_copper_supported(hw)) { ++ if (!hns3_dev_get_support(hw, COPPER)) { + PMD_INIT_LOG(ERR, + "Media type is copper, not supported."); + ret = -EOPNOTSUPP; +@@ -3486,7 +3486,7 @@ hns3_get_board_configuration(struct hns3_hw *hw) + } + + /* Dev does not support DCB */ +- if (!hns3_dev_dcb_supported(hw)) { ++ if (!hns3_dev_get_support(hw, DCB)) { + pf->tc_max = 1; + pf->pfc_max = 0; + } else +@@ -3799,7 +3799,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, + tc_num = hns3_get_tc_num(hw); + aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); + +- if (hns3_dev_dcb_supported(hw)) ++ if (hns3_dev_get_support(hw, DCB)) + shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + + pf->dv_buf_size; + else +@@ -3816,7 +3816,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, + + shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); + buf_alloc->s_buf.buf_size = shared_buf; +- if (hns3_dev_dcb_supported(hw)) { ++ if (hns3_dev_get_support(hw, DCB)) { + buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; + buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high + - roundup(aligned_mps / HNS3_BUF_DIV_BY, +@@ -3827,7 +3827,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, + buf_alloc->s_buf.self.low = aligned_mps; + } + +- if (hns3_dev_dcb_supported(hw)) { ++ if (hns3_dev_get_support(hw, DCB)) { + hi_thrd = shared_buf - pf->dv_buf_size; + + if (tc_num <= NEED_RESERVE_TC_NUM) +@@ -4033,7 +4033,7 @@ static int + hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) + { + /* When DCB is not supported, rx private buffer is not allocated. 
*/ +- if (!hns3_dev_dcb_supported(hw)) { ++ if (!hns3_dev_get_support(hw, DCB)) { + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + uint32_t rx_all = pf->pkt_buf_size; +@@ -4261,7 +4261,7 @@ hns3_buffer_alloc(struct hns3_hw *hw) + return ret; + } + +- if (hns3_dev_dcb_supported(hw)) { ++ if (hns3_dev_get_support(hw, DCB)) { + ret = hns3_rx_priv_wl_config(hw, &pkt_buf); + if (ret) { + PMD_INIT_LOG(ERR, +@@ -6230,7 +6230,7 @@ hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + +- if (!hns3_dev_dcb_supported(hw)) { ++ if (!hns3_dev_get_support(hw, DCB)) { + hns3_err(hw, "This port does not support dcb configurations."); + return -EOPNOTSUPP; + } +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 57387e05b..94fd14bfc 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -883,45 +883,8 @@ enum { + HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, + }; + +-#define hns3_dev_dcb_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_DCB_B) +- +-/* Support copper media type */ +-#define hns3_dev_copper_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_COPPER_B) +- +-/* Support the queue region action rule of flow directory */ +-#define hns3_dev_fd_queue_region_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B) +- +-/* Support PTP timestamp offload */ +-#define hns3_dev_ptp_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_PTP_B) +- +-/* Support to Independently enable/disable/reset Tx or Rx queues */ +-#define hns3_dev_indep_txrx_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B) +- +-#define hns3_dev_stash_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_STASH_B) +- +-#define hns3_dev_rxd_adv_layout_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B) +- +-#define hns3_dev_outer_udp_cksum_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B) +- +-#define hns3_dev_ras_imp_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RAS_IMP_B) +- +-#define hns3_dev_tx_push_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TX_PUSH_B) +- +-#define hns3_dev_tm_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TM_B) +- +-#define hns3_dev_vf_vlan_flt_supported(hw) \ +- hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B) ++#define hns3_dev_get_support(hw, _name) \ ++ hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_##_name##_B) + + #define HNS3_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct hns3_adapter *)adapter)->hw) +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index e07eb2088..d2895b140 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -988,10 +988,10 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + DEV_TX_OFFLOAD_MBUF_FAST_FREE | + hns3_txvlan_cap_get(hw)); + +- if (hns3_dev_outer_udp_cksum_supported(hw)) ++ if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) + info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; + +- if (hns3_dev_indep_txrx_supported(hw)) ++ if (hns3_dev_get_support(hw, INDEP_TXRX)) + info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + +@@ -1623,7 +1623,7 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, 
bool enable) + uint8_t msg_data; + int ret; + +- if (!hns3_dev_vf_vlan_flt_supported(hw)) ++ if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD)) + return 0; + + msg_data = enable ? 1 : 0; +diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c +index 6844a5dbe..b25fccbca 100644 +--- a/drivers/net/hns3/hns3_flow.c ++++ b/drivers/net/hns3/hns3_flow.c +@@ -301,7 +301,7 @@ hns3_handle_action_queue_region(struct rte_eth_dev *dev, + struct hns3_hw *hw = &hns->hw; + uint16_t idx; + +- if (!hns3_dev_fd_queue_region_supported(hw)) ++ if (!hns3_dev_get_support(hw, FD_QUEUE_REGION)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Not support config queue region!"); +diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c +index 0b307fdd1..3484c76d2 100644 +--- a/drivers/net/hns3/hns3_intr.c ++++ b/drivers/net/hns3/hns3_intr.c +@@ -2368,7 +2368,7 @@ hns3_handle_error(struct hns3_adapter *hns) + { + struct hns3_hw *hw = &hns->hw; + +- if (hns3_dev_ras_imp_supported(hw)) { ++ if (hns3_dev_get_support(hw, RAS_IMP)) { + hns3_handle_hw_error_v2(hw); + hns3_schedule_reset(hns); + } else { +diff --git a/drivers/net/hns3/hns3_ptp.c b/drivers/net/hns3/hns3_ptp.c +index 146b69db7..14c1ad3e4 100644 +--- a/drivers/net/hns3/hns3_ptp.c ++++ b/drivers/net/hns3/hns3_ptp.c +@@ -61,7 +61,7 @@ hns3_ptp_init(struct hns3_hw *hw) + { + int ret; + +- if (!hns3_dev_ptp_supported(hw)) ++ if (!hns3_dev_get_support(hw, PTP)) + return 0; + + ret = hns3_ptp_int_en(hw, true); +@@ -120,7 +120,7 @@ hns3_timesync_enable(struct rte_eth_dev *dev) + struct hns3_pf *pf = &hns->pf; + int ret; + +- if (!hns3_dev_ptp_supported(hw)) ++ if (!hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + + if (pf->ptp_enable) +@@ -140,7 +140,7 @@ hns3_timesync_disable(struct rte_eth_dev *dev) + struct hns3_pf *pf = &hns->pf; + int ret; + +- if (!hns3_dev_ptp_supported(hw)) ++ if (!hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + + if (!pf->ptp_enable) +@@ -164,7 +164,7 @@ hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct hns3_pf *pf = &hns->pf; + uint64_t ns, sec; + +- if (!hns3_dev_ptp_supported(hw)) ++ if (!hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + + ns = pf->rx_timestamp & TIME_RX_STAMP_NS_MASK; +@@ -190,7 +190,7 @@ hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + uint64_t ns; + int ts_cnt; + +- if (!hns3_dev_ptp_supported(hw)) ++ if (!hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + + ts_cnt = hns3_read_dev(hw, HNS3_TX_1588_BACK_TSP_CNT) & +@@ -219,7 +219,7 @@ hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t ns, sec; + +- if (!hns3_dev_ptp_supported(hw)) ++ if (!hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + + sec = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_L); +@@ -240,7 +240,7 @@ hns3_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) + uint64_t sec = ts->tv_sec; + uint64_t ns = ts->tv_nsec; + +- if (!hns3_dev_ptp_supported(hw)) ++ if (!hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + + /* Set the timecounters to a new value. 
*/ +@@ -261,7 +261,7 @@ hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) + struct timespec cur_time; + uint64_t ns; + +- if (!hns3_dev_ptp_supported(hw)) ++ if (!hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + + (void)hns3_timesync_read_time(dev, &cur_time); +@@ -280,7 +280,7 @@ hns3_restore_ptp(struct hns3_adapter *hns) + bool en = pf->ptp_enable; + int ret; + +- if (!hns3_dev_ptp_supported(hw)) ++ if (!hns3_dev_get_support(hw, PTP)) + return 0; + + ret = hns3_timesync_configure(hns, en); +diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c +index 80d2614d2..bb1723e29 100644 +--- a/drivers/net/hns3/hns3_rxtx.c ++++ b/drivers/net/hns3/hns3_rxtx.c +@@ -381,7 +381,7 @@ hns3_enable_all_queues(struct hns3_hw *hw, bool en) + int i; + + for (i = 0; i < hw->cfg_max_queues; i++) { +- if (hns3_dev_indep_txrx_supported(hw)) { ++ if (hns3_dev_get_support(hw, INDEP_TXRX)) { + rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL; + txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL; + +@@ -426,7 +426,7 @@ hns3_enable_txq(struct hns3_tx_queue *txq, bool en) + struct hns3_hw *hw = &txq->hns->hw; + uint32_t reg; + +- if (hns3_dev_indep_txrx_supported(hw)) { ++ if (hns3_dev_get_support(hw, INDEP_TXRX)) { + reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG); + if (en) + reg |= BIT(HNS3_RING_EN_B); +@@ -443,7 +443,7 @@ hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en) + struct hns3_hw *hw = &rxq->hns->hw; + uint32_t reg; + +- if (hns3_dev_indep_txrx_supported(hw)) { ++ if (hns3_dev_get_support(hw, INDEP_TXRX)) { + reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG); + if (en) + reg |= BIT(HNS3_RING_EN_B); +@@ -1618,7 +1618,7 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q, + uint16_t q; + int ret; + +- if (hns3_dev_indep_txrx_supported(hw)) ++ if (hns3_dev_get_support(hw, INDEP_TXRX)) + return 0; + + /* Setup new number of fake RX/TX queues and reconfigure device. */ +@@ -1862,7 +1862,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, + conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH; + + rxq->rx_deferred_start = conf->rx_deferred_start; +- if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) { ++ if (rxq->rx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) { + hns3_warn(hw, "deferred start is not supported."); + rxq->rx_deferred_start = false; + } +@@ -1898,7 +1898,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, + HNS3_PORT_BASE_VLAN_ENABLE; + else + rxq->pvid_sw_discard_en = false; +- rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false; ++ rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? 
true : false; + rxq->configured = true; + rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET + + idx * HNS3_TQP_REG_SIZE); +@@ -2026,7 +2026,7 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev) + dev->rx_pkt_burst == hns3_recv_scattered_pkts || + dev->rx_pkt_burst == hns3_recv_pkts_vec || + dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) { +- if (hns3_dev_rxd_adv_layout_supported(hw)) ++ if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT)) + return adv_layout_ptypes; + else + return ptypes; +@@ -2928,7 +2928,7 @@ hns3_tx_push_init(struct rte_eth_dev *dev) + volatile uint32_t *reg; + uint32_t val; + +- if (!hns3_dev_tx_push_supported(hw)) ++ if (!hns3_dev_get_support(hw, TX_PUSH)) + return; + + reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0); +@@ -2949,7 +2949,7 @@ hns3_tx_push_queue_init(struct rte_eth_dev *dev, + struct hns3_tx_queue *txq) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- if (!hns3_dev_tx_push_supported(hw)) { ++ if (!hns3_dev_get_support(hw, TX_PUSH)) { + txq->tx_push_enable = false; + return; + } +@@ -2994,7 +2994,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, + } + + txq->tx_deferred_start = conf->tx_deferred_start; +- if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) { ++ if (txq->tx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) { + hns3_warn(hw, "deferred start is not supported."); + txq->tx_deferred_start = false; + } +@@ -4276,7 +4276,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev) + uint64_t offloads = dev->data->dev_conf.txmode.offloads; + + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- if (hns3_dev_ptp_supported(hw)) ++ if (hns3_dev_get_support(hw, PTP)) + return false; + + return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)); +@@ -4437,7 +4437,7 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + int ret; + +- if (!hns3_dev_indep_txrx_supported(hw)) ++ if (!hns3_dev_get_support(hw, INDEP_TXRX)) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); +@@ -4483,7 +4483,7 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id]; + +- if (!hns3_dev_indep_txrx_supported(hw)) ++ if (!hns3_dev_get_support(hw, INDEP_TXRX)) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); +@@ -4505,7 +4505,7 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) + struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id]; + int ret; + +- if (!hns3_dev_indep_txrx_supported(hw)) ++ if (!hns3_dev_get_support(hw, INDEP_TXRX)) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); +@@ -4531,7 +4531,7 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id]; + +- if (!hns3_dev_indep_txrx_supported(hw)) ++ if (!hns3_dev_get_support(hw, INDEP_TXRX)) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); +@@ -4704,7 +4704,7 @@ hns3_enable_rxd_adv_layout(struct hns3_hw *hw) + * If the hardware support rxd advanced layout, then driver enable it + * default. 
+ */ +- if (hns3_dev_rxd_adv_layout_supported(hw)) ++ if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT)) + hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1); + } + +diff --git a/drivers/net/hns3/hns3_rxtx_vec.c b/drivers/net/hns3/hns3_rxtx_vec.c +index 15a0bd075..bfe84e833 100644 +--- a/drivers/net/hns3/hns3_rxtx_vec.c ++++ b/drivers/net/hns3/hns3_rxtx_vec.c +@@ -19,7 +19,7 @@ hns3_tx_check_vec_support(struct rte_eth_dev *dev) + struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode; + + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- if (hns3_dev_ptp_supported(hw)) ++ if (hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + + /* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */ +@@ -234,7 +234,7 @@ hns3_rx_check_vec_support(struct rte_eth_dev *dev) + DEV_RX_OFFLOAD_VLAN; + + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- if (hns3_dev_ptp_supported(hw)) ++ if (hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + + if (dev->data->scattered_rx) +diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c +index db5ac786c..44b607af7 100644 +--- a/drivers/net/hns3/hns3_tm.c ++++ b/drivers/net/hns3/hns3_tm.c +@@ -31,7 +31,7 @@ hns3_tm_conf_init(struct rte_eth_dev *dev) + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev); + +- if (!hns3_dev_tm_supported(hw)) ++ if (!hns3_dev_get_support(hw, TM)) + return; + + pf->tm_conf.nb_leaf_nodes_max = max_tx_queues; +@@ -58,7 +58,7 @@ hns3_tm_conf_uninit(struct rte_eth_dev *dev) + struct hns3_tm_shaper_profile *shaper_profile; + struct hns3_tm_node *tm_node; + +- if (!hns3_dev_tm_supported(hw)) ++ if (!hns3_dev_get_support(hw, TM)) + return; + + if (pf->tm_conf.nb_queue_node > 0) { +@@ -1233,7 +1233,7 @@ hns3_tm_ops_get(struct rte_eth_dev *dev, void *arg) + if (arg == NULL) + return -EINVAL; + +- if (!hns3_dev_tm_supported(hw)) ++ if (!hns3_dev_get_support(hw, TM)) + return -EOPNOTSUPP; + + *(const void **)arg = &hns3_tm_ops; +@@ -1246,7 +1246,7 @@ hns3_tm_dev_start_proc(struct hns3_hw *hw) + { + struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); + +- if (!hns3_dev_tm_supported(hw)) ++ if (!hns3_dev_get_support(hw, TM)) + return; + + if (pf->tm_conf.root && !pf->tm_conf.committed) +@@ -1295,7 +1295,7 @@ hns3_tm_conf_update(struct hns3_hw *hw) + struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); + struct rte_tm_error error; + +- if (!hns3_dev_tm_supported(hw)) ++ if (!hns3_dev_get_support(hw, TM)) + return 0; + + if (pf->tm_conf.root == NULL || !pf->tm_conf.committed) +-- +2.23.0 + diff --git a/0228-net-hns3-fix-interrupt-vector-freeing.patch b/0228-net-hns3-fix-interrupt-vector-freeing.patch new file mode 100644 index 0000000000000000000000000000000000000000..bf906a5d41908643090f439c67d001309c6bf9f6 --- /dev/null +++ b/0228-net-hns3-fix-interrupt-vector-freeing.patch @@ -0,0 +1,33 @@ +From b00fefaae559f49fbbbc39ec6ec1aaa1f4f5ba39 Mon Sep 17 00:00:00 2001 +From: Chengwen Feng +Date: Wed, 13 Oct 2021 16:09:08 +0800 +Subject: [PATCH 15/17] net/hns3: fix interrupt vector freeing + +The intr_handle->intr_vec is allocated by rte_zmalloc(), but freed by +free(), this patch fixes it. 
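The underlying rule is that memory obtained from the EAL allocator must be returned with rte_free(); libc free() only pairs with malloc()-family pointers. A minimal sketch of the correct pairing (the function and variable names here are hypothetical):

    #include <stddef.h>
    #include <rte_malloc.h>

    static int intr_vec_example(size_t nb_entries)
    {
        int *vec = rte_zmalloc("intr_vec", nb_entries * sizeof(*vec), 0);

        if (vec == NULL)
            return -1;

        /* ... use vec ... */

        rte_free(vec);   /* correct: releases rte_zmalloc() memory */
        /* free(vec);       wrong: libc free() on EAL memory, as fixed below */
        return 0;
    }
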
+ +Fixes: 02a7b55657b2 ("net/hns3: support Rx interrupt") +Cc: stable@dpdk.org + +Signed-off-by: Chengwen Feng +Reviewed-by: Ferruh Yigit +--- + drivers/net/hns3/hns3_ethdev_vf.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index d2895b140..9dfc22d2d 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -2355,7 +2355,7 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) + return 0; + + vf_bind_vector_error: +- free(intr_handle->intr_vec); ++ rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + vf_alloc_intr_vec_error: + rte_intr_efd_disable(intr_handle); +-- +2.23.0 + diff --git a/0229-net-hns3-add-runtime-config-for-mailbox-limit-time.patch b/0229-net-hns3-add-runtime-config-for-mailbox-limit-time.patch new file mode 100644 index 0000000000000000000000000000000000000000..2f9b1dbc8f1fa824ae77142af9e6b4c19a15a659 --- /dev/null +++ b/0229-net-hns3-add-runtime-config-for-mailbox-limit-time.patch @@ -0,0 +1,179 @@ +From 61c8349bbed069317c59da812598b74d2e076ced Mon Sep 17 00:00:00 2001 +From: Chengchang Tang +Date: Fri, 22 Oct 2021 09:38:40 +0800 +Subject: [PATCH 16/17] net/hns3: add runtime config for mailbox limit time + +Current, the max waiting time for MBX response is 500ms, but in +some scenarios, it is not enough. Since it depends on the response +of the kernel mode driver, and its response time is related to the +scheduling of the system. In this special scenario, most of the +cores are isolated, and only a few cores are used for system +scheduling. When a large number of services are started, the +scheduling of the system will be very busy, and the reply of the +mbx message will time out, which will cause our PMD initialization +to fail. + +This patch add a runtime config to set the max wait time. For the +above scenes, users can adjust the waiting time to a suitable value +by themselves. + +Fixes: 463e748964f5 ("net/hns3: support mailbox") +Cc: stable@dpdk.org + +Signed-off-by: Chengchang Tang +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 32 ++++++++++++++++++++++++++++++- + drivers/net/hns3/hns3_ethdev.h | 3 +++ + drivers/net/hns3/hns3_ethdev_vf.c | 3 ++- + drivers/net/hns3/hns3_mbx.c | 8 +++++--- + drivers/net/hns3/hns3_mbx.h | 1 + + 5 files changed, 42 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index c5c355d95..2ae4cb9b7 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -7348,9 +7348,30 @@ hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) + return 0; + } + ++static int ++hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) ++{ ++ uint32_t val; ++ ++ RTE_SET_USED(key); ++ ++ val = strtoul(value, NULL, 10); ++ ++ /* ++ * 500ms is empirical value in process of mailbox communication. If ++ * the delay value is set to one lower thanthe empirical value, mailbox ++ * communication may fail. 
++ */ ++ if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX) ++ *(uint16_t *)extra_args = val; ++ ++ return 0; ++} ++ + void + hns3_parse_devargs(struct rte_eth_dev *dev) + { ++ uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS; + struct hns3_adapter *hns = dev->data->dev_private; + uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE; + uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE; +@@ -7371,6 +7392,9 @@ hns3_parse_devargs(struct rte_eth_dev *dev) + &hns3_parse_io_hint_func, &tx_func_hint); + (void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK, + &hns3_parse_dev_caps_mask, &dev_caps_mask); ++ (void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS, ++ &hns3_parse_mbx_time_limit, &mbx_time_limit_ms); ++ + rte_kvargs_free(kvlist); + + if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE) +@@ -7386,6 +7410,11 @@ hns3_parse_devargs(struct rte_eth_dev *dev) + hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".", + HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask); + hns->dev_caps_mask = dev_caps_mask; ++ ++ if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS) ++ hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS, ++ mbx_time_limit_ms); ++ hns->mbx_time_limit_ms = mbx_time_limit_ms; + } + + static const struct eth_dev_ops hns3_eth_dev_ops = { +@@ -7642,6 +7671,7 @@ RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); + RTE_PMD_REGISTER_PARAM_STRING(net_hns3, + HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " + HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common " +- HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "); ++ HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> " ++ HNS3_DEVARG_MBX_TIME_LIMIT_MS "= "); + RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE); + RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE); +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 94fd14bfc..84f5a9f29 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -851,6 +851,7 @@ struct hns3_adapter { + uint32_t tx_func_hint; + + uint64_t dev_caps_mask; ++ uint16_t mbx_time_limit_ms; /* wait time for mbx message */ + + struct hns3_ptype_table ptype_tbl __rte_cache_aligned; + }; +@@ -868,6 +869,8 @@ enum { + + #define HNS3_DEVARG_DEV_CAPS_MASK "dev_caps_mask" + ++#define HNS3_DEVARG_MBX_TIME_LIMIT_MS "mbx_time_limit_ms" ++ + enum { + HNS3_DEV_SUPPORT_DCB_B, + HNS3_DEV_SUPPORT_COPPER_B, +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index 9dfc22d2d..29313c2f7 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -3112,4 +3112,5 @@ RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci"); + RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf, + HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " + HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common " +- HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "); ++ HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> " ++ HNS3_DEVARG_MBX_TIME_LIMIT_MS "= "); +diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c +index 411c5ebe9..a4d9afc45 100644 +--- a/drivers/net/hns3/hns3_mbx.c ++++ b/drivers/net/hns3/hns3_mbx.c +@@ -61,8 +61,9 @@ static int + hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + uint8_t *resp_data, uint16_t resp_len) + { +-#define HNS3_MAX_RETRY_US 500000 + #define HNS3_WAIT_RESP_US 100 ++#define US_PER_MS 1000 ++ uint32_t mbx_time_limit; + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_mbx_resp_status *mbx_resp; + uint32_t wait_time = 0; +@@ -74,7 +75,8 @@ 
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + return -EINVAL; + } + +- while (wait_time < HNS3_MAX_RETRY_US) { ++ mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS; ++ while (wait_time < mbx_time_limit) { + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) { + hns3_err(hw, "Don't wait for mbx respone because of " + "disable_cmd"); +@@ -103,7 +105,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + wait_time += HNS3_WAIT_RESP_US; + } + hw->mbx_resp.req_msg_data = 0; +- if (wait_time >= HNS3_MAX_RETRY_US) { ++ if (wait_time >= mbx_time_limit) { + hns3_mbx_proc_timeout(hw, code, subcode); + return -ETIME; + } +diff --git a/drivers/net/hns3/hns3_mbx.h b/drivers/net/hns3/hns3_mbx.h +index f868e33a9..d637bd2b2 100644 +--- a/drivers/net/hns3/hns3_mbx.h ++++ b/drivers/net/hns3/hns3_mbx.h +@@ -87,6 +87,7 @@ enum hns3_mbx_link_fail_subcode { + + #define HNS3_MBX_MAX_MSG_SIZE 16 + #define HNS3_MBX_MAX_RESP_DATA_SIZE 8 ++#define HNS3_MBX_DEF_TIME_LIMIT_MS 500 + + enum { + HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL = 0, +-- +2.23.0 + diff --git a/0230-net-hns3-fix-mailbox-communication-with-HW.patch b/0230-net-hns3-fix-mailbox-communication-with-HW.patch new file mode 100644 index 0000000000000000000000000000000000000000..989fdc25c1050ce574b10131ba94ee4c50ff02ee --- /dev/null +++ b/0230-net-hns3-fix-mailbox-communication-with-HW.patch @@ -0,0 +1,39 @@ +From a277f7dbaa54e8ea10f41a7dc4dadec5f08b61b3 Mon Sep 17 00:00:00 2001 +From: "Min Hu (Connor)" +Date: Thu, 28 Oct 2021 19:52:30 +0800 +Subject: [PATCH 17/17] net/hns3: fix mailbox communication with HW + +Mailbox is the communication mechanism between SW and HW. There exist +two approaches for SW to recognize mailbox message from HW. One way is +using match_id, the other is to compare the message code. The two +approaches are independent and used in different scenarios. + +But for the second approach, "next_to_use" should be updated and written +to HW register. If it not done, HW do not know the position SW steps, +then, the communication between SW and HW will turn to be failed. + +Fixes: dbbbad23e380 ("net/hns3: fix VF handling LSC event in secondary process") +Cc: stable@dpdk.org + +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_mbx.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c +index a4d9afc45..3ad85e721 100644 +--- a/drivers/net/hns3/hns3_mbx.c ++++ b/drivers/net/hns3/hns3_mbx.c +@@ -435,6 +435,9 @@ hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw) + scan_next: + next_to_use = (next_to_use + 1) % hw->cmq.crq.desc_num; + } ++ ++ crq->next_to_use = next_to_use; ++ hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use); + } + + void +-- +2.23.0 + diff --git a/0231-app-testpmd-support-multi-process.patch b/0231-app-testpmd-support-multi-process.patch new file mode 100644 index 0000000000000000000000000000000000000000..3ace44b393cedf11a447711faa379325e36111f6 --- /dev/null +++ b/0231-app-testpmd-support-multi-process.patch @@ -0,0 +1,421 @@ +From c7a841ce328cda8338494640b103d5182268fd1e Mon Sep 17 00:00:00 2001 +From: "Min Hu (Connor)" +Date: Wed, 25 Aug 2021 10:06:38 +0800 +Subject: [PATCH] app/testpmd: support multi-process + +This patch adds multi-process support for testpmd. 
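Each process polls only its own contiguous slice of the configured queues; the split applied in the forwarding setup below boils down to the following arithmetic (the helper name is hypothetical):

    #include <stdint.h>

    static void proc_queue_range(uint16_t nb_q, unsigned int num_procs,
                                 unsigned int proc_id,
                                 uint16_t *start, uint16_t *end)
    {
        *start = proc_id * nb_q / num_procs;
        *end = *start + nb_q / num_procs;
    }

    /* e.g. nb_q = 4, num_procs = 2: process 0 polls queues [0, 2),
     * process 1 polls queues [2, 4).
     */
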
+For example the following commands run two testpmd +processes: + + * the primary process: + +./dpdk-testpmd --proc-type=auto -l 0-1 -- -i \ + --rxq=4 --txq=4 --num-procs=2 --proc-id=0 + + * the secondary process: + +./dpdk-testpmd --proc-type=auto -l 2-3 -- -i \ + --rxq=4 --txq=4 --num-procs=2 --proc-id=1 + +Signed-off-by: Min Hu (Connor) +Signed-off-by: Lijun Ou +Signed-off-by: Andrew Rybchenko +Acked-by: Xiaoyun Li +Acked-by: Ajit Khaparde +Reviewed-by: Ferruh Yigit +Acked-by: Aman Deep Singh +--- + app/test-pmd/cmdline.c | 6 ++ + app/test-pmd/config.c | 20 +++++- + app/test-pmd/parameters.c | 9 +++ + app/test-pmd/testpmd.c | 92 +++++++++++++++++++++++---- + app/test-pmd/testpmd.h | 11 ++++ + doc/guides/testpmd_app_ug/run_app.rst | 84 ++++++++++++++++++++++++ + 6 files changed, 210 insertions(+), 12 deletions(-) + +diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c +index 5691fab94..b701129d8 100644 +--- a/app/test-pmd/cmdline.c ++++ b/app/test-pmd/cmdline.c +@@ -5441,6 +5441,12 @@ cmd_set_flush_rx_parsed(void *parsed_result, + __rte_unused void *data) + { + struct cmd_set_flush_rx *res = parsed_result; ++ ++ if (num_procs > 1 && (strcmp(res->mode, "on") == 0)) { ++ printf("multi-process doesn't support to flush Rx queues.\n"); ++ return; ++ } ++ + no_flush_rx = (uint8_t)((strcmp(res->mode, "on") == 0) ? 0 : 1); + } + +diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c +index 7af13f65c..0d6639020 100644 +--- a/app/test-pmd/config.c ++++ b/app/test-pmd/config.c +@@ -3117,6 +3117,8 @@ rss_fwd_config_setup(void) + queueid_t rxq; + queueid_t nb_q; + streamid_t sm_id; ++ int start; ++ int end; + + nb_q = nb_rxq; + if (nb_q > nb_txq) +@@ -3134,7 +3136,21 @@ rss_fwd_config_setup(void) + init_fwd_streams(); + + setup_fwd_config_of_each_lcore(&cur_fwd_config); +- rxp = 0; rxq = 0; ++ ++ if (proc_id > 0 && nb_q % num_procs != 0) ++ printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n"); ++ ++ /** ++ * In multi-process, All queues are allocated to different ++ * processes based on num_procs and proc_id. For example: ++ * if supports 4 queues(nb_q), 2 processes(num_procs), ++ * the 0~1 queue for primary process. ++ * the 2~3 queue for secondary process. 
++ */ ++ start = proc_id * nb_q / num_procs; ++ end = start + nb_q / num_procs; ++ rxp = 0; ++ rxq = start; + for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { + struct fwd_stream *fs; + +@@ -3151,6 +3167,8 @@ rss_fwd_config_setup(void) + continue; + rxp = 0; + rxq++; ++ if (rxq >= end) ++ rxq = start; + } + } + +diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c +index 414a0068f..c464c42f6 100644 +--- a/app/test-pmd/parameters.c ++++ b/app/test-pmd/parameters.c +@@ -487,6 +487,9 @@ parse_event_printing_config(const char *optarg, int enable) + void + launch_args_parse(int argc, char** argv) + { ++#define PARAM_PROC_ID "proc-id" ++#define PARAM_NUM_PROCS "num-procs" ++ + int n, opt; + char **argvopt; + int opt_idx; +@@ -603,6 +606,8 @@ launch_args_parse(int argc, char** argv) + { "rx-mq-mode", 1, 0, 0 }, + { "record-core-cycles", 0, 0, 0 }, + { "record-burst-stats", 0, 0, 0 }, ++ { PARAM_NUM_PROCS, 1, 0, 0 }, ++ { PARAM_PROC_ID, 1, 0, 0 }, + { 0, 0, 0, 0 }, + }; + +@@ -1359,6 +1364,10 @@ launch_args_parse(int argc, char** argv) + record_core_cycles = 1; + if (!strcmp(lgopts[opt_idx].name, "record-burst-stats")) + record_burst_stats = 1; ++ if (!strcmp(lgopts[opt_idx].name, PARAM_NUM_PROCS)) ++ num_procs = atoi(optarg); ++ if (!strcmp(lgopts[opt_idx].name, PARAM_PROC_ID)) ++ proc_id = atoi(optarg); + break; + case 'h': + usage(argv[0]); +diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c +index 0eaa4852d..983d8827d 100644 +--- a/app/test-pmd/testpmd.c ++++ b/app/test-pmd/testpmd.c +@@ -505,6 +505,61 @@ uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; + * hexadecimal bitmask of RX mq mode can be enabled. + */ + enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS; ++/* ++ * ID of the current process in multi-process, used to ++ * configure the queues to be polled. ++ */ ++int proc_id; ++ ++/* ++ * Number of processes in multi-process, used to ++ * configure the queues to be polled. 
++ */ ++unsigned int num_procs = 1; ++ ++static int ++eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, ++ const struct rte_eth_conf *dev_conf) ++{ ++ if (is_proc_primary()) ++ return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q, ++ dev_conf); ++ return 0; ++} ++ ++static int ++eth_dev_start_mp(uint16_t port_id) ++{ ++ if (is_proc_primary()) ++ return rte_eth_dev_start(port_id); ++ ++ return 0; ++} ++ ++static int ++eth_dev_stop_mp(uint16_t port_id) ++{ ++ if (is_proc_primary()) ++ return rte_eth_dev_stop(port_id); ++ ++ return 0; ++} ++ ++static void ++mempool_free_mp(struct rte_mempool *mp) ++{ ++ if (is_proc_primary()) ++ rte_mempool_free(mp); ++} ++ ++static int ++eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu) ++{ ++ if (is_proc_primary()) ++ return rte_eth_dev_set_mtu(port_id, mtu); ++ ++ return 0; ++} + + /* Forward function declarations */ + static void setup_attached_port(portid_t pi); +@@ -964,6 +1019,14 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, + + mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx); ++ if (!is_proc_primary()) { ++ rte_mp = rte_mempool_lookup(pool_name); ++ if (rte_mp == NULL) ++ rte_exit(EXIT_FAILURE, ++ "Get mbuf pool for socket %u failed: %s\n", ++ socket_id, rte_strerror(rte_errno)); ++ return rte_mp; ++ } + + TESTPMD_LOG(INFO, + "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", +@@ -1969,6 +2032,11 @@ flush_fwd_rx_queues(void) + uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; + uint64_t timer_period; + ++ if (num_procs > 1) { ++ printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n"); ++ return; ++ } ++ + /* convert to number of cycles */ + timer_period = rte_get_timer_hz(); /* 1 second timeout */ + +@@ -2456,7 +2524,7 @@ start_port(portid_t pid) + return -1; + } + /* configure port */ +- diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq, ++ diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq, + nb_txq + nb_hairpinq, + &(port->dev_conf)); + if (diag != 0) { +@@ -2470,7 +2538,7 @@ start_port(portid_t pid) + return -1; + } + } +- if (port->need_reconfig_queues > 0) { ++ if (port->need_reconfig_queues > 0 && is_proc_primary()) { + port->need_reconfig_queues = 0; + /* setup tx queues */ + for (qi = 0; qi < nb_txq; qi++) { +@@ -2571,7 +2639,7 @@ start_port(portid_t pid) + cnt_pi++; + + /* start port */ +- if (rte_eth_dev_start(pi) < 0) { ++ if (eth_dev_start_mp(pi) < 0) { + printf("Fail to start port %d\n", pi); + + /* Fail to setup rx queue, return */ +@@ -2700,7 +2768,7 @@ stop_port(portid_t pid) + } + } + +- if (rte_eth_dev_stop(pi) != 0) ++ if (eth_dev_stop_mp(pi) != 0) + RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", + pi); + +@@ -2769,8 +2837,10 @@ close_port(portid_t pid) + continue; + } + +- port_flow_flush(pi); +- rte_eth_dev_close(pi); ++ if (is_proc_primary()) { ++ port_flow_flush(pi); ++ rte_eth_dev_close(pi); ++ } + } + + remove_invalid_ports(); +@@ -3035,7 +3105,7 @@ pmd_test_exit(void) + } + for (i = 0 ; i < RTE_DIM(mempools) ; i++) { + if (mempools[i]) +- rte_mempool_free(mempools[i]); ++ mempool_free_mp(mempools[i]); + } + + printf("\nBye...\n"); +@@ -3482,6 +3552,10 @@ init_port_dcb_config(portid_t pid, + int retval; + uint16_t i; + ++ if (num_procs > 1) { ++ printf("The multi-process feature doesn't support dcb.\n"); ++ return -ENOTSUP; ++ } + rte_port = &ports[pid]; + + /* retain the original device configuration. 
*/ +@@ -3646,10 +3720,6 @@ main(int argc, char** argv) + rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n", + rte_strerror(rte_errno)); + +- if (rte_eal_process_type() == RTE_PROC_SECONDARY) +- rte_exit(EXIT_FAILURE, +- "Secondary process type not supported.\n"); +- + ret = register_eth_event_callback(); + if (ret != 0) + rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); +diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h +index 303bed830..122fca29c 100644 +--- a/app/test-pmd/testpmd.h ++++ b/app/test-pmd/testpmd.h +@@ -626,6 +626,17 @@ extern struct mplsoudp_decap_conf mplsoudp_decap_conf; + + extern enum rte_eth_rx_mq_mode rx_mq_mode; + ++extern struct rte_flow_action_conntrack conntrack_context; ++ ++extern int proc_id; ++extern unsigned int num_procs; ++ ++static inline bool ++is_proc_primary(void) ++{ ++ return rte_eal_process_type() == RTE_PROC_PRIMARY; ++} ++ + static inline unsigned int + lcore_num(void) + { +diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst +index ca67105b7..098cbbd43 100644 +--- a/doc/guides/testpmd_app_ug/run_app.rst ++++ b/doc/guides/testpmd_app_ug/run_app.rst +@@ -529,3 +529,87 @@ The command line options are: + bit 1 - two hairpin ports paired + bit 0 - two hairpin ports loop + The default value is 0. Hairpin will use single port mode and implicit Tx flow mode. ++ ++ ++Testpmd Multi-Process Command-line Options ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++ ++The following are the command-line options for testpmd multi-process support: ++ ++* primary process: ++ ++.. code-block:: console ++ ++ sudo ./dpdk-testpmd -a xxx --proc-type=auto -l 0-1 -- -i --rxq=4 --txq=4 \ ++ --num-procs=2 --proc-id=0 ++ ++* secondary process: ++ ++.. code-block:: console ++ ++ sudo ./dpdk-testpmd -a xxx --proc-type=auto -l 2-3 -- -i --rxq=4 --txq=4 \ ++ --num-procs=2 --proc-id=1 ++ ++The command line options are: ++ ++* ``--num-procs=N`` ++ ++ The number of processes which will be used. ++ ++* ``--proc-id=ID`` ++ ++ The ID of the current process (ID < num-procs). ID should be different in ++ primary process and secondary process, which starts from '0'. ++ ++Calculation rule for queue: ++All queues are allocated to different processes based on ``proc_num`` and ++``proc_id``. ++Calculation rule for the testpmd to allocate queues to each process: ++* start(queue start id) = proc_id * nb_q / num_procs£» ++ ++* end(queue end id) = start + nb_q / num_procs£» ++ ++For example, if testpmd is configured to have 4 Tx and Rx queues, ++queues 0 and 1 will be used by the primary process and ++queues 2 and 3 will be used by the secondary process. ++ ++The number of queues should be a multiple of the number of processes. If not, ++redundant queues will exist after queues are allocated to processes. If RSS ++is enabled, packet loss occurs when traffic is sent to all processes at the same ++time. Some traffic goes to redundant queues and cannot be forwarded. ++ ++All the dev ops is supported in primary process. While secondary process is ++not permitted to allocate or release shared memory, so some ops are not supported ++as follows: ++ ++- ``dev_configure`` ++- ``dev_start`` ++- ``dev_stop`` ++- ``rx_queue_setup`` ++- ``tx_queue_setup`` ++- ``rx_queue_release`` ++- ``tx_queue_release`` ++ ++So, any command from testpmd which calls those APIs will not be supported in ++secondary process, like: ++ ++.. 
code-block:: console ++ ++ port config all rxq|txq|rxd|txd ++ port config rx_offload xxx on/off ++ port config tx_offload xxx on/off ++ ++etc. ++ ++When secondary is running, port in primary is not permitted to be stopped. ++Reconfigure operation is only valid in primary. ++ ++Stats is supported, stats will not change when one quits and starts, as they ++share the same buffer to store the stats. Flow rules are maintained in process ++level: primary and secondary has its own flow list (but one flow list in HW). ++The two can see all the queues, so setting the flow rules for the other is OK. ++But in the testpmd primary process receiving or transmitting packets from the ++queue allocated for secondary process is not permitted, and same for secondary ++process. ++ ++Flow API and RSS are supported. +-- +2.23.0 + diff --git a/0232-app-testpmd-fix-key-for-RSS-flow-rule.patch b/0232-app-testpmd-fix-key-for-RSS-flow-rule.patch new file mode 100644 index 0000000000000000000000000000000000000000..12167e3938929fac3e43ccf4a05e87fdf4038c41 --- /dev/null +++ b/0232-app-testpmd-fix-key-for-RSS-flow-rule.patch @@ -0,0 +1,55 @@ +From b57927702a58685e46d87960aba25a7b1fa0279e Mon Sep 17 00:00:00 2001 +From: Alvin Zhang +Date: Thu, 21 Jan 2021 17:41:54 +0800 +Subject: [PATCH] app/testpmd: fix key for RSS flow rule + +Since the patch '1848b117' has initialized the variable 'key' in +'struct rte_flow_action_rss' with 'NULL', the PMD cannot get the +RSS key now. Details as bellow: + +testpmd> flow create 0 ingress pattern eth / ipv4 / end actions + rss types ipv4-other end key + 1234567890123456789012345678901234567890FFFFFFFFFFFF123 + 4567890123456789012345678901234567890FFFFFFFFFFFF + queues end / end +Flow rule #1 created +testpmd> show port 0 rss-hash key +RSS functions: + all ipv4-other ip +RSS key: + 4439796BB54C5023B675EA5B124F9F30B8A2C03DDFDC4D02A08C9B3 + 34AF64A4C05C6FA343958D8557D99583AE138C92E81150366 + +This patch sets offset and size of the 'key' variable as the first +parameter of the token 'key'. Later, the address of the RSS key will +be copied to 'key' variable. 
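The computation at the heart of the fix is a chained offsetof() into the nested parser storage, plus a sizeof taken on the member through a null pointer. The following standalone sketch shows the same idiom with generic struct names; the real testpmd/rte_flow types are only mirrored, not reproduced.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* generic stand-ins for the nested parser storage and RSS action config */
    struct rss_conf {
        uint32_t key_len;
        const uint8_t *key;
    };

    struct action_data {
        int other_fields;
        struct rss_conf conf;
    };

    int main(void)
    {
        size_t key_off = offsetof(struct action_data, conf) +
                         offsetof(struct rss_conf, key);
        size_t key_sz = sizeof(((struct rss_conf *)0)->key);

        printf("parsed key pointer lands at offset %zu, size %zu\n",
               key_off, key_sz);
        return 0;
    }
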
+ +Fixes: 1848b117cca1 ("app/testpmd: fix RSS key for flow API RSS rule") +Cc: stable@dpdk.org + +Signed-off-by: Alvin Zhang +Tested-by: Jun W Zhou +Reviewed-by: Ferruh Yigit +--- + app/test-pmd/cmdline_flow.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c +index 0618611ab1..067e120743 100644 +--- a/app/test-pmd/cmdline_flow.c ++++ b/app/test-pmd/cmdline_flow.c +@@ -3541,7 +3541,10 @@ static const struct token token_list[] = { + .name = "key", + .help = "RSS hash key", + .next = NEXT(action_rss, NEXT_ENTRY(HEX)), +- .args = ARGS(ARGS_ENTRY_ARB(0, 0), ++ .args = ARGS(ARGS_ENTRY_ARB ++ (offsetof(struct action_rss_data, conf) + ++ offsetof(struct rte_flow_action_rss, key), ++ sizeof(((struct rte_flow_action_rss *)0)->key)), + ARGS_ENTRY_ARB + (offsetof(struct action_rss_data, conf) + + offsetof(struct rte_flow_action_rss, key_len), +-- +2.33.0 + diff --git a/0233-app-testpmd-release-flows-left-before-port-stop.patch b/0233-app-testpmd-release-flows-left-before-port-stop.patch new file mode 100644 index 0000000000000000000000000000000000000000..569515508c173f6f2b9f05ed259a0c9102337c03 --- /dev/null +++ b/0233-app-testpmd-release-flows-left-before-port-stop.patch @@ -0,0 +1,40 @@ +From b71d309637e90a67f7814604f3a17b696b6304ce Mon Sep 17 00:00:00 2001 +From: Gregory Etelson +Date: Thu, 26 Nov 2020 18:43:02 +0200 +Subject: [PATCH] app/testpmd: release flows left before port stop + +According to RTE flow user guide, PMD will not keep flow rules after +port stop. Application resources that refer to flow rules become +obsolete after port stop and must not be used. +Testpmd maintains linked list of active flows for each port. Entries in +that list are allocated dynamically and must be explicitly released to +prevent memory leak. +The patch releases testpmd port flow_list that holds remaining flows +before port is stopped. 
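At the ethdev API level, the ordering this change restores can be sketched as below; this is a minimal illustration of the principle, while the testpmd helper used in the diff also releases the application's own flow list entries.

    #include <stdint.h>
    #include <rte_ethdev.h>
    #include <rte_flow.h>

    /* Minimal sketch: flow rules do not outlive a port stop, so drop them
     * (and anything that references them) before stopping the port.
     */
    static int stop_port_sketch(uint16_t port_id)
    {
        struct rte_flow_error error;

        if (rte_flow_flush(port_id, &error) != 0)
            return -1;

        return rte_eth_dev_stop(port_id);
    }
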
+ +Cc: stable@dpdk.org + +Signed-off-by: Gregory Etelson +Acked-by: Ori Kam +Acked-by: Ajit Khaparde +--- + app/test-pmd/testpmd.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c +index 60636830b..7bede14ce 100644 +--- a/app/test-pmd/testpmd.c ++++ b/app/test-pmd/testpmd.c +@@ -2768,6 +2768,9 @@ stop_port(portid_t pid) + } + } + ++ if (port->flow_list) ++ port_flow_flush(pi); ++ + if (eth_dev_stop_mp(pi) != 0) + RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", + pi); +-- +2.33.0 + diff --git a/0234-app-testpmd-delete-unused-function.patch b/0234-app-testpmd-delete-unused-function.patch new file mode 100644 index 0000000000000000000000000000000000000000..48506c2b55ab15ffb37723bd39b7489a47f8c372 --- /dev/null +++ b/0234-app-testpmd-delete-unused-function.patch @@ -0,0 +1,33 @@ +From 888d05e90f1dd78cd5075048206e573b0e30e40c Mon Sep 17 00:00:00 2001 +From: "Min Hu (Connor)" +Date: Wed, 10 Nov 2021 16:50:56 +0800 +Subject: [PATCH 01/33] app/testpmd: delete unused function + +Signed-off-by: Min Hu (Connor) +--- + app/test-pmd/testpmd.c | 9 --------- + 1 file changed, 9 deletions(-) + +diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c +index 50a2a5a94..e3b5a165d 100644 +--- a/app/test-pmd/testpmd.c ++++ b/app/test-pmd/testpmd.c +@@ -552,15 +552,6 @@ mempool_free_mp(struct rte_mempool *mp) + rte_mempool_free(mp); + } + +-static int +-eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu) +-{ +- if (is_proc_primary()) +- return rte_eth_dev_set_mtu(port_id, mtu); +- +- return 0; +-} +- + /* Forward function declarations */ + static void setup_attached_port(portid_t pi); + static void check_all_ports_link_status(uint32_t port_mask); +-- +2.33.0 + diff --git a/0235-dmadev-introduce-DMA-device-support.patch b/0235-dmadev-introduce-DMA-device-support.patch new file mode 100644 index 0000000000000000000000000000000000000000..227498c6daf24f3e5561904b4fb9464593e308bc --- /dev/null +++ b/0235-dmadev-introduce-DMA-device-support.patch @@ -0,0 +1,7487 @@ +From 8f6d527cd1ab3c7f5e6490425795b46cb5ebc42a Mon Sep 17 00:00:00 2001 +From: "Min Hu (Connor)" +Date: Fri, 12 Nov 2021 09:36:45 +0800 +Subject: [PATCH] dmadev: introduce DMA device support + +Signed-off-by: Min Hu (Connor) +--- + app/test/meson.build | 4 + + app/test/test_dmadev.c | 867 ++++++++++++++++++ + app/test/test_dmadev_api.c | 574 ++++++++++++ + app/test/test_dmadev_api.h | 5 + + doc/api/doxy-api-index.md | 1 + + doc/api/doxy-api.conf.in | 1 + + doc/guides/dmadevs/index.rst | 12 + + doc/guides/index.rst | 1 + + doc/guides/prog_guide/dmadev.rst | 90 ++ + doc/guides/prog_guide/img/dmadev.svg | 283 ++++++ + doc/guides/prog_guide/index.rst | 1 + + drivers/dma/hisilicon/hisi_dmadev.c | 925 +++++++++++++++++++ + drivers/dma/hisilicon/hisi_dmadev.h | 236 +++++ + drivers/dma/hisilicon/meson.build | 19 + + drivers/dma/hisilicon/version.map | 3 + + drivers/dma/meson.build | 13 + + drivers/dma/skeleton/meson.build | 11 + + drivers/dma/skeleton/skeleton_dmadev.c | 596 +++++++++++++ + drivers/dma/skeleton/skeleton_dmadev.h | 61 ++ + drivers/dma/skeleton/version.map | 3 + + drivers/meson.build | 1 + + examples/dma/Makefile | 51 ++ + examples/dma/dmafwd.c | 1105 +++++++++++++++++++++++ + examples/dma/meson.build | 15 + + examples/meson.build | 2 +- + lib/librte_dmadev/meson.build | 5 + + lib/librte_dmadev/rte_dmadev.c | 866 ++++++++++++++++++ + lib/librte_dmadev/rte_dmadev.h | 1138 ++++++++++++++++++++++++ + lib/librte_dmadev/rte_dmadev_core.h | 80 ++ + lib/librte_dmadev/rte_dmadev_pmd.h | 
171 ++++ + lib/librte_dmadev/version.map | 31 + + lib/meson.build | 2 +- + 32 files changed, 7171 insertions(+), 2 deletions(-) + create mode 100644 app/test/test_dmadev.c + create mode 100644 app/test/test_dmadev_api.c + create mode 100644 app/test/test_dmadev_api.h + create mode 100644 doc/guides/dmadevs/index.rst + create mode 100644 doc/guides/prog_guide/dmadev.rst + create mode 100644 doc/guides/prog_guide/img/dmadev.svg + create mode 100644 drivers/dma/hisilicon/hisi_dmadev.c + create mode 100644 drivers/dma/hisilicon/hisi_dmadev.h + create mode 100644 drivers/dma/hisilicon/meson.build + create mode 100644 drivers/dma/hisilicon/version.map + create mode 100644 drivers/dma/meson.build + create mode 100644 drivers/dma/skeleton/meson.build + create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c + create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h + create mode 100644 drivers/dma/skeleton/version.map + create mode 100644 examples/dma/Makefile + create mode 100644 examples/dma/dmafwd.c + create mode 100644 examples/dma/meson.build + create mode 100644 lib/librte_dmadev/meson.build + create mode 100644 lib/librte_dmadev/rte_dmadev.c + create mode 100644 lib/librte_dmadev/rte_dmadev.h + create mode 100644 lib/librte_dmadev/rte_dmadev_core.h + create mode 100644 lib/librte_dmadev/rte_dmadev_pmd.h + create mode 100644 lib/librte_dmadev/version.map + +diff --git a/app/test/meson.build b/app/test/meson.build +index 94fd39fec..88bed64ad 100644 +--- a/app/test/meson.build ++++ b/app/test/meson.build +@@ -34,6 +34,8 @@ test_sources = files('commands.c', + 'test_debug.c', + 'test_distributor.c', + 'test_distributor_perf.c', ++ 'test_dmadev.c', ++ 'test_dmadev_api.c', + 'test_eal_flags.c', + 'test_eal_fs.c', + 'test_efd.c', +@@ -151,6 +153,7 @@ test_deps = ['acl', + 'cmdline', + 'cryptodev', + 'distributor', ++ 'dmadev', + 'efd', + 'ethdev', + 'eventdev', +@@ -320,6 +323,7 @@ driver_test_names = [ + 'cryptodev_sw_mvsam_autotest', + 'cryptodev_sw_snow3g_autotest', + 'cryptodev_sw_zuc_autotest', ++ 'dmadev_autotest', + 'eventdev_selftest_octeontx', + 'eventdev_selftest_sw', + 'rawdev_autotest', +diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c +new file mode 100644 +index 000000000..b206db27a +--- /dev/null ++++ b/app/test/test_dmadev.c +@@ -0,0 +1,867 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2021 HiSilicon Limited ++ * Copyright(c) 2021 Intel Corporation ++ */ ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "test.h" ++#include "test_dmadev_api.h" ++ ++#define ERR_RETURN(...) do { print_err(__func__, __LINE__, __VA_ARGS__); return -1; } while (0) ++ ++#define COPY_LEN 1024 ++ ++static struct rte_mempool *pool; ++static uint16_t id_count; ++ ++static void ++__rte_format_printf(3, 4) ++print_err(const char *func, int lineno, const char *format, ...) ++{ ++ va_list ap; ++ ++ fprintf(stderr, "In %s:%d - ", func, lineno); ++ va_start(ap, format); ++ vfprintf(stderr, format, ap); ++ va_end(ap); ++} ++ ++static int ++runtest(const char *printable, int (*test_fn)(int16_t dev_id, uint16_t vchan), int iterations, ++ int16_t dev_id, uint16_t vchan, bool check_err_stats) ++{ ++ struct rte_dma_stats stats; ++ int i; ++ ++ rte_dma_stats_reset(dev_id, vchan); ++ printf("DMA Dev %d: Running %s Tests %s\n", dev_id, printable, ++ check_err_stats ? 
" " : "(errors expected)"); ++ for (i = 0; i < iterations; i++) { ++ if (test_fn(dev_id, vchan) < 0) ++ return -1; ++ ++ rte_dma_stats_get(dev_id, 0, &stats); ++ printf("Ops submitted: %"PRIu64"\t", stats.submitted); ++ printf("Ops completed: %"PRIu64"\t", stats.completed); ++ printf("Errors: %"PRIu64"\r", stats.errors); ++ ++ if (stats.completed != stats.submitted) ++ ERR_RETURN("\nError, not all submitted jobs are reported as completed\n"); ++ if (check_err_stats && stats.errors != 0) ++ ERR_RETURN("\nErrors reported during op processing, aborting tests\n"); ++ } ++ printf("\n"); ++ return 0; ++} ++ ++static void ++await_hw(int16_t dev_id, uint16_t vchan) ++{ ++ enum rte_dma_vchan_status st; ++ ++ if (rte_dma_vchan_status(dev_id, vchan, &st) < 0) { ++ /* for drivers that don't support this op, just sleep for 1 millisecond */ ++ rte_delay_us_sleep(1000); ++ return; ++ } ++ ++ /* for those that do, *max* end time is one second from now, but all should be faster */ ++ const uint64_t end_cycles = rte_get_timer_cycles() + rte_get_timer_hz(); ++ while (st == RTE_DMA_VCHAN_ACTIVE && rte_get_timer_cycles() < end_cycles) { ++ rte_pause(); ++ rte_dma_vchan_status(dev_id, vchan, &st); ++ } ++} ++ ++/* run a series of copy tests just using some different options for enqueues and completions */ ++static int ++do_multi_copies(int16_t dev_id, uint16_t vchan, ++ int split_batches, /* submit 2 x 16 or 1 x 32 burst */ ++ int split_completions, /* gather 2 x 16 or 1 x 32 completions */ ++ int use_completed_status) /* use completed or completed_status function */ ++{ ++ struct rte_mbuf *srcs[32], *dsts[32]; ++ enum rte_dma_status_code sc[32]; ++ unsigned int i, j; ++ bool dma_err = false; ++ ++ /* Enqueue burst of copies and hit doorbell */ ++ for (i = 0; i < RTE_DIM(srcs); i++) { ++ uint64_t *src_data; ++ ++ if (split_batches && i == RTE_DIM(srcs) / 2) ++ rte_dma_submit(dev_id, vchan); ++ ++ srcs[i] = rte_pktmbuf_alloc(pool); ++ dsts[i] = rte_pktmbuf_alloc(pool); ++ if (srcs[i] == NULL || dsts[i] == NULL) ++ ERR_RETURN("Error allocating buffers\n"); ++ ++ src_data = rte_pktmbuf_mtod(srcs[i], uint64_t *); ++ for (j = 0; j < COPY_LEN/sizeof(uint64_t); j++) ++ src_data[j] = rte_rand(); ++ ++ if (rte_dma_copy(dev_id, vchan, srcs[i]->buf_iova + srcs[i]->data_off, ++ dsts[i]->buf_iova + dsts[i]->data_off, COPY_LEN, 0) != id_count++) ++ ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i); ++ } ++ rte_dma_submit(dev_id, vchan); ++ ++ await_hw(dev_id, vchan); ++ ++ if (split_completions) { ++ /* gather completions in two halves */ ++ uint16_t half_len = RTE_DIM(srcs) / 2; ++ int ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err); ++ if (ret != half_len || dma_err) ++ ERR_RETURN("Error with rte_dma_completed - first half. ret = %d, expected ret = %u, dma_err = %d\n", ++ ret, half_len, dma_err); ++ ++ ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err); ++ if (ret != half_len || dma_err) ++ ERR_RETURN("Error with rte_dma_completed - second half. 
ret = %d, expected ret = %u, dma_err = %d\n", ++ ret, half_len, dma_err); ++ } else { ++ /* gather all completions in one go, using either ++ * completed or completed_status fns ++ */ ++ if (!use_completed_status) { ++ int n = rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err); ++ if (n != RTE_DIM(srcs) || dma_err) ++ ERR_RETURN("Error with rte_dma_completed, %u [expected: %zu], dma_err = %d\n", ++ n, RTE_DIM(srcs), dma_err); ++ } else { ++ int n = rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc); ++ if (n != RTE_DIM(srcs)) ++ ERR_RETURN("Error with rte_dma_completed_status, %u [expected: %zu]\n", ++ n, RTE_DIM(srcs)); ++ ++ for (j = 0; j < (uint16_t)n; j++) ++ if (sc[j] != RTE_DMA_STATUS_SUCCESSFUL) ++ ERR_RETURN("Error with rte_dma_completed_status, job %u reports failure [code %u]\n", ++ j, sc[j]); ++ } ++ } ++ ++ /* check for empty */ ++ int ret = use_completed_status ? ++ rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc) : ++ rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err); ++ if (ret != 0) ++ ERR_RETURN("Error with completion check - ops unexpectedly returned\n"); ++ ++ for (i = 0; i < RTE_DIM(srcs); i++) { ++ char *src_data, *dst_data; ++ ++ src_data = rte_pktmbuf_mtod(srcs[i], char *); ++ dst_data = rte_pktmbuf_mtod(dsts[i], char *); ++ for (j = 0; j < COPY_LEN; j++) ++ if (src_data[j] != dst_data[j]) ++ ERR_RETURN("Error with copy of packet %u, byte %u\n", i, j); ++ ++ rte_pktmbuf_free(srcs[i]); ++ rte_pktmbuf_free(dsts[i]); ++ } ++ return 0; ++} ++ ++static int ++test_enqueue_copies(int16_t dev_id, uint16_t vchan) ++{ ++ unsigned int i; ++ uint16_t id; ++ ++ /* test doing a single copy */ ++ do { ++ struct rte_mbuf *src, *dst; ++ char *src_data, *dst_data; ++ ++ src = rte_pktmbuf_alloc(pool); ++ dst = rte_pktmbuf_alloc(pool); ++ src_data = rte_pktmbuf_mtod(src, char *); ++ dst_data = rte_pktmbuf_mtod(dst, char *); ++ ++ for (i = 0; i < COPY_LEN; i++) ++ src_data[i] = rte_rand() & 0xFF; ++ ++ id = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src), rte_pktmbuf_iova(dst), ++ COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT); ++ if (id != id_count) ++ ERR_RETURN("Error with rte_dma_copy, got %u, expected %u\n", ++ id, id_count); ++ ++ /* give time for copy to finish, then check it was done */ ++ await_hw(dev_id, vchan); ++ ++ for (i = 0; i < COPY_LEN; i++) ++ if (dst_data[i] != src_data[i]) ++ ERR_RETURN("Data mismatch at char %u [Got %02x not %02x]\n", i, ++ dst_data[i], src_data[i]); ++ ++ /* now check completion works */ ++ if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1) ++ ERR_RETURN("Error with rte_dma_completed\n"); ++ ++ if (id != id_count) ++ ERR_RETURN("Error:incorrect job id received, %u [expected %u]\n", ++ id, id_count); ++ ++ rte_pktmbuf_free(src); ++ rte_pktmbuf_free(dst); ++ ++ /* now check completion returns nothing more */ ++ if (rte_dma_completed(dev_id, 0, 1, NULL, NULL) != 0) ++ ERR_RETURN("Error with rte_dma_completed in empty check\n"); ++ ++ id_count++; ++ ++ } while (0); ++ ++ /* test doing a multiple single copies */ ++ do { ++ const uint16_t max_ops = 4; ++ struct rte_mbuf *src, *dst; ++ char *src_data, *dst_data; ++ uint16_t count; ++ ++ src = rte_pktmbuf_alloc(pool); ++ dst = rte_pktmbuf_alloc(pool); ++ src_data = rte_pktmbuf_mtod(src, char *); ++ dst_data = rte_pktmbuf_mtod(dst, char *); ++ ++ for (i = 0; i < COPY_LEN; i++) ++ src_data[i] = rte_rand() & 0xFF; ++ ++ /* perform the same copy times */ ++ for (i = 0; i < max_ops; i++) ++ if (rte_dma_copy(dev_id, vchan, ++ rte_pktmbuf_iova(src), ++ 
rte_pktmbuf_iova(dst), ++ COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT) != id_count++) ++ ERR_RETURN("Error with rte_dma_copy\n"); ++ ++ await_hw(dev_id, vchan); ++ ++ count = rte_dma_completed(dev_id, vchan, max_ops * 2, &id, NULL); ++ if (count != max_ops) ++ ERR_RETURN("Error with rte_dma_completed, got %u not %u\n", ++ count, max_ops); ++ ++ if (id != id_count - 1) ++ ERR_RETURN("Error, incorrect job id returned: got %u not %u\n", ++ id, id_count - 1); ++ ++ for (i = 0; i < COPY_LEN; i++) ++ if (dst_data[i] != src_data[i]) ++ ERR_RETURN("Data mismatch at char %u\n", i); ++ ++ rte_pktmbuf_free(src); ++ rte_pktmbuf_free(dst); ++ } while (0); ++ ++ /* test doing multiple copies */ ++ return do_multi_copies(dev_id, vchan, 0, 0, 0) /* enqueue and complete 1 batch at a time */ ++ /* enqueue 2 batches and then complete both */ ++ || do_multi_copies(dev_id, vchan, 1, 0, 0) ++ /* enqueue 1 batch, then complete in two halves */ ++ || do_multi_copies(dev_id, vchan, 0, 1, 0) ++ /* test using completed_status in place of regular completed API */ ++ || do_multi_copies(dev_id, vchan, 0, 0, 1); ++} ++ ++/* Failure handling test cases - global macros and variables for those tests*/ ++#define COMP_BURST_SZ 16 ++#define OPT_FENCE(idx) ((fence && idx == 8) ? RTE_DMA_OP_FLAG_FENCE : 0) ++ ++static int ++test_failure_in_full_burst(int16_t dev_id, uint16_t vchan, bool fence, ++ struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx) ++{ ++ /* Test single full batch statuses with failures */ ++ enum rte_dma_status_code status[COMP_BURST_SZ]; ++ struct rte_dma_stats baseline, stats; ++ uint16_t invalid_addr_id = 0; ++ uint16_t idx; ++ uint16_t count, status_count; ++ unsigned int i; ++ bool error = false; ++ int err_count = 0; ++ ++ rte_dma_stats_get(dev_id, vchan, &baseline); /* get a baseline set of stats */ ++ for (i = 0; i < COMP_BURST_SZ; i++) { ++ int id = rte_dma_copy(dev_id, vchan, ++ (i == fail_idx ? 0 : (srcs[i]->buf_iova + srcs[i]->data_off)), ++ dsts[i]->buf_iova + dsts[i]->data_off, ++ COPY_LEN, OPT_FENCE(i)); ++ if (id < 0) ++ ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i); ++ if (i == fail_idx) ++ invalid_addr_id = id; ++ } ++ rte_dma_submit(dev_id, vchan); ++ rte_dma_stats_get(dev_id, vchan, &stats); ++ if (stats.submitted != baseline.submitted + COMP_BURST_SZ) ++ ERR_RETURN("Submitted stats value not as expected, %"PRIu64" not %"PRIu64"\n", ++ stats.submitted, baseline.submitted + COMP_BURST_SZ); ++ ++ await_hw(dev_id, vchan); ++ ++ count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error); ++ if (count != fail_idx) ++ ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n", ++ count, fail_idx); ++ if (!error) ++ ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n", ++ fail_idx); ++ if (idx != invalid_addr_id - 1) ++ ERR_RETURN("Error, missing expected failed copy, %u. 
Got last idx %u, not %u\n", ++ fail_idx, idx, invalid_addr_id - 1); ++ ++ /* all checks ok, now verify calling completed() again always returns 0 */ ++ for (i = 0; i < 10; i++) ++ if (rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error) != 0 ++ || error == false || idx != (invalid_addr_id - 1)) ++ ERR_RETURN("Error with follow-up completed calls for fail idx %u\n", ++ fail_idx); ++ ++ status_count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ, ++ &idx, status); ++ /* some HW may stop on error and be restarted after getting error status for single value ++ * To handle this case, if we get just one error back, wait for more completions and get ++ * status for rest of the burst ++ */ ++ if (status_count == 1) { ++ await_hw(dev_id, vchan); ++ status_count += rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - 1, ++ &idx, &status[1]); ++ } ++ /* check that at this point we have all status values */ ++ if (status_count != COMP_BURST_SZ - count) ++ ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n", ++ fail_idx, status_count, COMP_BURST_SZ - count); ++ /* now verify just one failure followed by multiple successful or skipped entries */ ++ if (status[0] == RTE_DMA_STATUS_SUCCESSFUL) ++ ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n", ++ fail_idx); ++ for (i = 1; i < status_count; i++) ++ /* after a failure in a burst, depending on ordering/fencing, ++ * operations may be successful or skipped because of previous error. ++ */ ++ if (status[i] != RTE_DMA_STATUS_SUCCESSFUL ++ && status[i] != RTE_DMA_STATUS_NOT_ATTEMPTED) ++ ERR_RETURN("Error with status calls for fail idx %u. Status for job %u (of %u) is not successful\n", ++ fail_idx, count + i, COMP_BURST_SZ); ++ ++ /* check the completed + errors stats are as expected */ ++ rte_dma_stats_get(dev_id, vchan, &stats); ++ if (stats.completed != baseline.completed + COMP_BURST_SZ) ++ ERR_RETURN("Completed stats value not as expected, %"PRIu64" not %"PRIu64"\n", ++ stats.completed, baseline.completed + COMP_BURST_SZ); ++ for (i = 0; i < status_count; i++) ++ err_count += (status[i] != RTE_DMA_STATUS_SUCCESSFUL); ++ if (stats.errors != baseline.errors + err_count) ++ ERR_RETURN("'Errors' stats value not as expected, %"PRIu64" not %"PRIu64"\n", ++ stats.errors, baseline.errors + err_count); ++ ++ return 0; ++} ++ ++static int ++test_individual_status_query_with_failure(int16_t dev_id, uint16_t vchan, bool fence, ++ struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx) ++{ ++ /* Test gathering batch statuses one at a time */ ++ enum rte_dma_status_code status[COMP_BURST_SZ]; ++ uint16_t invalid_addr_id = 0; ++ uint16_t idx; ++ uint16_t count = 0, status_count = 0; ++ unsigned int j; ++ bool error = false; ++ ++ for (j = 0; j < COMP_BURST_SZ; j++) { ++ int id = rte_dma_copy(dev_id, vchan, ++ (j == fail_idx ? 
0 : (srcs[j]->buf_iova + srcs[j]->data_off)), ++ dsts[j]->buf_iova + dsts[j]->data_off, ++ COPY_LEN, OPT_FENCE(j)); ++ if (id < 0) ++ ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j); ++ if (j == fail_idx) ++ invalid_addr_id = id; ++ } ++ rte_dma_submit(dev_id, vchan); ++ await_hw(dev_id, vchan); ++ ++ /* use regular "completed" until we hit error */ ++ while (!error) { ++ uint16_t n = rte_dma_completed(dev_id, vchan, 1, &idx, &error); ++ count += n; ++ if (n > 1 || count >= COMP_BURST_SZ) ++ ERR_RETURN("Error - too many completions got\n"); ++ if (n == 0 && !error) ++ ERR_RETURN("Error, unexpectedly got zero completions after %u completed\n", ++ count); ++ } ++ if (idx != invalid_addr_id - 1) ++ ERR_RETURN("Error, last successful index not as expected, got %u, expected %u\n", ++ idx, invalid_addr_id - 1); ++ ++ /* use completed_status until we hit end of burst */ ++ while (count + status_count < COMP_BURST_SZ) { ++ uint16_t n = rte_dma_completed_status(dev_id, vchan, 1, &idx, ++ &status[status_count]); ++ await_hw(dev_id, vchan); /* allow delay to ensure jobs are completed */ ++ status_count += n; ++ if (n != 1) ++ ERR_RETURN("Error: unexpected number of completions received, %u, not 1\n", ++ n); ++ } ++ ++ /* check for single failure */ ++ if (status[0] == RTE_DMA_STATUS_SUCCESSFUL) ++ ERR_RETURN("Error, unexpected successful DMA transaction\n"); ++ for (j = 1; j < status_count; j++) ++ if (status[j] != RTE_DMA_STATUS_SUCCESSFUL ++ && status[j] != RTE_DMA_STATUS_NOT_ATTEMPTED) ++ ERR_RETURN("Error, unexpected DMA error reported\n"); ++ ++ return 0; ++} ++ ++static int ++test_single_item_status_query_with_failure(int16_t dev_id, uint16_t vchan, ++ struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx) ++{ ++ /* When error occurs just collect a single error using "completed_status()" ++ * before going to back to completed() calls ++ */ ++ enum rte_dma_status_code status; ++ uint16_t invalid_addr_id = 0; ++ uint16_t idx; ++ uint16_t count, status_count, count2; ++ unsigned int j; ++ bool error = false; ++ ++ for (j = 0; j < COMP_BURST_SZ; j++) { ++ int id = rte_dma_copy(dev_id, vchan, ++ (j == fail_idx ? 0 : (srcs[j]->buf_iova + srcs[j]->data_off)), ++ dsts[j]->buf_iova + dsts[j]->data_off, ++ COPY_LEN, 0); ++ if (id < 0) ++ ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j); ++ if (j == fail_idx) ++ invalid_addr_id = id; ++ } ++ rte_dma_submit(dev_id, vchan); ++ await_hw(dev_id, vchan); ++ ++ /* get up to the error point */ ++ count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error); ++ if (count != fail_idx) ++ ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n", ++ count, fail_idx); ++ if (!error) ++ ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n", ++ fail_idx); ++ if (idx != invalid_addr_id - 1) ++ ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n", ++ fail_idx, idx, invalid_addr_id - 1); ++ ++ /* get the error code */ ++ status_count = rte_dma_completed_status(dev_id, vchan, 1, &idx, &status); ++ if (status_count != 1) ++ ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n", ++ fail_idx, status_count, COMP_BURST_SZ - count); ++ if (status == RTE_DMA_STATUS_SUCCESSFUL) ++ ERR_RETURN("Error with status returned for fail idx %u. 
First status was not failure\n", ++ fail_idx); ++ ++ /* delay in case time needed after err handled to complete other jobs */ ++ await_hw(dev_id, vchan); ++ ++ /* get the rest of the completions without status */ ++ count2 = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error); ++ if (error == true) ++ ERR_RETURN("Error, got further errors post completed_status() call, for failure case %u.\n", ++ fail_idx); ++ if (count + status_count + count2 != COMP_BURST_SZ) ++ ERR_RETURN("Error, incorrect number of completions received, got %u not %u\n", ++ count + status_count + count2, COMP_BURST_SZ); ++ ++ return 0; ++} ++ ++static int ++test_multi_failure(int16_t dev_id, uint16_t vchan, struct rte_mbuf **srcs, struct rte_mbuf **dsts, ++ const unsigned int *fail, size_t num_fail) ++{ ++ /* test having multiple errors in one go */ ++ enum rte_dma_status_code status[COMP_BURST_SZ]; ++ unsigned int i, j; ++ uint16_t count, err_count = 0; ++ bool error = false; ++ ++ /* enqueue and gather completions in one go */ ++ for (j = 0; j < COMP_BURST_SZ; j++) { ++ uintptr_t src = srcs[j]->buf_iova + srcs[j]->data_off; ++ /* set up for failure if the current index is anywhere is the fails array */ ++ for (i = 0; i < num_fail; i++) ++ if (j == fail[i]) ++ src = 0; ++ ++ int id = rte_dma_copy(dev_id, vchan, ++ src, dsts[j]->buf_iova + dsts[j]->data_off, ++ COPY_LEN, 0); ++ if (id < 0) ++ ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j); ++ } ++ rte_dma_submit(dev_id, vchan); ++ await_hw(dev_id, vchan); ++ ++ count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ, NULL, status); ++ while (count < COMP_BURST_SZ) { ++ await_hw(dev_id, vchan); ++ ++ uint16_t ret = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - count, ++ NULL, &status[count]); ++ if (ret == 0) ++ ERR_RETURN("Error getting all completions for jobs. 
Got %u of %u\n", ++ count, COMP_BURST_SZ); ++ count += ret; ++ } ++ for (i = 0; i < count; i++) ++ if (status[i] != RTE_DMA_STATUS_SUCCESSFUL) ++ err_count++; ++ ++ if (err_count != num_fail) ++ ERR_RETURN("Error: Invalid number of failed completions returned, %u; expected %zu\n", ++ err_count, num_fail); ++ ++ /* enqueue and gather completions in bursts, but getting errors one at a time */ ++ for (j = 0; j < COMP_BURST_SZ; j++) { ++ uintptr_t src = srcs[j]->buf_iova + srcs[j]->data_off; ++ /* set up for failure if the current index is anywhere is the fails array */ ++ for (i = 0; i < num_fail; i++) ++ if (j == fail[i]) ++ src = 0; ++ ++ int id = rte_dma_copy(dev_id, vchan, ++ src, dsts[j]->buf_iova + dsts[j]->data_off, ++ COPY_LEN, 0); ++ if (id < 0) ++ ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j); ++ } ++ rte_dma_submit(dev_id, vchan); ++ await_hw(dev_id, vchan); ++ ++ count = 0; ++ err_count = 0; ++ while (count + err_count < COMP_BURST_SZ) { ++ count += rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, NULL, &error); ++ if (error) { ++ uint16_t ret = rte_dma_completed_status(dev_id, vchan, 1, ++ NULL, status); ++ if (ret != 1) ++ ERR_RETURN("Error getting error-status for completions\n"); ++ err_count += ret; ++ await_hw(dev_id, vchan); ++ } ++ } ++ if (err_count != num_fail) ++ ERR_RETURN("Error: Incorrect number of failed completions received, got %u not %zu\n", ++ err_count, num_fail); ++ ++ return 0; ++} ++ ++static int ++test_completion_status(int16_t dev_id, uint16_t vchan, bool fence) ++{ ++ const unsigned int fail[] = {0, 7, 14, 15}; ++ struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ]; ++ unsigned int i; ++ ++ for (i = 0; i < COMP_BURST_SZ; i++) { ++ srcs[i] = rte_pktmbuf_alloc(pool); ++ dsts[i] = rte_pktmbuf_alloc(pool); ++ } ++ ++ for (i = 0; i < RTE_DIM(fail); i++) { ++ if (test_failure_in_full_burst(dev_id, vchan, fence, srcs, dsts, fail[i]) < 0) ++ return -1; ++ ++ if (test_individual_status_query_with_failure(dev_id, vchan, fence, ++ srcs, dsts, fail[i]) < 0) ++ return -1; ++ ++ /* test is run the same fenced, or unfenced, but no harm in running it twice */ ++ if (test_single_item_status_query_with_failure(dev_id, vchan, ++ srcs, dsts, fail[i]) < 0) ++ return -1; ++ } ++ ++ if (test_multi_failure(dev_id, vchan, srcs, dsts, fail, RTE_DIM(fail)) < 0) ++ return -1; ++ ++ for (i = 0; i < COMP_BURST_SZ; i++) { ++ rte_pktmbuf_free(srcs[i]); ++ rte_pktmbuf_free(dsts[i]); ++ } ++ return 0; ++} ++ ++static int ++test_completion_handling(int16_t dev_id, uint16_t vchan) ++{ ++ return test_completion_status(dev_id, vchan, false) /* without fences */ ++ || test_completion_status(dev_id, vchan, true); /* with fences */ ++} ++ ++static int ++test_enqueue_fill(int16_t dev_id, uint16_t vchan) ++{ ++ const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89}; ++ struct rte_mbuf *dst; ++ char *dst_data; ++ uint64_t pattern = 0xfedcba9876543210; ++ unsigned int i, j; ++ ++ dst = rte_pktmbuf_alloc(pool); ++ if (dst == NULL) ++ ERR_RETURN("Failed to allocate mbuf\n"); ++ dst_data = rte_pktmbuf_mtod(dst, char *); ++ ++ for (i = 0; i < RTE_DIM(lengths); i++) { ++ /* reset dst_data */ ++ memset(dst_data, 0, rte_pktmbuf_data_len(dst)); ++ ++ /* perform the fill operation */ ++ int id = rte_dma_fill(dev_id, vchan, pattern, ++ rte_pktmbuf_iova(dst), lengths[i], RTE_DMA_OP_FLAG_SUBMIT); ++ if (id < 0) ++ ERR_RETURN("Error with rte_dma_fill\n"); ++ await_hw(dev_id, vchan); ++ ++ if (rte_dma_completed(dev_id, vchan, 1, NULL, NULL) != 1) ++ ERR_RETURN("Error: fill operation failed 
(length: %u)\n", lengths[i]); ++ /* check the data from the fill operation is correct */ ++ for (j = 0; j < lengths[i]; j++) { ++ char pat_byte = ((char *)&pattern)[j % 8]; ++ if (dst_data[j] != pat_byte) ++ ERR_RETURN("Error with fill operation (lengths = %u): got (%x), not (%x)\n", ++ lengths[i], dst_data[j], pat_byte); ++ } ++ /* check that the data after the fill operation was not written to */ ++ for (; j < rte_pktmbuf_data_len(dst); j++) ++ if (dst_data[j] != 0) ++ ERR_RETURN("Error, fill operation wrote too far (lengths = %u): got (%x), not (%x)\n", ++ lengths[i], dst_data[j], 0); ++ } ++ ++ rte_pktmbuf_free(dst); ++ return 0; ++} ++ ++static int ++test_burst_capacity(int16_t dev_id, uint16_t vchan) ++{ ++#define CAP_TEST_BURST_SIZE 64 ++ const int ring_space = rte_dma_burst_capacity(dev_id, vchan); ++ struct rte_mbuf *src, *dst; ++ int i, j, iter; ++ int cap, ret; ++ bool dma_err; ++ ++ src = rte_pktmbuf_alloc(pool); ++ dst = rte_pktmbuf_alloc(pool); ++ ++ /* to test capacity, we enqueue elements and check capacity is reduced ++ * by one each time - rebaselining the expected value after each burst ++ * as the capacity is only for a burst. We enqueue multiple bursts to ++ * fill up half the ring, before emptying it again. We do this twice to ++ * ensure that we get to test scenarios where we get ring wrap-around ++ */ ++ for (iter = 0; iter < 2; iter++) { ++ for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) { ++ cap = rte_dma_burst_capacity(dev_id, vchan); ++ ++ for (j = 0; j < CAP_TEST_BURST_SIZE; j++) { ++ ret = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src), ++ rte_pktmbuf_iova(dst), COPY_LEN, 0); ++ if (ret < 0) ++ ERR_RETURN("Error with rte_dmadev_copy\n"); ++ ++ if (rte_dma_burst_capacity(dev_id, vchan) != cap - (j + 1)) ++ ERR_RETURN("Error, ring capacity did not change as expected\n"); ++ } ++ if (rte_dma_submit(dev_id, vchan) < 0) ++ ERR_RETURN("Error, failed to submit burst\n"); ++ ++ if (cap < rte_dma_burst_capacity(dev_id, vchan)) ++ ERR_RETURN("Error, avail ring capacity has gone up, not down\n"); ++ } ++ await_hw(dev_id, vchan); ++ ++ for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) { ++ ret = rte_dma_completed(dev_id, vchan, ++ CAP_TEST_BURST_SIZE, NULL, &dma_err); ++ if (ret != CAP_TEST_BURST_SIZE || dma_err) { ++ enum rte_dma_status_code status; ++ ++ rte_dma_completed_status(dev_id, vchan, 1, NULL, &status); ++ ERR_RETURN("Error with rte_dmadev_completed, %u [expected: %u], dma_err = %d, i = %u, iter = %u, status = %u\n", ++ ret, CAP_TEST_BURST_SIZE, dma_err, i, iter, status); ++ } ++ } ++ cap = rte_dma_burst_capacity(dev_id, vchan); ++ if (cap != ring_space) ++ ERR_RETURN("Error, ring capacity has not reset to original value, got %u, expected %u\n", ++ cap, ring_space); ++ } ++ ++ rte_pktmbuf_free(src); ++ rte_pktmbuf_free(dst); ++ ++ return 0; ++} ++ ++static int ++test_dmadev_instance(int16_t dev_id) ++{ ++#define TEST_RINGSIZE 512 ++#define CHECK_ERRS true ++ struct rte_dma_stats stats; ++ struct rte_dma_info info; ++ const struct rte_dma_conf conf = { .nb_vchans = 1}; ++ const struct rte_dma_vchan_conf qconf = { ++ .direction = RTE_DMA_DIR_MEM_TO_MEM, ++ .nb_desc = TEST_RINGSIZE, ++ }; ++ const int vchan = 0; ++ int ret; ++ ++ ret = rte_dma_info_get(dev_id, &info); ++ if (ret != 0) ++ ERR_RETURN("Error with rte_dma_info_get()\n"); ++ ++ printf("\n### Test dmadev instance %u [%s]\n", ++ dev_id, info.dev_name); ++ ++ if (info.max_vchans < 1) ++ ERR_RETURN("Error, no channels available on device id %u\n", dev_id); ++ ++ if 
(rte_dma_configure(dev_id, &conf) != 0) ++ ERR_RETURN("Error with rte_dma_configure()\n"); ++ ++ if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0) ++ ERR_RETURN("Error with queue configuration\n"); ++ ++ ret = rte_dma_info_get(dev_id, &info); ++ if (ret != 0 || info.nb_vchans != 1) ++ ERR_RETURN("Error, no configured queues reported on device id %u\n", dev_id); ++ ++ if (rte_dma_start(dev_id) != 0) ++ ERR_RETURN("Error with rte_dma_start()\n"); ++ ++ if (rte_dma_stats_get(dev_id, vchan, &stats) != 0) ++ ERR_RETURN("Error with rte_dma_stats_get()\n"); ++ ++ if (stats.completed != 0 || stats.submitted != 0 || stats.errors != 0) ++ ERR_RETURN("Error device stats are not all zero: completed = %"PRIu64", " ++ "submitted = %"PRIu64", errors = %"PRIu64"\n", ++ stats.completed, stats.submitted, stats.errors); ++ id_count = 0; ++ ++ /* create a mempool for running tests */ ++ pool = rte_pktmbuf_pool_create("TEST_DMADEV_POOL", ++ TEST_RINGSIZE * 2, /* n == num elements */ ++ 32, /* cache size */ ++ 0, /* priv size */ ++ 2048, /* data room size */ ++ info.numa_node); ++ if (pool == NULL) ++ ERR_RETURN("Error with mempool creation\n"); ++ ++ /* run the test cases, use many iterations to ensure UINT16_MAX id wraparound */ ++ if (runtest("copy", test_enqueue_copies, 640, dev_id, vchan, CHECK_ERRS) < 0) ++ goto err; ++ ++ /* run some burst capacity tests */ ++ if (runtest("burst capacity", test_burst_capacity, 1, dev_id, vchan, CHECK_ERRS) < 0) ++ goto err; ++ ++ /* to test error handling we can provide null pointers for source or dest in copies. This ++ * requires VA mode in DPDK, since NULL(0) is a valid physical address. ++ * We also need hardware that can report errors back. ++ */ ++ if (rte_eal_iova_mode() != RTE_IOVA_VA) ++ printf("DMA Dev %u: DPDK not in VA mode, skipping error handling tests\n", dev_id); ++ else if ((info.dev_capa & RTE_DMA_CAPA_HANDLES_ERRORS) == 0) ++ printf("DMA Dev %u: device does not report errors, skipping error handling tests\n", ++ dev_id); ++ else if (runtest("error handling", test_completion_handling, 1, ++ dev_id, vchan, !CHECK_ERRS) < 0) ++ goto err; ++ ++ if ((info.dev_capa & RTE_DMA_CAPA_OPS_FILL) == 0) ++ printf("DMA Dev %u: No device fill support, skipping fill tests\n", dev_id); ++ else if (runtest("fill", test_enqueue_fill, 1, dev_id, vchan, CHECK_ERRS) < 0) ++ goto err; ++ ++ rte_mempool_free(pool); ++ rte_dma_stop(dev_id); ++ rte_dma_stats_reset(dev_id, vchan); ++ return 0; ++ ++err: ++ rte_mempool_free(pool); ++ rte_dma_stop(dev_id); ++ return -1; ++} ++ ++static int ++test_apis(void) ++{ ++ const char *pmd = "dma_skeleton"; ++ int id; ++ int ret; ++ ++ /* attempt to create skeleton instance - ignore errors due to one being already present */ ++ rte_vdev_init(pmd, NULL); ++ id = rte_dma_get_dev_id_by_name(pmd); ++ if (id < 0) ++ return TEST_SKIPPED; ++ printf("\n### Test dmadev infrastructure using skeleton driver\n"); ++ ret = test_dma_api(id); ++ ++ return ret; ++} ++ ++static int ++test_dma(void) ++{ ++ int i; ++ ++ /* basic sanity on dmadev infrastructure */ ++ if (test_apis() < 0) ++ ERR_RETURN("Error performing API tests\n"); ++ ++ if (rte_dma_count_avail() == 0) ++ return TEST_SKIPPED; ++ ++ RTE_DMA_FOREACH_DEV(i) ++ if (test_dmadev_instance(i) < 0) ++ ERR_RETURN("Error, test failure for device %d\n", i); ++ ++ return 0; ++} ++ ++REGISTER_TEST_COMMAND(dmadev_autotest, test_dma); +diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c +new file mode 100644 +index 000000000..4a181af90 +--- /dev/null ++++ b/app/test/test_dmadev_api.c +@@ 
-0,0 +1,574 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2021 HiSilicon Limited ++ */ ++ ++#include ++ ++#include ++#include ++#include ++#include ++ ++extern int test_dma_api(uint16_t dev_id); ++ ++#define DMA_TEST_API_RUN(test) \ ++ testsuite_run_test(test, #test) ++ ++#define TEST_MEMCPY_SIZE 1024 ++#define TEST_WAIT_US_VAL 50000 ++ ++#define TEST_SUCCESS 0 ++#define TEST_FAILED -1 ++ ++static int16_t test_dev_id; ++static int16_t invalid_dev_id; ++ ++static char *src; ++static char *dst; ++ ++static int total; ++static int passed; ++static int failed; ++ ++static int ++testsuite_setup(int16_t dev_id) ++{ ++ test_dev_id = dev_id; ++ invalid_dev_id = -1; ++ ++ src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0); ++ if (src == NULL) ++ return -ENOMEM; ++ dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0); ++ if (dst == NULL) { ++ rte_free(src); ++ src = NULL; ++ return -ENOMEM; ++ } ++ ++ total = 0; ++ passed = 0; ++ failed = 0; ++ ++ /* Set dmadev log level to critical to suppress unnecessary output ++ * during API tests. ++ */ ++ rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT); ++ ++ return 0; ++} ++ ++static void ++testsuite_teardown(void) ++{ ++ rte_free(src); ++ src = NULL; ++ rte_free(dst); ++ dst = NULL; ++ /* Ensure the dmadev is stopped. */ ++ rte_dma_stop(test_dev_id); ++ ++ rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO); ++} ++ ++static void ++testsuite_run_test(int (*test)(void), const char *name) ++{ ++ int ret = 0; ++ ++ if (test) { ++ ret = test(); ++ if (ret < 0) { ++ failed++; ++ printf("%s Failed\n", name); ++ } else { ++ passed++; ++ printf("%s Passed\n", name); ++ } ++ } ++ ++ total++; ++} ++ ++static int ++test_dma_get_dev_id_by_name(void) ++{ ++ int ret = rte_dma_get_dev_id_by_name("invalid_dmadev_device"); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ return TEST_SUCCESS; ++} ++ ++static int ++test_dma_is_valid_dev(void) ++{ ++ int ret; ++ ret = rte_dma_is_valid(invalid_dev_id); ++ RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id"); ++ ret = rte_dma_is_valid(test_dev_id); ++ RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id"); ++ return TEST_SUCCESS; ++} ++ ++static int ++test_dma_count(void) ++{ ++ uint16_t count = rte_dma_count_avail(); ++ RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count); ++ return TEST_SUCCESS; ++} ++ ++static int ++test_dma_info_get(void) ++{ ++ struct rte_dma_info info = { 0 }; ++ int ret; ++ ++ ret = rte_dma_info_get(invalid_dev_id, &info); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ret = rte_dma_info_get(test_dev_id, NULL); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ret = rte_dma_info_get(test_dev_id, &info); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info"); ++ ++ return TEST_SUCCESS; ++} ++ ++static int ++test_dma_configure(void) ++{ ++ struct rte_dma_conf conf = { 0 }; ++ struct rte_dma_info info = { 0 }; ++ int ret; ++ ++ /* Check for invalid parameters */ ++ ret = rte_dma_configure(invalid_dev_id, &conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ret = rte_dma_configure(test_dev_id, NULL); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Check for nb_vchans == 0 */ ++ memset(&conf, 0, sizeof(conf)); ++ ret = rte_dma_configure(test_dev_id, &conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Check for conf.nb_vchans > info.max_vchans */ ++ ret = rte_dma_info_get(test_dev_id, &info); ++ 
RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info"); ++ memset(&conf, 0, sizeof(conf)); ++ conf.nb_vchans = info.max_vchans + 1; ++ ret = rte_dma_configure(test_dev_id, &conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Check enable silent mode */ ++ memset(&conf, 0, sizeof(conf)); ++ conf.nb_vchans = info.max_vchans; ++ conf.enable_silent = true; ++ ret = rte_dma_configure(test_dev_id, &conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Configure success */ ++ memset(&conf, 0, sizeof(conf)); ++ conf.nb_vchans = info.max_vchans; ++ ret = rte_dma_configure(test_dev_id, &conf); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret); ++ ++ /* Check configure success */ ++ ret = rte_dma_info_get(test_dev_id, &info); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info"); ++ RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans, ++ "Configure nb_vchans not match"); ++ ++ return TEST_SUCCESS; ++} ++ ++static int ++check_direction(void) ++{ ++ struct rte_dma_vchan_conf vchan_conf; ++ int ret; ++ ++ /* Check for direction */ ++ memset(&vchan_conf, 0, sizeof(vchan_conf)); ++ vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1; ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1; ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Check for direction and dev_capa combination */ ++ memset(&vchan_conf, 0, sizeof(vchan_conf)); ++ vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV; ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM; ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV; ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ return 0; ++} ++ ++static int ++check_port_type(struct rte_dma_info *dev_info) ++{ ++ struct rte_dma_vchan_conf vchan_conf; ++ int ret; ++ ++ /* Check src port type validation */ ++ memset(&vchan_conf, 0, sizeof(vchan_conf)); ++ vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM; ++ vchan_conf.nb_desc = dev_info->min_desc; ++ vchan_conf.src_port.port_type = RTE_DMA_PORT_PCIE; ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Check dst port type validation */ ++ memset(&vchan_conf, 0, sizeof(vchan_conf)); ++ vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM; ++ vchan_conf.nb_desc = dev_info->min_desc; ++ vchan_conf.dst_port.port_type = RTE_DMA_PORT_PCIE; ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ return 0; ++} ++ ++static int ++test_dma_vchan_setup(void) ++{ ++ struct rte_dma_vchan_conf vchan_conf = { 0 }; ++ struct rte_dma_conf dev_conf = { 0 }; ++ struct rte_dma_info dev_info = { 0 }; ++ int ret; ++ ++ /* Check for invalid parameters */ ++ ret = rte_dma_vchan_setup(invalid_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ret = rte_dma_vchan_setup(test_dev_id, 0, NULL); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ret = 
rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Make sure configure success */ ++ ret = rte_dma_info_get(test_dev_id, &dev_info); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info"); ++ dev_conf.nb_vchans = dev_info.max_vchans; ++ ret = rte_dma_configure(test_dev_id, &dev_conf); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret); ++ ++ /* Check for invalid vchan */ ++ ret = rte_dma_vchan_setup(test_dev_id, dev_conf.nb_vchans, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Check for direction */ ++ ret = check_direction(); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to check direction"); ++ ++ /* Check for nb_desc validation */ ++ memset(&vchan_conf, 0, sizeof(vchan_conf)); ++ vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM; ++ vchan_conf.nb_desc = dev_info.min_desc - 1; ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ vchan_conf.nb_desc = dev_info.max_desc + 1; ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Check port type */ ++ ret = check_port_type(&dev_info); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to check port type"); ++ ++ /* Check vchan setup success */ ++ memset(&vchan_conf, 0, sizeof(vchan_conf)); ++ vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM; ++ vchan_conf.nb_desc = dev_info.min_desc; ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret); ++ ++ return TEST_SUCCESS; ++} ++ ++static int ++setup_one_vchan(void) ++{ ++ struct rte_dma_vchan_conf vchan_conf = { 0 }; ++ struct rte_dma_info dev_info = { 0 }; ++ struct rte_dma_conf dev_conf = { 0 }; ++ int ret; ++ ++ ret = rte_dma_info_get(test_dev_id, &dev_info); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret); ++ dev_conf.nb_vchans = dev_info.max_vchans; ++ ret = rte_dma_configure(test_dev_id, &dev_conf); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret); ++ vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM; ++ vchan_conf.nb_desc = dev_info.min_desc; ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret); ++ ++ return TEST_SUCCESS; ++} ++ ++static int ++test_dma_start_stop(void) ++{ ++ struct rte_dma_vchan_conf vchan_conf = { 0 }; ++ struct rte_dma_conf dev_conf = { 0 }; ++ int ret; ++ ++ /* Check for invalid parameters */ ++ ret = rte_dma_start(invalid_dev_id); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ret = rte_dma_stop(invalid_dev_id); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Setup one vchan for later test */ ++ ret = setup_one_vchan(); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret); ++ ++ ret = rte_dma_start(test_dev_id); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret); ++ ++ /* Check reconfigure and vchan setup when device started */ ++ ret = rte_dma_configure(test_dev_id, &dev_conf); ++ RTE_TEST_ASSERT(ret == -EBUSY, "Failed to configure, %d", ret); ++ ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf); ++ RTE_TEST_ASSERT(ret == -EBUSY, "Failed to setup vchan, %d", ret); ++ ++ ret = rte_dma_stop(test_dev_id); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret); ++ ++ return TEST_SUCCESS; ++} ++ ++static int ++test_dma_stats(void) ++{ ++ struct 
rte_dma_info dev_info = { 0 }; ++ struct rte_dma_stats stats = { 0 }; ++ int ret; ++ ++ /* Check for invalid parameters */ ++ ret = rte_dma_stats_get(invalid_dev_id, 0, &stats); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ret = rte_dma_stats_get(invalid_dev_id, 0, NULL); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ret = rte_dma_stats_reset(invalid_dev_id, 0); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Setup one vchan for later test */ ++ ret = setup_one_vchan(); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret); ++ ++ /* Check for invalid vchan */ ++ ret = rte_dma_info_get(test_dev_id, &dev_info); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret); ++ ret = rte_dma_stats_get(test_dev_id, dev_info.max_vchans, &stats); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ret = rte_dma_stats_reset(test_dev_id, dev_info.max_vchans); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret); ++ ++ /* Check for valid vchan */ ++ ret = rte_dma_stats_get(test_dev_id, 0, &stats); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret); ++ ret = rte_dma_stats_get(test_dev_id, RTE_DMA_ALL_VCHAN, &stats); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret); ++ ret = rte_dma_stats_reset(test_dev_id, 0); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret); ++ ret = rte_dma_stats_reset(test_dev_id, RTE_DMA_ALL_VCHAN); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret); ++ ++ return TEST_SUCCESS; ++} ++ ++static int ++test_dma_dump(void) ++{ ++ int ret; ++ ++ /* Check for invalid parameters */ ++ ret = rte_dma_dump(invalid_dev_id, stderr); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret); ++ ret = rte_dma_dump(test_dev_id, NULL); ++ RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret); ++ ++ return TEST_SUCCESS; ++} ++ ++static void ++setup_memory(void) ++{ ++ int i; ++ ++ for (i = 0; i < TEST_MEMCPY_SIZE; i++) ++ src[i] = (char)i; ++ memset(dst, 0, TEST_MEMCPY_SIZE); ++} ++ ++static int ++verify_memory(void) ++{ ++ int i; ++ ++ for (i = 0; i < TEST_MEMCPY_SIZE; i++) { ++ if (src[i] == dst[i]) ++ continue; ++ RTE_TEST_ASSERT_EQUAL(src[i], dst[i], ++ "Failed to copy memory, %d %d", src[i], dst[i]); ++ } ++ ++ return 0; ++} ++ ++static int ++test_dma_completed(void) ++{ ++ uint16_t last_idx = 1; ++ bool has_error = true; ++ uint16_t cpl_ret; ++ int ret; ++ ++ /* Setup one vchan for later test */ ++ ret = setup_one_vchan(); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret); ++ ++ ret = rte_dma_start(test_dev_id); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret); ++ ++ setup_memory(); ++ ++ /* Check enqueue without submit */ ++ ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst, ++ TEST_MEMCPY_SIZE, 0); ++ RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret); ++ rte_delay_us_sleep(TEST_WAIT_US_VAL); ++ cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error); ++ RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed"); ++ ++ /* Check add submit */ ++ ret = rte_dma_submit(test_dev_id, 0); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret); ++ rte_delay_us_sleep(TEST_WAIT_US_VAL); ++ cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error); ++ RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed"); ++ RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u", ++ last_idx); ++ 
RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error"); ++ ret = verify_memory(); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory"); ++ ++ setup_memory(); ++ ++ /* Check for enqueue with submit */ ++ ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst, ++ TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT); ++ RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret); ++ rte_delay_us_sleep(TEST_WAIT_US_VAL); ++ cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error); ++ RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed"); ++ RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u", ++ last_idx); ++ RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error"); ++ ret = verify_memory(); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory"); ++ ++ /* Stop dmadev to make sure dmadev to a known state */ ++ ret = rte_dma_stop(test_dev_id); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret); ++ ++ return TEST_SUCCESS; ++} ++ ++static int ++test_dma_completed_status(void) ++{ ++ enum rte_dma_status_code status[1] = { 1 }; ++ uint16_t last_idx = 1; ++ uint16_t cpl_ret, i; ++ int ret; ++ ++ /* Setup one vchan for later test */ ++ ret = setup_one_vchan(); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret); ++ ++ ret = rte_dma_start(test_dev_id); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret); ++ ++ /* Check for enqueue with submit */ ++ ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst, ++ TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT); ++ RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret); ++ rte_delay_us_sleep(TEST_WAIT_US_VAL); ++ cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx, ++ status); ++ RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to completed status"); ++ RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u", ++ last_idx); ++ for (i = 0; i < RTE_DIM(status); i++) ++ RTE_TEST_ASSERT_EQUAL(status[i], 0, ++ "Failed to completed status, %d", status[i]); ++ ++ /* Check do completed status again */ ++ cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx, ++ status); ++ RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to completed status"); ++ ++ /* Check for enqueue with submit again */ ++ ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst, ++ TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT); ++ RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret); ++ rte_delay_us_sleep(TEST_WAIT_US_VAL); ++ cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx, ++ status); ++ RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to completed status"); ++ RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u", ++ last_idx); ++ for (i = 0; i < RTE_DIM(status); i++) ++ RTE_TEST_ASSERT_EQUAL(status[i], 0, ++ "Failed to completed status, %d", status[i]); ++ ++ /* Stop dmadev to make sure dmadev to a known state */ ++ ret = rte_dma_stop(test_dev_id); ++ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret); ++ ++ return TEST_SUCCESS; ++} ++ ++int ++test_dma_api(uint16_t dev_id) ++{ ++ int ret = testsuite_setup(dev_id); ++ if (ret) { ++ printf("testsuite setup fail!\n"); ++ return -1; ++ } ++ ++ /* If the testcase exit successfully, ensure that the test dmadev exist ++ * and the dmadev is in the stopped state. 
++ */ ++ DMA_TEST_API_RUN(test_dma_get_dev_id_by_name); ++ DMA_TEST_API_RUN(test_dma_is_valid_dev); ++ DMA_TEST_API_RUN(test_dma_count); ++ DMA_TEST_API_RUN(test_dma_info_get); ++ DMA_TEST_API_RUN(test_dma_configure); ++ DMA_TEST_API_RUN(test_dma_vchan_setup); ++ DMA_TEST_API_RUN(test_dma_start_stop); ++ DMA_TEST_API_RUN(test_dma_stats); ++ DMA_TEST_API_RUN(test_dma_dump); ++ DMA_TEST_API_RUN(test_dma_completed); ++ DMA_TEST_API_RUN(test_dma_completed_status); ++ ++ testsuite_teardown(); ++ ++ printf("Total tests : %d\n", total); ++ printf("Passed : %d\n", passed); ++ printf("Failed : %d\n", failed); ++ ++ if (failed) ++ return -1; ++ ++ return 0; ++}; +diff --git a/app/test/test_dmadev_api.h b/app/test/test_dmadev_api.h +new file mode 100644 +index 000000000..33fbc5bd4 +--- /dev/null ++++ b/app/test/test_dmadev_api.h +@@ -0,0 +1,5 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2021 HiSilicon Limited ++ */ ++ ++int test_dma_api(uint16_t dev_id); +diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md +index 748514e24..8dc353300 100644 +--- a/doc/api/doxy-api-index.md ++++ b/doc/api/doxy-api-index.md +@@ -21,6 +21,7 @@ The public API headers are grouped by topics: + [compressdev] (@ref rte_compressdev.h), + [compress] (@ref rte_comp.h), + [regexdev] (@ref rte_regexdev.h), ++ [dmadev] (@ref rte_dmadev.h), + [eventdev] (@ref rte_eventdev.h), + [event_eth_rx_adapter] (@ref rte_event_eth_rx_adapter.h), + [event_eth_tx_adapter] (@ref rte_event_eth_tx_adapter.h), +diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in +index 5c883b613..3b2c53426 100644 +--- a/doc/api/doxy-api.conf.in ++++ b/doc/api/doxy-api.conf.in +@@ -35,6 +35,7 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \ + @TOPDIR@/lib/librte_compressdev \ + @TOPDIR@/lib/librte_cryptodev \ + @TOPDIR@/lib/librte_distributor \ ++ @TOPDIR@/lib/librte_dmadev \ + @TOPDIR@/lib/librte_efd \ + @TOPDIR@/lib/librte_ethdev \ + @TOPDIR@/lib/librte_eventdev \ +diff --git a/doc/guides/dmadevs/index.rst b/doc/guides/dmadevs/index.rst +new file mode 100644 +index 000000000..0bce29d76 +--- /dev/null ++++ b/doc/guides/dmadevs/index.rst +@@ -0,0 +1,12 @@ ++.. SPDX-License-Identifier: BSD-3-Clause ++ Copyright 2021 HiSilicon Limited ++ ++DMA Device Drivers ++================== ++ ++The following are a list of DMA device drivers, which can be used from ++an application through DMA API. ++ ++.. toctree:: ++ :maxdepth: 2 ++ :numbered: +diff --git a/doc/guides/index.rst b/doc/guides/index.rst +index 857f0363d..919825992 100644 +--- a/doc/guides/index.rst ++++ b/doc/guides/index.rst +@@ -21,6 +21,7 @@ DPDK documentation + compressdevs/index + vdpadevs/index + regexdevs/index ++ dmadevs/index + eventdevs/index + rawdevs/index + mempool/index +diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst +new file mode 100644 +index 000000000..32f714786 +--- /dev/null ++++ b/doc/guides/prog_guide/dmadev.rst +@@ -0,0 +1,90 @@ ++.. SPDX-License-Identifier: BSD-3-Clause ++ Copyright 2021 HiSilicon Limited ++ ++DMA Device Library ++================== ++ ++The DMA library provides a DMA device framework for management and provisioning ++of hardware and software DMA poll mode drivers, defining generic API which ++support a number of different DMA operations. 
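As a minimal, illustrative sketch only (assuming at least one dmadev has been probed and using the
``dma_skeleton`` vdev name purely as an example), an application's init code can locate and identify
a device through this API roughly as follows::

    int id;
    struct rte_dma_info info;

    if (rte_dma_count_avail() == 0)
        return;                         /* no DMA devices probed */

    /* look the device up by name; "dma_skeleton" is an example name only */
    id = rte_dma_get_dev_id_by_name("dma_skeleton");
    if (id < 0 || !rte_dma_is_valid(id))
        return;

    if (rte_dma_info_get(id, &info) == 0)
        printf("using dmadev %d: %s (max_vchans %u)\n",
               id, info.dev_name, info.max_vchans);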
++
++
++Design Principles
++-----------------
++
++The DMA framework provides a generic DMA device framework which supports both
++physical (hardware) and virtual (software) DMA devices, as well as a generic DMA
++API which allows DMA devices to be managed and configured, and supports DMA
++operations to be provisioned on a DMA poll mode driver.
++
++.. _figure_dmadev:
++
++.. figure:: img/dmadev.*
++
++The above figure shows the model on which the DMA framework is built:
++
++ * The DMA controller could have multiple hardware DMA channels (aka hardware
++   DMA queues); each hardware DMA channel should be represented by a dmadev.
++ * The dmadev could create multiple virtual DMA channels; each virtual DMA
++   channel represents a different transfer context.
++ * The DMA operation request must be submitted to the virtual DMA channel.
++
++
++Device Management
++-----------------
++
++Device Creation
++~~~~~~~~~~~~~~~
++
++Physical DMA controllers are discovered during the PCI probe/enumeration
++performed by the EAL at DPDK initialization, based on their PCI BDF
++(bus, device, function). Specific physical DMA controllers, like other
++physical devices in DPDK, can be listed using the EAL command line options.
++
++The dmadevs are dynamically allocated using the ``rte_dma_pmd_allocate``
++function, based on the number of hardware DMA channels.
++
++
++Device Identification
++~~~~~~~~~~~~~~~~~~~~~
++
++Each DMA device, whether physical or virtual, is uniquely designated by two
++identifiers:
++
++- A unique device index used to designate the DMA device in all functions
++  exported by the DMA API.
++
++- A device name used to designate the DMA device in console messages, for
++  administration or debugging purposes.
++
++
++Device Features and Capabilities
++--------------------------------
++
++DMA devices may support different feature sets. The ``rte_dma_info_get`` API
++can be used to get the device info and supported features.
++
++Silent mode is a special device capability which does not require the
++application to invoke dequeue APIs.
++
++
++Enqueue / Dequeue APIs
++~~~~~~~~~~~~~~~~~~~~~~
++
++Enqueue APIs such as ``rte_dma_copy`` and ``rte_dma_fill`` can be used to
++enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
++returned. This ``ring_idx`` can be used by applications to track per-operation
++metadata in an application-defined circular ring.
++
++The ``rte_dma_submit`` API is used to issue the doorbell to hardware.
++Alternatively, the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue
++APIs to also issue the doorbell to hardware.
++
++There are two dequeue APIs, ``rte_dma_completed`` and
++``rte_dma_completed_status``, which are used to obtain the results of the
++enqueue requests. ``rte_dma_completed`` will return the number of successfully
++completed operations. ``rte_dma_completed_status`` will return the number of
++completed operations along with the status of each operation (filled into the
++``status`` array passed by the user). These two APIs can also return the last
++completed operation's ``ring_idx``, which can help the user track operations
++within their own application-defined rings.
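The configuration and datapath calls described above compose as in the following minimal sketch
(assumptions: device id 0, a single mem-to-mem vchan, and ``src_iova``, ``dst_iova`` and ``len``
are placeholders that the application must supply)::

    struct rte_dma_conf conf = { .nb_vchans = 1 };
    struct rte_dma_vchan_conf qconf = {
        .direction = RTE_DMA_DIR_MEM_TO_MEM,
        .nb_desc = 1024,        /* must lie within the device's min/max_desc range */
    };
    enum rte_dma_status_code status;
    uint16_t last_idx;
    bool has_error = false;

    /* one-time setup: configure the device and one vchan, then start it */
    if (rte_dma_configure(0, &conf) != 0 ||
            rte_dma_vchan_setup(0, 0, &qconf) != 0 ||
            rte_dma_start(0) != 0)
        rte_exit(EXIT_FAILURE, "dmadev setup failed\n");

    /* datapath: enqueue one copy and ring the doorbell in the same call */
    int idx = rte_dma_copy(0, 0, src_iova, dst_iova, len, RTE_DMA_OP_FLAG_SUBMIT);
    if (idx < 0)
        rte_exit(EXIT_FAILURE, "enqueue failed\n");  /* queue full or bad arguments */

    /* poll for the result; last_idx holds the ring_idx of the newest completion */
    while (rte_dma_completed(0, 0, 1, &last_idx, &has_error) == 0 && !has_error)
        ;
    if (has_error)
        rte_dma_completed_status(0, 0, 1, &last_idx, &status);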
+diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg +new file mode 100644 +index 000000000..157d7eb7d +--- /dev/null ++++ b/doc/guides/prog_guide/img/dmadev.svg +@@ -0,0 +1,283 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ virtual DMA channel ++ ++ virtual DMA channel ++ ++ virtual DMA channel ++ ++ ++ dmadev ++ ++ hardware DMA channel ++ ++ hardware DMA channel ++ ++ hardware DMA controller ++ ++ dmadev ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst +index 45c7dec88..dc60db9be 100644 +--- a/doc/guides/prog_guide/index.rst ++++ b/doc/guides/prog_guide/index.rst +@@ -27,6 +27,7 @@ Programmer's Guide + cryptodev_lib + compressdev + regexdev ++ dmadev + rte_security + rawdev + link_bonding_poll_mode_drv_lib +diff --git a/drivers/dma/hisilicon/hisi_dmadev.c b/drivers/dma/hisilicon/hisi_dmadev.c +new file mode 100644 +index 000000000..cf5bc6dc9 +--- /dev/null ++++ b/drivers/dma/hisilicon/hisi_dmadev.c +@@ -0,0 +1,925 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(C) 2021 HiSilicon Limited ++ */ ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "hisi_dmadev.h" ++ ++RTE_LOG_REGISTER(hisi_dma_logtype, pmd.dma.hisi, INFO); ++#define HISI_DMA_LOG(level, fmt, args...) \ ++ rte_log(RTE_LOG_ ## level, hisi_dma_logtype, \ ++ "%s(): " fmt "\n", __func__, ##args) ++#define HISI_DMA_LOG_RAW(hw, level, fmt, args...) \ ++ rte_log(RTE_LOG_ ## level, hisi_dma_logtype, \ ++ "%s %s(): " fmt "\n", (hw)->data->dev_name, \ ++ __func__, ##args) ++#define HISI_DMA_DEBUG(hw, fmt, args...) \ ++ HISI_DMA_LOG_RAW(hw, DEBUG, fmt, ## args) ++#define HISI_DMA_INFO(hw, fmt, args...) \ ++ HISI_DMA_LOG_RAW(hw, INFO, fmt, ## args) ++#define HISI_DMA_WARN(hw, fmt, args...) \ ++ HISI_DMA_LOG_RAW(hw, WARNING, fmt, ## args) ++#define HISI_DMA_ERR(hw, fmt, args...) 
\ ++ HISI_DMA_LOG_RAW(hw, ERR, fmt, ## args) ++ ++static uint32_t ++hisi_dma_queue_base(struct hisi_dma_dev *hw) ++{ ++ if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) ++ return HISI_DMA_HIP08_QUEUE_BASE; ++ else ++ return 0; ++} ++ ++static volatile void * ++hisi_dma_queue_regaddr(struct hisi_dma_dev *hw, uint32_t qoff) ++{ ++ uint32_t off = hisi_dma_queue_base(hw) + ++ hw->queue_id * HISI_DMA_QUEUE_REGION_SIZE + qoff; ++ return (volatile void *)((char *)hw->io_base + off); ++} ++ ++static void ++hisi_dma_write_reg(void *base, uint32_t off, uint32_t val) ++{ ++ rte_write32(rte_cpu_to_le_32(val), ++ (volatile void *)((char *)base + off)); ++} ++ ++static void ++hisi_dma_write_dev(struct hisi_dma_dev *hw, uint32_t off, uint32_t val) ++{ ++ hisi_dma_write_reg(hw->io_base, off, val); ++} ++ ++static void ++hisi_dma_write_queue(struct hisi_dma_dev *hw, uint32_t qoff, uint32_t val) ++{ ++ uint32_t off = hisi_dma_queue_base(hw) + ++ hw->queue_id * HISI_DMA_QUEUE_REGION_SIZE + qoff; ++ hisi_dma_write_dev(hw, off, val); ++} ++ ++static uint32_t ++hisi_dma_read_reg(void *base, uint32_t off) ++{ ++ uint32_t val = rte_read32((volatile void *)((char *)base + off)); ++ return rte_le_to_cpu_32(val); ++} ++ ++static uint32_t ++hisi_dma_read_dev(struct hisi_dma_dev *hw, uint32_t off) ++{ ++ return hisi_dma_read_reg(hw->io_base, off); ++} ++ ++static uint32_t ++hisi_dma_read_queue(struct hisi_dma_dev *hw, uint32_t qoff) ++{ ++ uint32_t off = hisi_dma_queue_base(hw) + ++ hw->queue_id * HISI_DMA_QUEUE_REGION_SIZE + qoff; ++ return hisi_dma_read_dev(hw, off); ++} ++ ++static void ++hisi_dma_update_bit(struct hisi_dma_dev *hw, uint32_t off, uint32_t pos, ++ bool set) ++{ ++ uint32_t tmp = hisi_dma_read_dev(hw, off); ++ uint32_t mask = 1u << pos; ++ tmp = set ? tmp | mask : tmp & ~mask; ++ hisi_dma_write_dev(hw, off, tmp); ++} ++ ++static void ++hisi_dma_update_queue_bit(struct hisi_dma_dev *hw, uint32_t qoff, uint32_t pos, ++ bool set) ++{ ++ uint32_t tmp = hisi_dma_read_queue(hw, qoff); ++ uint32_t mask = 1u << pos; ++ tmp = set ? tmp | mask : tmp & ~mask; ++ hisi_dma_write_queue(hw, qoff, tmp); ++} ++ ++static void ++hisi_dma_update_queue_mbit(struct hisi_dma_dev *hw, uint32_t qoff, ++ uint32_t mask, bool set) ++{ ++ uint32_t tmp = hisi_dma_read_queue(hw, qoff); ++ tmp = set ? tmp | mask : tmp & ~mask; ++ hisi_dma_write_queue(hw, qoff, tmp); ++} ++ ++#define hisi_dma_poll_hw_state(hw, val, cond, sleep_us, timeout_us) ({ \ ++ uint32_t timeout = 0; \ ++ while (timeout++ <= (timeout_us)) { \ ++ (val) = hisi_dma_read_queue(hw, HISI_DMA_QUEUE_FSM_REG); \ ++ if (cond) \ ++ break; \ ++ rte_delay_us(sleep_us); \ ++ } \ ++ (cond) ? 
0 : -ETIME; \ ++}) ++ ++static int ++hisi_dma_reset_hw(struct hisi_dma_dev *hw) ++{ ++#define POLL_SLEEP_US 100 ++#define POLL_TIMEOUT_US 10000 ++ ++ uint32_t tmp; ++ int ret; ++ ++ hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG, ++ HISI_DMA_QUEUE_CTRL0_PAUSE_B, true); ++ hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG, ++ HISI_DMA_QUEUE_CTRL0_EN_B, false); ++ ++ ret = hisi_dma_poll_hw_state(hw, tmp, ++ FIELD_GET(HISI_DMA_QUEUE_FSM_STS_M, tmp) != HISI_DMA_STATE_RUN, ++ POLL_SLEEP_US, POLL_TIMEOUT_US); ++ if (ret) { ++ HISI_DMA_ERR(hw, "disable dma timeout!"); ++ return ret; ++ } ++ ++ hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL1_REG, ++ HISI_DMA_QUEUE_CTRL1_RESET_B, true); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_TAIL_REG, 0); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_HEAD_REG, 0); ++ hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG, ++ HISI_DMA_QUEUE_CTRL0_PAUSE_B, false); ++ ++ ret = hisi_dma_poll_hw_state(hw, tmp, ++ FIELD_GET(HISI_DMA_QUEUE_FSM_STS_M, tmp) == HISI_DMA_STATE_IDLE, ++ POLL_SLEEP_US, POLL_TIMEOUT_US); ++ if (ret) { ++ HISI_DMA_ERR(hw, "reset dma timeout!"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void ++hisi_dma_init_hw(struct hisi_dma_dev *hw) ++{ ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_BASE_L_REG, ++ lower_32_bits(hw->sqe_iova)); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_BASE_H_REG, ++ upper_32_bits(hw->sqe_iova)); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_BASE_L_REG, ++ lower_32_bits(hw->cqe_iova)); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_BASE_H_REG, ++ upper_32_bits(hw->cqe_iova)); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_DEPTH_REG, ++ hw->sq_depth_mask); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_DEPTH_REG, hw->cq_depth - 1); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_TAIL_REG, 0); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_HEAD_REG, 0); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM0_REG, 0); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM1_REG, 0); ++ hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM2_REG, 0); ++ ++ if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) { ++ hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM3_REG, ++ 0); ++ hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM4_REG, ++ 0); ++ hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM5_REG, ++ 0); ++ hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM6_REG, ++ 0); ++ hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG, ++ HISI_DMA_HIP08_QUEUE_CTRL0_ERR_ABORT_B, false); ++ hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_STATUS_REG, ++ HISI_DMA_HIP08_QUEUE_INT_MASK_M, true); ++ hisi_dma_update_queue_mbit(hw, ++ HISI_DMA_HIP08_QUEUE_INT_MASK_REG, ++ HISI_DMA_HIP08_QUEUE_INT_MASK_M, true); ++ } ++} ++ ++static void ++hisi_dma_init_gbl(void *pci_bar, uint8_t revision) ++{ ++ struct hisi_dma_dev hw; ++ ++ memset(&hw, 0, sizeof(hw)); ++ hw.io_base = pci_bar; ++ ++ if (revision == HISI_DMA_REVISION_HIP08B) ++ hisi_dma_update_bit(&hw, HISI_DMA_HIP08_MODE_REG, ++ HISI_DMA_HIP08_MODE_SEL_B, true); ++} ++ ++static uint8_t ++hisi_dma_reg_layout(uint8_t revision) ++{ ++ if (revision == HISI_DMA_REVISION_HIP08B) ++ return HISI_DMA_REG_LAYOUT_HIP08; ++ else ++ return HISI_DMA_REG_LAYOUT_INVALID; ++} ++ ++static void ++hisi_dma_zero_iomem(struct hisi_dma_dev *hw) ++{ ++ memset(hw->iomz->addr, 0, hw->iomz_sz); ++} ++ ++static int ++hisi_dma_alloc_iomem(struct hisi_dma_dev *hw, uint16_t ring_size, ++ const char *dev_name) ++{ ++ uint32_t sq_size = sizeof(struct hisi_dma_sqe) * ring_size; 
++ uint32_t cq_size = sizeof(struct hisi_dma_cqe) * ++ (ring_size + HISI_DMA_CQ_RESERVED); ++ uint32_t status_size = sizeof(uint16_t) * ring_size; ++ char mz_name[RTE_MEMZONE_NAMESIZE]; ++ const struct rte_memzone *iomz; ++ uint32_t total_size; ++ ++ sq_size = RTE_CACHE_LINE_ROUNDUP(sq_size); ++ cq_size = RTE_CACHE_LINE_ROUNDUP(cq_size); ++ status_size = RTE_CACHE_LINE_ROUNDUP(status_size); ++ total_size = sq_size + cq_size + status_size; ++ ++ (void)snprintf(mz_name, sizeof(mz_name), "hisi_dma:%s", dev_name); ++ iomz = rte_memzone_reserve(mz_name, total_size, hw->data->numa_node, ++ RTE_MEMZONE_IOVA_CONTIG); ++ if (iomz == NULL) { ++ HISI_DMA_ERR(hw, "malloc %s iomem fail!", mz_name); ++ return -ENOMEM; ++ } ++ ++ hw->iomz = iomz; ++ hw->iomz_sz = total_size; ++ hw->sqe = iomz->addr; ++ hw->cqe = (void *)((char *)iomz->addr + sq_size); ++ hw->status = (void *)((char *)iomz->addr + sq_size + cq_size); ++ hw->sqe_iova = iomz->iova; ++ hw->cqe_iova = iomz->iova + sq_size; ++ hw->sq_depth_mask = ring_size - 1; ++ hw->cq_depth = ring_size + HISI_DMA_CQ_RESERVED; ++ hisi_dma_zero_iomem(hw); ++ ++ return 0; ++} ++ ++static void ++hisi_dma_free_iomem(struct hisi_dma_dev *hw) ++{ ++ if (hw->iomz != NULL) ++ rte_memzone_free(hw->iomz); ++ ++ hw->iomz = NULL; ++ hw->sqe = NULL; ++ hw->cqe = NULL; ++ hw->status = NULL; ++ hw->sqe_iova = 0; ++ hw->cqe_iova = 0; ++ hw->sq_depth_mask = 0; ++ hw->cq_depth = 0; ++} ++ ++static int ++hisi_dma_info_get(const struct rte_dma_dev *dev, ++ struct rte_dma_info *dev_info, ++ uint32_t info_sz) ++{ ++ RTE_SET_USED(dev); ++ RTE_SET_USED(info_sz); ++ ++ dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | ++ RTE_DMA_CAPA_OPS_COPY; ++ dev_info->max_vchans = 1; ++ dev_info->max_desc = HISI_DMA_MAX_DESC_NUM; ++ dev_info->min_desc = HISI_DMA_MIN_DESC_NUM; ++ ++ return 0; ++} ++ ++static int ++hisi_dma_configure(struct rte_dma_dev *dev, ++ const struct rte_dma_conf *conf, ++ uint32_t conf_sz) ++{ ++ RTE_SET_USED(dev); ++ RTE_SET_USED(conf); ++ RTE_SET_USED(conf_sz); ++ return 0; ++} ++ ++static int ++hisi_dma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, ++ const struct rte_dma_vchan_conf *conf, ++ uint32_t conf_sz) ++{ ++ struct hisi_dma_dev *hw = dev->data->dev_private; ++ int ret; ++ ++ RTE_SET_USED(vchan); ++ RTE_SET_USED(conf_sz); ++ ++ if (!rte_is_power_of_2(conf->nb_desc)) { ++ HISI_DMA_ERR(hw, "Number of desc must be power of 2!"); ++ return -EINVAL; ++ } ++ ++ hisi_dma_free_iomem(hw); ++ ret = hisi_dma_alloc_iomem(hw, conf->nb_desc, dev->data->dev_name); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int ++hisi_dma_start(struct rte_dma_dev *dev) ++{ ++ struct hisi_dma_dev *hw = dev->data->dev_private; ++ ++ if (hw->iomz == NULL) { ++ HISI_DMA_ERR(hw, "Vchan was not setup, start fail!\n"); ++ return -EINVAL; ++ } ++ ++ /* Reset the dmadev to a known state, include: ++ * 1) zero iomem, also include status fields. ++ * 2) init hardware register. ++ * 3) init index values to zero. ++ * 4) init running statistics. 
++ */ ++ hisi_dma_zero_iomem(hw); ++ hisi_dma_init_hw(hw); ++ hw->ridx = 0; ++ hw->cridx = 0; ++ hw->sq_head = 0; ++ hw->sq_tail = 0; ++ hw->cq_sq_head = 0; ++ hw->cq_head = 0; ++ hw->cqs_completed = 0; ++ hw->cqe_vld = 1; ++ hw->submitted = 0; ++ hw->completed = 0; ++ hw->errors = 0; ++ ++ hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG, ++ HISI_DMA_QUEUE_CTRL0_EN_B, true); ++ ++ return 0; ++} ++ ++static int ++hisi_dma_stop(struct rte_dma_dev *dev) ++{ ++ return hisi_dma_reset_hw(dev->data->dev_private); ++} ++ ++static int ++hisi_dma_close(struct rte_dma_dev *dev) ++{ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) { ++ /* The dmadev already stopped */ ++ hisi_dma_free_iomem(dev->data->dev_private); ++ } ++ return 0; ++} ++ ++static int ++hisi_dma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan, ++ struct rte_dma_stats *stats, ++ uint32_t stats_sz) ++{ ++ struct hisi_dma_dev *hw = dev->data->dev_private; ++ ++ RTE_SET_USED(vchan); ++ RTE_SET_USED(stats_sz); ++ stats->submitted = hw->submitted; ++ stats->completed = hw->completed; ++ stats->errors = hw->errors; ++ ++ return 0; ++} ++ ++static int ++hisi_dma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan) ++{ ++ struct hisi_dma_dev *hw = dev->data->dev_private; ++ ++ RTE_SET_USED(vchan); ++ hw->submitted = 0; ++ hw->completed = 0; ++ hw->errors = 0; ++ ++ return 0; ++} ++ ++static void ++hisi_dma_get_dump_range(struct hisi_dma_dev *hw, uint32_t *start, uint32_t *end) ++{ ++ if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) { ++ *start = HISI_DMA_HIP08_DUMP_START_REG; ++ *end = HISI_DMA_HIP08_DUMP_END_REG; ++ } else { ++ *start = 0; ++ *end = 0; ++ } ++} ++ ++static void ++hisi_dma_dump_common(struct hisi_dma_dev *hw, FILE *f) ++{ ++#define DUMP_REGNUM_PER_LINE 4 ++ ++ uint32_t start, end; ++ uint32_t cnt, i; ++ ++ hisi_dma_get_dump_range(hw, &start, &end); ++ ++ (void)fprintf(f, " common-register:\n"); ++ ++ cnt = 0; ++ for (i = start; i <= end; i += sizeof(uint32_t)) { ++ if (cnt % DUMP_REGNUM_PER_LINE == 0) ++ (void)fprintf(f, " [%4x]:", i); ++ (void)fprintf(f, " 0x%08x", hisi_dma_read_dev(hw, i)); ++ cnt++; ++ if (cnt % DUMP_REGNUM_PER_LINE == 0) ++ (void)fprintf(f, "\n"); ++ } ++ if (cnt % DUMP_REGNUM_PER_LINE) ++ (void)fprintf(f, "\n"); ++} ++ ++static void ++hisi_dma_dump_read_queue(struct hisi_dma_dev *hw, uint32_t qoff, ++ char *buffer, int max_sz) ++{ ++ memset(buffer, 0, max_sz); ++ ++ /* Address-related registers are not printed for security reasons. 
*/ ++ if (qoff == HISI_DMA_QUEUE_SQ_BASE_L_REG || ++ qoff == HISI_DMA_QUEUE_SQ_BASE_H_REG || ++ qoff == HISI_DMA_QUEUE_CQ_BASE_L_REG || ++ qoff == HISI_DMA_QUEUE_CQ_BASE_H_REG) { ++ (void)snprintf(buffer, max_sz, "**********"); ++ return; ++ } ++ ++ (void)snprintf(buffer, max_sz, "0x%08x", hisi_dma_read_queue(hw, qoff)); ++} ++ ++static void ++hisi_dma_dump_queue(struct hisi_dma_dev *hw, FILE *f) ++{ ++#define REG_FMT_LEN 32 ++ char buf[REG_FMT_LEN] = { 0 }; ++ uint32_t i; ++ ++ (void)fprintf(f, " queue-register:\n"); ++ for (i = 0; i < HISI_DMA_QUEUE_REGION_SIZE; ) { ++ hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf)); ++ (void)fprintf(f, " [%2x]: %s", i, buf); ++ i += sizeof(uint32_t); ++ hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf)); ++ (void)fprintf(f, " %s", buf); ++ i += sizeof(uint32_t); ++ hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf)); ++ (void)fprintf(f, " %s", buf); ++ i += sizeof(uint32_t); ++ hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf)); ++ (void)fprintf(f, " %s\n", buf); ++ i += sizeof(uint32_t); ++ } ++} ++ ++static int ++hisi_dma_dump(const struct rte_dma_dev *dev, FILE *f) ++{ ++ struct hisi_dma_dev *hw = dev->data->dev_private; ++ ++ (void)fprintf(f, ++ " revision: 0x%x queue_id: %u ring_size: %u\n" ++ " ridx: %u cridx: %u\n" ++ " sq_head: %u sq_tail: %u cq_sq_head: %u\n" ++ " cq_head: %u cqs_completed: %u cqe_vld: %u\n" ++ " submitted: %" PRIu64 " completed: %" PRIu64 " errors %" ++ PRIu64"\n", ++ hw->revision, hw->queue_id, ++ hw->sq_depth_mask > 0 ? hw->sq_depth_mask + 1 : 0, ++ hw->ridx, hw->cridx, ++ hw->sq_head, hw->sq_tail, hw->cq_sq_head, ++ hw->cq_head, hw->cqs_completed, hw->cqe_vld, ++ hw->submitted, hw->completed, hw->errors); ++ hisi_dma_dump_queue(hw, f); ++ hisi_dma_dump_common(hw, f); ++ ++ return 0; ++} ++ ++static int ++hisi_dma_copy(void *dev_private, uint16_t vchan, ++ rte_iova_t src, rte_iova_t dst, ++ uint32_t length, uint64_t flags) ++{ ++ struct hisi_dma_dev *hw = dev_private; ++ struct hisi_dma_sqe *sqe = &hw->sqe[hw->sq_tail]; ++ ++ RTE_SET_USED(vchan); ++ ++ if (((hw->sq_tail + 1) & hw->sq_depth_mask) == hw->sq_head) ++ return -ENOSPC; ++ ++ sqe->dw0 = rte_cpu_to_le_32(SQE_OPCODE_M2M); ++ sqe->dw1 = 0; ++ sqe->dw2 = 0; ++ sqe->length = rte_cpu_to_le_32(length); ++ sqe->src_addr = rte_cpu_to_le_64(src); ++ sqe->dst_addr = rte_cpu_to_le_64(dst); ++ hw->sq_tail = (hw->sq_tail + 1) & hw->sq_depth_mask; ++ hw->submitted++; ++ ++ if (flags & RTE_DMA_OP_FLAG_FENCE) ++ sqe->dw0 |= rte_cpu_to_le_32(SQE_FENCE_FLAG); ++ if (flags & RTE_DMA_OP_FLAG_SUBMIT) ++ rte_write32(rte_cpu_to_le_32(hw->sq_tail), hw->sq_tail_reg); ++ ++ return hw->ridx++; ++} ++ ++static int ++hisi_dma_submit(void *dev_private, uint16_t vchan) ++{ ++ struct hisi_dma_dev *hw = dev_private; ++ ++ RTE_SET_USED(vchan); ++ rte_write32(rte_cpu_to_le_32(hw->sq_tail), hw->sq_tail_reg); ++ ++ return 0; ++} ++ ++static inline void ++hisi_dma_scan_cq(struct hisi_dma_dev *hw) ++{ ++ volatile struct hisi_dma_cqe *cqe; ++ uint16_t csq_head = hw->cq_sq_head; ++ uint16_t cq_head = hw->cq_head; ++ uint16_t count = 0; ++ uint64_t misc; ++ ++ while (true) { ++ cqe = &hw->cqe[cq_head]; ++ misc = cqe->misc; ++ misc = rte_le_to_cpu_64(misc); ++ if (FIELD_GET(CQE_VALID_B, misc) != hw->cqe_vld) ++ break; ++ ++ csq_head = FIELD_GET(CQE_SQ_HEAD_MASK, misc); ++ if (unlikely(misc & CQE_STATUS_MASK)) ++ hw->status[csq_head] = FIELD_GET(CQE_STATUS_MASK, ++ misc); ++ ++ count++; ++ cq_head++; ++ if (cq_head == hw->cq_depth) { ++ hw->cqe_vld = !hw->cqe_vld; ++ cq_head = 0; ++ } ++ } ++ ++ if (count == 0) 
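++		/* no new completions were found in this scan */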
++ return; ++ ++ hw->cq_head = cq_head; ++ hw->cq_sq_head = (csq_head + 1) & hw->sq_depth_mask; ++ hw->cqs_completed += count; ++ if (hw->cqs_completed >= HISI_DMA_CQ_RESERVED) { ++ rte_write32(rte_cpu_to_le_32(cq_head), hw->cq_head_reg); ++ hw->cqs_completed = 0; ++ } ++} ++ ++static inline uint16_t ++hisi_dma_calc_cpls(struct hisi_dma_dev *hw, const uint16_t nb_cpls) ++{ ++ uint16_t cpl_num; ++ ++ if (hw->cq_sq_head >= hw->sq_head) ++ cpl_num = hw->cq_sq_head - hw->sq_head; ++ else ++ cpl_num = hw->sq_depth_mask + 1 - hw->sq_head + hw->cq_sq_head; ++ ++ if (cpl_num > nb_cpls) ++ cpl_num = nb_cpls; ++ ++ return cpl_num; ++} ++ ++static uint16_t ++hisi_dma_completed(void *dev_private, ++ uint16_t vchan, const uint16_t nb_cpls, ++ uint16_t *last_idx, bool *has_error) ++{ ++ struct hisi_dma_dev *hw = dev_private; ++ uint16_t sq_head = hw->sq_head; ++ uint16_t cpl_num, i; ++ ++ RTE_SET_USED(vchan); ++ hisi_dma_scan_cq(hw); ++ ++ cpl_num = hisi_dma_calc_cpls(hw, nb_cpls); ++ for (i = 0; i < cpl_num; i++) { ++ if (hw->status[sq_head]) { ++ *has_error = true; ++ break; ++ } ++ sq_head = (sq_head + 1) & hw->sq_depth_mask; ++ } ++ if (i > 0) { ++ hw->cridx += i; ++ *last_idx = hw->cridx - 1; ++ hw->sq_head = sq_head; ++ } ++ hw->completed += i; ++ ++ return i; ++} ++ ++static enum rte_dma_status_code ++hisi_dma_convert_status(uint16_t status) ++{ ++ switch (status) { ++ case HISI_DMA_STATUS_SUCCESS: ++ return RTE_DMA_STATUS_SUCCESSFUL; ++ case HISI_DMA_STATUS_INVALID_OPCODE: ++ return RTE_DMA_STATUS_INVALID_OPCODE; ++ case HISI_DMA_STATUS_INVALID_LENGTH: ++ return RTE_DMA_STATUS_INVALID_LENGTH; ++ case HISI_DMA_STATUS_USER_ABORT: ++ return RTE_DMA_STATUS_USER_ABORT; ++ case HISI_DMA_STATUS_REMOTE_READ_ERROR: ++ case HISI_DMA_STATUS_AXI_READ_ERROR: ++ return RTE_DMA_STATUS_BUS_READ_ERROR; ++ case HISI_DMA_STATUS_AXI_WRITE_ERROR: ++ return RTE_DMA_STATUS_BUS_WRITE_ERROR; ++ case HISI_DMA_STATUS_DATA_POISON: ++ case HISI_DMA_STATUS_REMOTE_DATA_POISION: ++ return RTE_DMA_STATUS_DATA_POISION; ++ case HISI_DMA_STATUS_SQE_READ_ERROR: ++ case HISI_DMA_STATUS_SQE_READ_POISION: ++ return RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR; ++ case HISI_DMA_STATUS_LINK_DOWN_ERROR: ++ return RTE_DMA_STATUS_DEV_LINK_ERROR; ++ default: ++ return RTE_DMA_STATUS_ERROR_UNKNOWN; ++ } ++} ++ ++static uint16_t ++hisi_dma_completed_status(void *dev_private, ++ uint16_t vchan, const uint16_t nb_cpls, ++ uint16_t *last_idx, enum rte_dma_status_code *status) ++{ ++ struct hisi_dma_dev *hw = dev_private; ++ uint16_t sq_head = hw->sq_head; ++ uint16_t cpl_num, i; ++ ++ RTE_SET_USED(vchan); ++ hisi_dma_scan_cq(hw); ++ ++ cpl_num = hisi_dma_calc_cpls(hw, nb_cpls); ++ for (i = 0; i < cpl_num; i++) { ++ status[i] = hisi_dma_convert_status(hw->status[sq_head]); ++ hw->errors += !!status[i]; ++ hw->status[sq_head] = HISI_DMA_STATUS_SUCCESS; ++ sq_head = (sq_head + 1) & hw->sq_depth_mask; ++ } ++ if (likely(cpl_num > 0)) { ++ hw->cridx += cpl_num; ++ *last_idx = hw->cridx - 1; ++ hw->sq_head = sq_head; ++ } ++ hw->completed += cpl_num; ++ ++ return cpl_num; ++} ++ ++static uint16_t ++hisi_dma_burst_capacity(const void *dev_private, uint16_t vchan) ++{ ++ const struct hisi_dma_dev *hw = dev_private; ++ uint16_t sq_head = hw->sq_head; ++ uint16_t sq_tail = hw->sq_tail; ++ ++ RTE_SET_USED(vchan); ++ ++ return (sq_tail >= sq_head) ? 
hw->sq_depth_mask - sq_tail + sq_head : ++ sq_head - 1 - sq_tail; ++} ++ ++static void ++hisi_dma_gen_pci_device_name(const struct rte_pci_device *pci_dev, ++ char *name, size_t size) ++{ ++ memset(name, 0, size); ++ (void)snprintf(name, size, "%x:%x.%x", ++ pci_dev->addr.bus, pci_dev->addr.devid, ++ pci_dev->addr.function); ++} ++ ++static void ++hisi_dma_gen_dev_name(const struct rte_pci_device *pci_dev, ++ uint8_t queue_id, char *name, size_t size) ++{ ++ memset(name, 0, size); ++ (void)snprintf(name, size, "%x:%x.%x-ch%u", ++ pci_dev->addr.bus, pci_dev->addr.devid, ++ pci_dev->addr.function, queue_id); ++} ++ ++/** ++ * Hardware queue state machine: ++ * ++ * ----------- dmadev_create ------------------ ++ * | Unknown | ---------------> | IDLE | ++ * ----------- ------------------ ++ * ^ | ++ * | |dev_start ++ * dev_stop| | ++ * | v ++ * ------------------ ++ * | RUN | ++ * ------------------ ++ * ++ */ ++static const struct rte_dma_dev_ops hisi_dmadev_ops = { ++ .dev_info_get = hisi_dma_info_get, ++ .dev_configure = hisi_dma_configure, ++ .dev_start = hisi_dma_start, ++ .dev_stop = hisi_dma_stop, ++ .dev_close = hisi_dma_close, ++ .vchan_setup = hisi_dma_vchan_setup, ++ .stats_get = hisi_dma_stats_get, ++ .stats_reset = hisi_dma_stats_reset, ++ .dev_dump = hisi_dma_dump, ++}; ++ ++static int ++hisi_dma_create(struct rte_pci_device *pci_dev, uint8_t queue_id, ++ uint8_t revision) ++{ ++#define REG_PCI_BAR_INDEX 2 ++ ++ char name[RTE_DEV_NAME_MAX_LEN]; ++ struct rte_dma_dev *dev; ++ struct hisi_dma_dev *hw; ++ int ret; ++ ++ hisi_dma_gen_dev_name(pci_dev, queue_id, name, sizeof(name)); ++ dev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node, ++ sizeof(*hw)); ++ if (dev == NULL) { ++ HISI_DMA_LOG(ERR, "%s allocate dmadev fail!", name); ++ return -EINVAL; ++ } ++ ++ dev->device = &pci_dev->device; ++ dev->dev_ops = &hisi_dmadev_ops; ++ dev->fp_obj->dev_private = dev->data->dev_private; ++ dev->fp_obj->copy = hisi_dma_copy; ++ dev->fp_obj->submit = hisi_dma_submit; ++ dev->fp_obj->completed = hisi_dma_completed; ++ dev->fp_obj->completed_status = hisi_dma_completed_status; ++ dev->fp_obj->burst_capacity = hisi_dma_burst_capacity; ++ ++ hw = dev->data->dev_private; ++ hw->data = dev->data; ++ hw->revision = revision; ++ hw->reg_layout = hisi_dma_reg_layout(revision); ++ hw->io_base = pci_dev->mem_resource[REG_PCI_BAR_INDEX].addr; ++ hw->queue_id = queue_id; ++ hw->sq_tail_reg = hisi_dma_queue_regaddr(hw, ++ HISI_DMA_QUEUE_SQ_TAIL_REG); ++ hw->cq_head_reg = hisi_dma_queue_regaddr(hw, ++ HISI_DMA_QUEUE_CQ_HEAD_REG); ++ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) { ++ ret = hisi_dma_reset_hw(hw); ++ if (ret) { ++ HISI_DMA_LOG(ERR, "%s init device fail!", name); ++ (void)rte_dma_pmd_release(name); ++ return -EIO; ++ } ++ } ++ ++ dev->state = RTE_DMA_DEV_READY; ++ HISI_DMA_LOG(DEBUG, "%s create dmadev success!", name); ++ ++ return 0; ++} ++ ++static int ++hisi_dma_check_revision(struct rte_pci_device *pci_dev, const char *name, ++ uint8_t *out_revision) ++{ ++ uint8_t revision; ++ int ret; ++ ++ ret = rte_pci_read_config(pci_dev, &revision, 1, ++ HISI_DMA_PCI_REVISION_ID_REG); ++ if (ret != 1) { ++ HISI_DMA_LOG(ERR, "%s read PCI revision failed!", name); ++ return -EINVAL; ++ } ++ if (hisi_dma_reg_layout(revision) == HISI_DMA_REG_LAYOUT_INVALID) { ++ HISI_DMA_LOG(ERR, "%s revision: 0x%x not supported!", ++ name, revision); ++ return -EINVAL; ++ } ++ ++ *out_revision = revision; ++ return 0; ++} ++ ++static int ++hisi_dma_probe(struct rte_pci_driver *pci_drv __rte_unused, ++ struct 
rte_pci_device *pci_dev) ++{ ++ char name[RTE_DEV_NAME_MAX_LEN] = { 0 }; ++ uint8_t revision; ++ uint8_t i; ++ int ret; ++ ++ hisi_dma_gen_pci_device_name(pci_dev, name, sizeof(name)); ++ ++ if (pci_dev->mem_resource[2].addr == NULL) { ++ HISI_DMA_LOG(ERR, "%s BAR2 is NULL!\n", name); ++ return -ENODEV; ++ } ++ ++ ret = hisi_dma_check_revision(pci_dev, name, &revision); ++ if (ret) ++ return ret; ++ HISI_DMA_LOG(DEBUG, "%s read PCI revision: 0x%x", name, revision); ++ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) ++ hisi_dma_init_gbl(pci_dev->mem_resource[2].addr, revision); ++ ++ for (i = 0; i < HISI_DMA_MAX_HW_QUEUES; i++) { ++ ret = hisi_dma_create(pci_dev, i, revision); ++ if (ret) { ++ HISI_DMA_LOG(ERR, "%s create dmadev %u failed!", ++ name, i); ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++static int ++hisi_dma_remove(struct rte_pci_device *pci_dev) ++{ ++ char name[RTE_DEV_NAME_MAX_LEN]; ++ uint8_t i; ++ int ret; ++ ++ for (i = 0; i < HISI_DMA_MAX_HW_QUEUES; i++) { ++ hisi_dma_gen_dev_name(pci_dev, i, name, sizeof(name)); ++ ret = rte_dma_pmd_release(name); ++ if (ret) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static const struct rte_pci_id pci_id_hisi_dma_map[] = { ++ { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HISI_DMA_DEVICE_ID) }, ++ { .vendor_id = 0, }, /* sentinel */ ++}; ++ ++static struct rte_pci_driver hisi_dma_pmd_drv = { ++ .id_table = pci_id_hisi_dma_map, ++ .drv_flags = RTE_PCI_DRV_NEED_MAPPING, ++ .probe = hisi_dma_probe, ++ .remove = hisi_dma_remove, ++}; ++ ++RTE_PMD_REGISTER_PCI(dma_hisilicon, hisi_dma_pmd_drv); ++RTE_PMD_REGISTER_PCI_TABLE(dma_hisilicon, pci_id_hisi_dma_map); ++RTE_PMD_REGISTER_KMOD_DEP(dma_hisilicon, "vfio-pci"); +diff --git a/drivers/dma/hisilicon/hisi_dmadev.h b/drivers/dma/hisilicon/hisi_dmadev.h +new file mode 100644 +index 000000000..12e209c86 +--- /dev/null ++++ b/drivers/dma/hisilicon/hisi_dmadev.h +@@ -0,0 +1,236 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2021 HiSilicon Limited ++ */ ++ ++#ifndef HISI_DMADEV_H ++#define HISI_DMADEV_H ++ ++#include ++#include ++ ++#define BIT(x) (1ul << (x)) ++#define BITS_PER_LONG (__SIZEOF_LONG__ * 8) ++#define GENMASK(h, l) \ ++ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) ++#define BF_SHF(x) (__builtin_ffsll(x) - 1) ++#define FIELD_GET(mask, reg) \ ++ ((typeof(mask))(((reg) & (mask)) >> BF_SHF(mask))) ++ ++#define lower_32_bits(x) ((uint32_t)(x)) ++#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16)) ++ ++#define PCI_VENDOR_ID_HUAWEI 0x19e5 ++#define HISI_DMA_DEVICE_ID 0xA122 ++#define HISI_DMA_PCI_REVISION_ID_REG 0x08 ++#define HISI_DMA_REVISION_HIP08B 0x21 ++ ++#define HISI_DMA_MAX_HW_QUEUES 4 ++#define HISI_DMA_MAX_DESC_NUM 8192 ++#define HISI_DMA_MIN_DESC_NUM 32 ++ ++/** ++ * The HIP08B(HiSilicon IP08) and later Chip(e.g. HiSilicon IP09) are DMA iEPs, ++ * they have the same pci device id but with different pci revision. ++ * Unfortunately, they have different register layouts, so the layout ++ * enumerations are defined. ++ */ ++enum { ++ HISI_DMA_REG_LAYOUT_INVALID = 0, ++ HISI_DMA_REG_LAYOUT_HIP08 ++}; ++ ++/** ++ * Hardware PCI bar register MAP: ++ * ++ * -------------- ++ * | Misc-reg-0 | ++ * | | ++ * -------------- -> Queue base ++ * | | ++ * | Queue-0 | ++ * | | ++ * -------------- --- ++ * | | ^ ++ * | Queue-1 | Queue region ++ * | | v ++ * -------------- --- ++ * | ... | ++ * | Queue-x | ++ * | ... 
| ++ * -------------- ++ * | Misc-reg-1 | ++ * -------------- ++ * ++ * As described above, a single queue register is continuous and occupies the ++ * length of queue-region. The global offset for a single queue register is ++ * calculated by: ++ * offset = queue-base + (queue-id * queue-region) + reg-offset-in-region. ++ * ++ * The first part of queue region is basically the same for HIP08 and later chip ++ * register layouts, therefore, HISI_QUEUE_* registers are defined for it. ++ */ ++#define HISI_DMA_QUEUE_SQ_BASE_L_REG 0x0 ++#define HISI_DMA_QUEUE_SQ_BASE_H_REG 0x4 ++#define HISI_DMA_QUEUE_SQ_DEPTH_REG 0x8 ++#define HISI_DMA_QUEUE_SQ_TAIL_REG 0xC ++#define HISI_DMA_QUEUE_CQ_BASE_L_REG 0x10 ++#define HISI_DMA_QUEUE_CQ_BASE_H_REG 0x14 ++#define HISI_DMA_QUEUE_CQ_DEPTH_REG 0x18 ++#define HISI_DMA_QUEUE_CQ_HEAD_REG 0x1C ++#define HISI_DMA_QUEUE_CTRL0_REG 0x20 ++#define HISI_DMA_QUEUE_CTRL0_EN_B 0 ++#define HISI_DMA_QUEUE_CTRL0_PAUSE_B 4 ++#define HISI_DMA_QUEUE_CTRL1_REG 0x24 ++#define HISI_DMA_QUEUE_CTRL1_RESET_B 0 ++#define HISI_DMA_QUEUE_FSM_REG 0x30 ++#define HISI_DMA_QUEUE_FSM_STS_M GENMASK(3, 0) ++#define HISI_DMA_QUEUE_INT_STATUS_REG 0x40 ++#define HISI_DMA_QUEUE_ERR_INT_NUM0_REG 0x84 ++#define HISI_DMA_QUEUE_ERR_INT_NUM1_REG 0x88 ++#define HISI_DMA_QUEUE_ERR_INT_NUM2_REG 0x8C ++#define HISI_DMA_QUEUE_REGION_SIZE 0x100 ++ ++/** ++ * HiSilicon IP08 DMA register and field define: ++ */ ++#define HISI_DMA_HIP08_QUEUE_BASE 0x0 ++#define HISI_DMA_HIP08_QUEUE_CTRL0_ERR_ABORT_B 2 ++#define HISI_DMA_HIP08_QUEUE_INT_MASK_REG 0x44 ++#define HISI_DMA_HIP08_QUEUE_INT_MASK_M GENMASK(14, 0) ++#define HISI_DMA_HIP08_QUEUE_ERR_INT_NUM3_REG 0x90 ++#define HISI_DMA_HIP08_QUEUE_ERR_INT_NUM4_REG 0x94 ++#define HISI_DMA_HIP08_QUEUE_ERR_INT_NUM5_REG 0x98 ++#define HISI_DMA_HIP08_QUEUE_ERR_INT_NUM6_REG 0x48 ++#define HISI_DMA_HIP08_MODE_REG 0x217C ++#define HISI_DMA_HIP08_MODE_SEL_B 0 ++#define HISI_DMA_HIP08_DUMP_START_REG 0x2000 ++#define HISI_DMA_HIP08_DUMP_END_REG 0x2280 ++ ++/** ++ * In fact, there are multiple states, but it need to pay attention to ++ * the following two states for the driver: ++ */ ++enum { ++ HISI_DMA_STATE_IDLE = 0, ++ HISI_DMA_STATE_RUN, ++}; ++ ++/** ++ * Hardware complete status define: ++ */ ++#define HISI_DMA_STATUS_SUCCESS 0x0 ++#define HISI_DMA_STATUS_INVALID_OPCODE 0x1 ++#define HISI_DMA_STATUS_INVALID_LENGTH 0x2 ++#define HISI_DMA_STATUS_USER_ABORT 0x4 ++#define HISI_DMA_STATUS_REMOTE_READ_ERROR 0x10 ++#define HISI_DMA_STATUS_AXI_READ_ERROR 0x20 ++#define HISI_DMA_STATUS_AXI_WRITE_ERROR 0x40 ++#define HISI_DMA_STATUS_DATA_POISON 0x80 ++#define HISI_DMA_STATUS_SQE_READ_ERROR 0x100 ++#define HISI_DMA_STATUS_SQE_READ_POISION 0x200 ++#define HISI_DMA_STATUS_REMOTE_DATA_POISION 0x400 ++#define HISI_DMA_STATUS_LINK_DOWN_ERROR 0x800 ++ ++/** ++ * After scanning the CQ array, the CQ head register needs to be updated. ++ * Updating the register involves write memory barrier operations. ++ * Here use the following method to reduce WMB operations: ++ * a) malloc more CQEs, which correspond to the macro HISI_DMA_CQ_RESERVED. ++ * b) update the CQ head register after accumulated number of completed CQs ++ * is greater than or equal to HISI_DMA_CQ_RESERVED. 
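++ * For example, with HISI_DMA_CQ_RESERVED = 64 the CQ head doorbell is
++ * written roughly once per 64 completions instead of on every scan, and
++ * hisi_dma_alloc_iomem() allocates 64 extra CQEs (cq_depth = ring_size +
++ * HISI_DMA_CQ_RESERVED) so hardware still has free completion slots while
++ * the head update is deferred.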
++ */ ++#define HISI_DMA_CQ_RESERVED 64 ++ ++struct hisi_dma_sqe { ++ uint32_t dw0; ++#define SQE_FENCE_FLAG BIT(10) ++#define SQE_OPCODE_M2M 0x4 ++ uint32_t dw1; ++ uint32_t dw2; ++ uint32_t length; ++ uint64_t src_addr; ++ uint64_t dst_addr; ++}; ++ ++struct hisi_dma_cqe { ++ uint64_t rsv; ++ uint64_t misc; ++#define CQE_SQ_HEAD_MASK GENMASK(15, 0) ++#define CQE_VALID_B BIT(48) ++#define CQE_STATUS_MASK GENMASK(63, 49) ++}; ++ ++struct hisi_dma_dev { ++ struct hisi_dma_sqe *sqe; ++ volatile struct hisi_dma_cqe *cqe; ++ uint16_t *status; /* the completion status array of SQEs. */ ++ ++ volatile void *sq_tail_reg; /**< register address for doorbell. */ ++ volatile void *cq_head_reg; /**< register address for answer CQ. */ ++ ++ uint16_t sq_depth_mask; /**< SQ depth - 1, the SQ depth is power of 2 */ ++ uint16_t cq_depth; /* CQ depth */ ++ ++ uint16_t ridx; /**< ring index which will assign to the next request. */ ++ /** ring index which returned by hisi_dmadev_completed APIs. */ ++ uint16_t cridx; ++ ++ /** ++ * SQE array management fields: ++ * ++ * ----------------------------------------------------- ++ * | SQE0 | SQE1 | SQE2 | ... | SQEx | ... | SQEn-1 | ++ * ----------------------------------------------------- ++ * ^ ^ ^ ++ * | | | ++ * sq_head cq_sq_head sq_tail ++ * ++ * sq_head: index to the oldest completed request, this filed was ++ * updated by hisi_dmadev_completed* APIs. ++ * sq_tail: index of the next new request, this field was updated by ++ * hisi_dmadev_copy API. ++ * cq_sq_head: next index of index that has been completed by hardware, ++ * this filed was updated by hisi_dmadev_completed* APIs. ++ * ++ * [sq_head, cq_sq_head): the SQEs that hardware already completed. ++ * [cq_sq_head, sq_tail): the SQEs that hardware processing. ++ */ ++ uint16_t sq_head; ++ uint16_t sq_tail; ++ uint16_t cq_sq_head; ++ /** ++ * The driver scans the CQE array, if the valid bit changes, the CQE is ++ * considered valid. ++ * Note: One CQE is corresponding to one or several SQEs, e.g. app ++ * submits two copy requests, the hardware processes the two SQEs, ++ * but it may write back only one CQE and the CQE's sq_head field ++ * indicates the index of the second copy request in the SQE ++ * array. ++ */ ++ uint16_t cq_head; /**< CQ index for next scans. */ ++ /** accumulated number of completed CQs ++ * @see HISI_DMA_CQ_RESERVED ++ */ ++ uint16_t cqs_completed; ++ uint8_t cqe_vld; /**< valid bit for CQE, will change for every round. */ ++ ++ uint64_t submitted; ++ uint64_t completed; ++ uint64_t errors; ++ ++ /** ++ * The following fields are not accessed in the I/O path, so they are ++ * placed at the end. ++ */ ++ struct rte_dma_dev_data *data; ++ uint8_t revision; /**< PCI revision. */ ++ uint8_t reg_layout; /**< hardware register layout. */ ++ void *io_base; ++ uint8_t queue_id; /**< hardware DMA queue index. 
*/ ++ const struct rte_memzone *iomz; ++ uint32_t iomz_sz; ++ rte_iova_t sqe_iova; ++ rte_iova_t cqe_iova; ++}; ++ ++#endif /* HISI_DMADEV_H */ +diff --git a/drivers/dma/hisilicon/meson.build b/drivers/dma/hisilicon/meson.build +new file mode 100644 +index 000000000..c11dc352d +--- /dev/null ++++ b/drivers/dma/hisilicon/meson.build +@@ -0,0 +1,19 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright(c) 2021 HiSilicon Limited ++ ++if not is_linux ++ build = false ++ reason = 'only supported on Linux' ++ subdir_done() ++endif ++ ++if (arch_subdir != 'x86' and arch_subdir != 'arm') or (not dpdk_conf.get('RTE_ARCH_64')) ++ build = false ++ reason = 'only supported on x86_64 and aarch64' ++ subdir_done() ++endif ++ ++deps += ['bus_pci', 'dmadev'] ++sources = files( ++ 'hisi_dmadev.c' ++) +diff --git a/drivers/dma/hisilicon/version.map b/drivers/dma/hisilicon/version.map +new file mode 100644 +index 000000000..c2e0723b4 +--- /dev/null ++++ b/drivers/dma/hisilicon/version.map +@@ -0,0 +1,3 @@ ++DPDK_22 { ++ local: *; ++}; +diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build +new file mode 100644 +index 000000000..d030069fd +--- /dev/null ++++ b/drivers/dma/meson.build +@@ -0,0 +1,13 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright 2021 HiSilicon Limited ++ ++if is_windows ++ subdir_done() ++endif ++ ++drivers = [ ++ 'hisilicon', ++ 'skeleton' ++] ++std_deps = ['dmadev'] ++config_flag_fmt = 'RTE_LIBRTE_PMD_DMADEV_@0@' +diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build +new file mode 100644 +index 000000000..defe905e4 +--- /dev/null ++++ b/drivers/dma/skeleton/meson.build +@@ -0,0 +1,11 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright(c) 2021 HiSilicon Limited ++ ++if is_windows ++ subdir_done() ++endif ++ ++deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev'] ++sources = files( ++ 'skeleton_dmadev.c', ++) +diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c +new file mode 100644 +index 000000000..8bed41f8b +--- /dev/null ++++ b/drivers/dma/skeleton/skeleton_dmadev.c +@@ -0,0 +1,596 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2021 HiSilicon Limited ++ */ ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "skeleton_dmadev.h" ++ ++RTE_LOG_REGISTER(skeldma_logtype, pmd.dma.skeleton, INFO); ++#define SKELDMA_LOG(level, fmt, args...) \ ++ rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \ ++ __func__, ##args) ++ ++/* Count of instances, currently only 1 is supported. 
*/ ++static uint16_t skeldma_count; ++ ++static int ++skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info, ++ uint32_t info_sz) ++{ ++#define SKELDMA_MAX_DESC 8192 ++#define SKELDMA_MIN_DESC 32 ++ ++ RTE_SET_USED(dev); ++ RTE_SET_USED(info_sz); ++ ++ dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | ++ RTE_DMA_CAPA_SVA | ++ RTE_DMA_CAPA_OPS_COPY; ++ dev_info->max_vchans = 1; ++ dev_info->max_desc = SKELDMA_MAX_DESC; ++ dev_info->min_desc = SKELDMA_MIN_DESC; ++ ++ return 0; ++} ++ ++static int ++skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf, ++ uint32_t conf_sz) ++{ ++ RTE_SET_USED(dev); ++ RTE_SET_USED(conf); ++ RTE_SET_USED(conf_sz); ++ return 0; ++} ++ ++static void * ++cpucopy_thread(void *param) ++{ ++#define SLEEP_THRESHOLD 10000 ++#define SLEEP_US_VAL 10 ++ ++ struct rte_dma_dev *dev = param; ++ struct skeldma_hw *hw = dev->data->dev_private; ++ struct skeldma_desc *desc = NULL; ++ int ret; ++ ++ while (!hw->exit_flag) { ++ ret = rte_ring_dequeue(hw->desc_running, (void **)&desc); ++ if (ret) { ++ hw->zero_req_count++; ++ if (hw->zero_req_count == 0) ++ hw->zero_req_count = SLEEP_THRESHOLD; ++ if (hw->zero_req_count >= SLEEP_THRESHOLD) ++ rte_delay_us_sleep(SLEEP_US_VAL); ++ continue; ++ } ++ ++ hw->zero_req_count = 0; ++ rte_memcpy(desc->dst, desc->src, desc->len); ++ __atomic_add_fetch(&hw->completed_count, 1, __ATOMIC_RELEASE); ++ (void)rte_ring_enqueue(hw->desc_completed, (void *)desc); ++ } ++ ++ return NULL; ++} ++ ++static void ++fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring) ++{ ++ struct skeldma_desc *desc = NULL; ++ while (rte_ring_count(ring) > 0) { ++ (void)rte_ring_dequeue(ring, (void **)&desc); ++ (void)rte_ring_enqueue(hw->desc_empty, (void *)desc); ++ } ++} ++ ++static int ++skeldma_start(struct rte_dma_dev *dev) ++{ ++ struct skeldma_hw *hw = dev->data->dev_private; ++ rte_cpuset_t cpuset; ++ int ret; ++ ++ if (hw->desc_mem == NULL) { ++ SKELDMA_LOG(ERR, "Vchan was not setup, start fail!"); ++ return -EINVAL; ++ } ++ ++ /* Reset the dmadev to a known state, include: ++ * 1) fflush pending/running/completed ring to empty ring. ++ * 2) init ring idx to zero. ++ * 3) init running statistics. ++ * 4) mark cpucopy task exit_flag to false. 
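++	 * The rte_mb() below is intended to make these resets visible before
++	 * the cpucopy thread is launched.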
++ */ ++ fflush_ring(hw, hw->desc_pending); ++ fflush_ring(hw, hw->desc_running); ++ fflush_ring(hw, hw->desc_completed); ++ hw->ridx = 0; ++ hw->submitted_count = 0; ++ hw->zero_req_count = 0; ++ hw->completed_count = 0; ++ hw->exit_flag = false; ++ ++ rte_mb(); ++ ++ ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL, ++ cpucopy_thread, dev); ++ if (ret) { ++ SKELDMA_LOG(ERR, "Start cpucopy thread fail!"); ++ return -EINVAL; ++ } ++ ++ if (hw->lcore_id != -1) { ++ cpuset = rte_lcore_cpuset(hw->lcore_id); ++ ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset), ++ &cpuset); ++ if (ret) ++ SKELDMA_LOG(WARNING, ++ "Set thread affinity lcore = %d fail!", ++ hw->lcore_id); ++ } ++ ++ return 0; ++} ++ ++static int ++skeldma_stop(struct rte_dma_dev *dev) ++{ ++ struct skeldma_hw *hw = dev->data->dev_private; ++ ++ hw->exit_flag = true; ++ rte_delay_ms(1); ++ ++ (void)pthread_cancel(hw->thread); ++ pthread_join(hw->thread, NULL); ++ ++ return 0; ++} ++ ++static int ++vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc) ++{ ++ struct skeldma_desc *desc; ++ struct rte_ring *empty; ++ struct rte_ring *pending; ++ struct rte_ring *running; ++ struct rte_ring *completed; ++ uint16_t i; ++ ++ desc = rte_zmalloc_socket("dma_skelteon_desc", ++ nb_desc * sizeof(struct skeldma_desc), ++ RTE_CACHE_LINE_SIZE, hw->socket_id); ++ if (desc == NULL) { ++ SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!"); ++ return -ENOMEM; ++ } ++ ++ empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc, ++ hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); ++ pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc, ++ hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); ++ running = rte_ring_create("dma_skeleton_desc_running", nb_desc, ++ hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); ++ completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc, ++ hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); ++ if (empty == NULL || pending == NULL || running == NULL || ++ completed == NULL) { ++ SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!"); ++ rte_ring_free(empty); ++ rte_ring_free(pending); ++ rte_ring_free(running); ++ rte_ring_free(completed); ++ rte_free(desc); ++ return -ENOMEM; ++ } ++ ++ /* The real usable ring size is *count-1* instead of *count* to ++ * differentiate a free ring from an empty ring. 
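++	 * e.g. with nb_desc = 1024 only 1023 descriptors are enqueued to the
++	 * empty ring below, which is why the loop bound is (nb_desc - 1).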
++ * @see rte_ring_create ++ */ ++ for (i = 0; i < nb_desc - 1; i++) ++ (void)rte_ring_enqueue(empty, (void *)(desc + i)); ++ ++ hw->desc_mem = desc; ++ hw->desc_empty = empty; ++ hw->desc_pending = pending; ++ hw->desc_running = running; ++ hw->desc_completed = completed; ++ ++ return 0; ++} ++ ++static void ++vchan_release(struct skeldma_hw *hw) ++{ ++ if (hw->desc_mem == NULL) ++ return; ++ ++ rte_free(hw->desc_mem); ++ hw->desc_mem = NULL; ++ rte_ring_free(hw->desc_empty); ++ hw->desc_empty = NULL; ++ rte_ring_free(hw->desc_pending); ++ hw->desc_pending = NULL; ++ rte_ring_free(hw->desc_running); ++ hw->desc_running = NULL; ++ rte_ring_free(hw->desc_completed); ++ hw->desc_completed = NULL; ++} ++ ++static int ++skeldma_close(struct rte_dma_dev *dev) ++{ ++ /* The device already stopped */ ++ vchan_release(dev->data->dev_private); ++ return 0; ++} ++ ++static int ++skeldma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, ++ const struct rte_dma_vchan_conf *conf, ++ uint32_t conf_sz) ++{ ++ struct skeldma_hw *hw = dev->data->dev_private; ++ ++ RTE_SET_USED(vchan); ++ RTE_SET_USED(conf_sz); ++ ++ if (!rte_is_power_of_2(conf->nb_desc)) { ++ SKELDMA_LOG(ERR, "Number of desc must be power of 2!"); ++ return -EINVAL; ++ } ++ ++ vchan_release(hw); ++ return vchan_setup(hw, conf->nb_desc); ++} ++ ++static int ++skeldma_vchan_status(const struct rte_dma_dev *dev, ++ uint16_t vchan, enum rte_dma_vchan_status *status) ++{ ++ struct skeldma_hw *hw = dev->data->dev_private; ++ ++ RTE_SET_USED(vchan); ++ ++ *status = RTE_DMA_VCHAN_IDLE; ++ if (hw->submitted_count != __atomic_load_n(&hw->completed_count, __ATOMIC_ACQUIRE) ++ || hw->zero_req_count == 0) ++ *status = RTE_DMA_VCHAN_ACTIVE; ++ return 0; ++} ++ ++static int ++skeldma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan, ++ struct rte_dma_stats *stats, uint32_t stats_sz) ++{ ++ struct skeldma_hw *hw = dev->data->dev_private; ++ ++ RTE_SET_USED(vchan); ++ RTE_SET_USED(stats_sz); ++ ++ stats->submitted = hw->submitted_count; ++ stats->completed = hw->completed_count; ++ stats->errors = 0; ++ ++ return 0; ++} ++ ++static int ++skeldma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan) ++{ ++ struct skeldma_hw *hw = dev->data->dev_private; ++ ++ RTE_SET_USED(vchan); ++ ++ hw->submitted_count = 0; ++ hw->completed_count = 0; ++ ++ return 0; ++} ++ ++static int ++skeldma_dump(const struct rte_dma_dev *dev, FILE *f) ++{ ++#define GET_RING_COUNT(ring) ((ring) ? 
(rte_ring_count(ring)) : 0) ++ ++ struct skeldma_hw *hw = dev->data->dev_private; ++ ++ (void)fprintf(f, ++ " lcore_id: %d\n" ++ " socket_id: %d\n" ++ " desc_empty_ring_count: %u\n" ++ " desc_pending_ring_count: %u\n" ++ " desc_running_ring_count: %u\n" ++ " desc_completed_ring_count: %u\n", ++ hw->lcore_id, hw->socket_id, ++ GET_RING_COUNT(hw->desc_empty), ++ GET_RING_COUNT(hw->desc_pending), ++ GET_RING_COUNT(hw->desc_running), ++ GET_RING_COUNT(hw->desc_completed)); ++ (void)fprintf(f, ++ " next_ring_idx: %u\n" ++ " submitted_count: %" PRIu64 "\n" ++ " completed_count: %" PRIu64 "\n", ++ hw->ridx, hw->submitted_count, hw->completed_count); ++ ++ return 0; ++} ++ ++static inline void ++submit(struct skeldma_hw *hw, struct skeldma_desc *desc) ++{ ++ uint16_t count = rte_ring_count(hw->desc_pending); ++ struct skeldma_desc *pend_desc = NULL; ++ ++ while (count > 0) { ++ (void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc); ++ (void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc); ++ count--; ++ } ++ ++ if (desc) ++ (void)rte_ring_enqueue(hw->desc_running, (void *)desc); ++} ++ ++static int ++skeldma_copy(void *dev_private, uint16_t vchan, ++ rte_iova_t src, rte_iova_t dst, ++ uint32_t length, uint64_t flags) ++{ ++ struct skeldma_hw *hw = dev_private; ++ struct skeldma_desc *desc; ++ int ret; ++ ++ RTE_SET_USED(vchan); ++ RTE_SET_USED(flags); ++ ++ ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc); ++ if (ret) ++ return -ENOSPC; ++ desc->src = (void *)(uintptr_t)src; ++ desc->dst = (void *)(uintptr_t)dst; ++ desc->len = length; ++ desc->ridx = hw->ridx; ++ if (flags & RTE_DMA_OP_FLAG_SUBMIT) ++ submit(hw, desc); ++ else ++ (void)rte_ring_enqueue(hw->desc_pending, (void *)desc); ++ hw->submitted_count++; ++ ++ return hw->ridx++; ++} ++ ++static int ++skeldma_submit(void *dev_private, uint16_t vchan) ++{ ++ struct skeldma_hw *hw = dev_private; ++ RTE_SET_USED(vchan); ++ submit(hw, NULL); ++ return 0; ++} ++ ++static uint16_t ++skeldma_completed(void *dev_private, ++ uint16_t vchan, const uint16_t nb_cpls, ++ uint16_t *last_idx, bool *has_error) ++{ ++ struct skeldma_hw *hw = dev_private; ++ struct skeldma_desc *desc = NULL; ++ uint16_t index = 0; ++ uint16_t count; ++ ++ RTE_SET_USED(vchan); ++ RTE_SET_USED(has_error); ++ ++ count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed)); ++ while (index < count) { ++ (void)rte_ring_dequeue(hw->desc_completed, (void **)&desc); ++ if (index == count - 1) ++ *last_idx = desc->ridx; ++ index++; ++ (void)rte_ring_enqueue(hw->desc_empty, (void *)desc); ++ } ++ ++ return count; ++} ++ ++static uint16_t ++skeldma_completed_status(void *dev_private, ++ uint16_t vchan, const uint16_t nb_cpls, ++ uint16_t *last_idx, enum rte_dma_status_code *status) ++{ ++ struct skeldma_hw *hw = dev_private; ++ struct skeldma_desc *desc = NULL; ++ uint16_t index = 0; ++ uint16_t count; ++ ++ RTE_SET_USED(vchan); ++ ++ count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed)); ++ while (index < count) { ++ (void)rte_ring_dequeue(hw->desc_completed, (void **)&desc); ++ if (index == count - 1) ++ *last_idx = desc->ridx; ++ status[index++] = RTE_DMA_STATUS_SUCCESSFUL; ++ (void)rte_ring_enqueue(hw->desc_empty, (void *)desc); ++ } ++ ++ return count; ++} ++ ++static uint16_t ++skeldma_burst_capacity(const void *dev_private, uint16_t vchan) ++{ ++ const struct skeldma_hw *hw = dev_private; ++ ++ RTE_SET_USED(vchan); ++ return rte_ring_count(hw->desc_empty); ++} ++ ++static const struct rte_dma_dev_ops skeldma_ops = { ++ .dev_info_get = skeldma_info_get, 
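++	/* Only control-path callbacks live in this ops table; the datapath
++	 * entries (copy/submit/completed/burst_capacity) are wired up on
++	 * dev->fp_obj in skeldma_create(). */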
++ .dev_configure = skeldma_configure, ++ .dev_start = skeldma_start, ++ .dev_stop = skeldma_stop, ++ .dev_close = skeldma_close, ++ ++ .vchan_setup = skeldma_vchan_setup, ++ .vchan_status = skeldma_vchan_status, ++ ++ .stats_get = skeldma_stats_get, ++ .stats_reset = skeldma_stats_reset, ++ ++ .dev_dump = skeldma_dump, ++}; ++ ++static int ++skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id) ++{ ++ struct rte_dma_dev *dev; ++ struct skeldma_hw *hw; ++ int socket_id; ++ ++ socket_id = (lcore_id < 0) ? rte_socket_id() : ++ rte_lcore_to_socket_id(lcore_id); ++ dev = rte_dma_pmd_allocate(name, socket_id, sizeof(struct skeldma_hw)); ++ if (dev == NULL) { ++ SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name); ++ return -EINVAL; ++ } ++ ++ dev->device = &vdev->device; ++ dev->dev_ops = &skeldma_ops; ++ dev->fp_obj->dev_private = dev->data->dev_private; ++ dev->fp_obj->copy = skeldma_copy; ++ dev->fp_obj->submit = skeldma_submit; ++ dev->fp_obj->completed = skeldma_completed; ++ dev->fp_obj->completed_status = skeldma_completed_status; ++ dev->fp_obj->burst_capacity = skeldma_burst_capacity; ++ ++ hw = dev->data->dev_private; ++ hw->lcore_id = lcore_id; ++ hw->socket_id = socket_id; ++ ++ dev->state = RTE_DMA_DEV_READY; ++ ++ return dev->data->dev_id; ++} ++ ++static int ++skeldma_destroy(const char *name) ++{ ++ return rte_dma_pmd_release(name); ++} ++ ++static int ++skeldma_parse_lcore(const char *key __rte_unused, ++ const char *value, ++ void *opaque) ++{ ++ int lcore_id = atoi(value); ++ if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE) ++ *(int *)opaque = lcore_id; ++ return 0; ++} ++ ++static void ++skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id) ++{ ++ static const char *const args[] = { ++ SKELDMA_ARG_LCORE, ++ NULL ++ }; ++ ++ struct rte_kvargs *kvlist; ++ const char *params; ++ ++ params = rte_vdev_device_args(vdev); ++ if (params == NULL || params[0] == '\0') ++ return; ++ ++ kvlist = rte_kvargs_parse(params, args); ++ if (!kvlist) ++ return; ++ ++ (void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE, ++ skeldma_parse_lcore, lcore_id); ++ SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id); ++ ++ rte_kvargs_free(kvlist); ++} ++ ++static int ++skeldma_probe(struct rte_vdev_device *vdev) ++{ ++ const char *name; ++ int lcore_id = -1; ++ int ret; ++ ++ name = rte_vdev_device_name(vdev); ++ if (name == NULL) ++ return -EINVAL; ++ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ++ SKELDMA_LOG(ERR, "Multiple process not supported for %s", name); ++ return -EINVAL; ++ } ++ ++ /* More than one instance is not supported */ ++ if (skeldma_count > 0) { ++ SKELDMA_LOG(ERR, "Multiple instance not supported for %s", ++ name); ++ return -EINVAL; ++ } ++ ++ skeldma_parse_vdev_args(vdev, &lcore_id); ++ ++ ret = skeldma_create(name, vdev, lcore_id); ++ if (ret >= 0) { ++ SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d", ++ name, lcore_id); ++ skeldma_count = 1; ++ } ++ ++ return ret < 0 ? 
ret : 0; ++} ++ ++static int ++skeldma_remove(struct rte_vdev_device *vdev) ++{ ++ const char *name; ++ int ret; ++ ++ name = rte_vdev_device_name(vdev); ++ if (name == NULL) ++ return -1; ++ ++ ret = skeldma_destroy(name); ++ if (!ret) { ++ skeldma_count = 0; ++ SKELDMA_LOG(INFO, "Remove %s dmadev", name); ++ } ++ ++ return ret; ++} ++ ++static struct rte_vdev_driver skeldma_pmd_drv = { ++ .probe = skeldma_probe, ++ .remove = skeldma_remove, ++}; ++ ++RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv); ++RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton, ++ SKELDMA_ARG_LCORE "= "); +diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h +new file mode 100644 +index 000000000..91eb5460f +--- /dev/null ++++ b/drivers/dma/skeleton/skeleton_dmadev.h +@@ -0,0 +1,61 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2021 HiSilicon Limited ++ */ ++ ++#ifndef SKELETON_DMADEV_H ++#define SKELETON_DMADEV_H ++ ++#include ++ ++#include ++ ++#define SKELDMA_ARG_LCORE "lcore" ++ ++struct skeldma_desc { ++ void *src; ++ void *dst; ++ uint32_t len; ++ uint16_t ridx; /* ring idx */ ++}; ++ ++struct skeldma_hw { ++ int lcore_id; /* cpucopy task affinity core */ ++ int socket_id; ++ pthread_t thread; /* cpucopy task thread */ ++ volatile int exit_flag; /* cpucopy task exit flag */ ++ ++ struct skeldma_desc *desc_mem; ++ ++ /* Descriptor ring state machine: ++ * ++ * ----------- enqueue without submit ----------- ++ * | empty |------------------------------->| pending | ++ * -----------\ ----------- ++ * ^ \------------ | ++ * | | |submit doorbell ++ * | | | ++ * | |enqueue with submit | ++ * |get completed |------------------| | ++ * | | | ++ * | v v ++ * ----------- cpucopy thread working ----------- ++ * |completed|<-------------------------------| running | ++ * ----------- ----------- ++ */ ++ struct rte_ring *desc_empty; ++ struct rte_ring *desc_pending; ++ struct rte_ring *desc_running; ++ struct rte_ring *desc_completed; ++ ++ /* Cache delimiter for dataplane API's operation data */ ++ char cache1 __rte_cache_aligned; ++ uint16_t ridx; /* ring idx */ ++ uint64_t submitted_count; ++ ++ /* Cache delimiter for cpucopy thread's operation data */ ++ char cache2 __rte_cache_aligned; ++ volatile uint32_t zero_req_count; ++ uint64_t completed_count; ++}; ++ ++#endif /* SKELETON_DMADEV_H */ +diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map +new file mode 100644 +index 000000000..c2e0723b4 +--- /dev/null ++++ b/drivers/dma/skeleton/version.map +@@ -0,0 +1,3 @@ ++DPDK_22 { ++ local: *; ++}; +diff --git a/drivers/meson.build b/drivers/meson.build +index f9febc579..996df2210 100644 +--- a/drivers/meson.build ++++ b/drivers/meson.build +@@ -16,6 +16,7 @@ subdirs = [ + 'vdpa', # depends on common, bus and mempool. + 'event', # depends on common, bus, mempool and net. + 'baseband', # depends on common and bus. 
++ 'dma' + ] + + disabled_drivers = run_command(list_dir_globs, get_option('disable_drivers'), +diff --git a/examples/dma/Makefile b/examples/dma/Makefile +new file mode 100644 +index 000000000..59af6478b +--- /dev/null ++++ b/examples/dma/Makefile +@@ -0,0 +1,51 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright(c) 2019 Intel Corporation ++ ++# binary name ++APP = dmafwd ++ ++# all source are stored in SRCS-y ++SRCS-y := dmafwd.c ++ ++PKGCONF ?= pkg-config ++ ++# Build using pkg-config variables if possible ++ifneq ($(shell $(PKGCONF) --exists libdpdk && echo 0),0) ++$(error "no installation of DPDK found") ++endif ++ ++all: shared ++.PHONY: shared static ++shared: build/$(APP)-shared ++ ln -sf $(APP)-shared build/$(APP) ++static: build/$(APP)-static ++ ln -sf $(APP)-static build/$(APP) ++ ++PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null) ++CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) ++LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk) ++LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk) ++ ++ifeq ($(MAKECMDGOALS),static) ++# check for broken pkg-config ++ifeq ($(shell echo $(LDFLAGS_STATIC) | grep 'whole-archive.*l:lib.*no-whole-archive'),) ++$(warning "pkg-config output list does not contain drivers between 'whole-archive'/'no-whole-archive' flags.") ++$(error "Cannot generate statically-linked binaries with this version of pkg-config") ++endif ++endif ++ ++CFLAGS += -DALLOW_EXPERIMENTAL_API ++ ++build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build ++ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED) ++ ++build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build ++ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC) ++ ++build: ++ @mkdir -p $@ ++ ++.PHONY: clean ++clean: ++ rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared ++ test -d build && rmdir -p build || true +diff --git a/examples/dma/dmafwd.c b/examples/dma/dmafwd.c +new file mode 100644 +index 000000000..9ff2593bb +--- /dev/null ++++ b/examples/dma/dmafwd.c +@@ -0,0 +1,1105 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2019-2021 Intel Corporation ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++/* size of ring used for software copying between rx and tx. */ ++#define RTE_LOGTYPE_DMA RTE_LOGTYPE_USER1 ++#define MAX_PKT_BURST 32 ++#define MEMPOOL_CACHE_SIZE 512 ++#define MIN_POOL_SIZE 65536U ++#define CMD_LINE_OPT_MAC_UPDATING "mac-updating" ++#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating" ++#define CMD_LINE_OPT_PORTMASK "portmask" ++#define CMD_LINE_OPT_NB_QUEUE "nb-queue" ++#define CMD_LINE_OPT_COPY_TYPE "copy-type" ++#define CMD_LINE_OPT_RING_SIZE "ring-size" ++#define CMD_LINE_OPT_BATCH_SIZE "dma-batch-size" ++#define CMD_LINE_OPT_FRAME_SIZE "max-frame-size" ++#define CMD_LINE_OPT_STATS_INTERVAL "stats-interval" ++ ++/* configurable number of RX/TX ring descriptors */ ++#define RX_DEFAULT_RINGSIZE 1024 ++#define TX_DEFAULT_RINGSIZE 1024 ++ ++/* max number of RX queues per port */ ++#define MAX_RX_QUEUES_COUNT 8 ++ ++struct rxtx_port_config { ++ /* common config */ ++ uint16_t rxtx_port; ++ uint16_t nb_queues; ++ /* for software copy mode */ ++ struct rte_ring *rx_to_tx_ring; ++ /* for dmadev HW copy mode */ ++ uint16_t dmadev_ids[MAX_RX_QUEUES_COUNT]; ++}; ++ ++/* Configuring ports and number of assigned lcores in struct. 
8< */ ++struct rxtx_transmission_config { ++ struct rxtx_port_config ports[RTE_MAX_ETHPORTS]; ++ uint16_t nb_ports; ++ uint16_t nb_lcores; ++}; ++/* >8 End of configuration of ports and number of assigned lcores. */ ++ ++/* per-port statistics struct */ ++struct dma_port_statistics { ++ uint64_t rx[RTE_MAX_ETHPORTS]; ++ uint64_t tx[RTE_MAX_ETHPORTS]; ++ uint64_t tx_dropped[RTE_MAX_ETHPORTS]; ++ uint64_t copy_dropped[RTE_MAX_ETHPORTS]; ++}; ++struct dma_port_statistics port_statistics; ++struct total_statistics { ++ uint64_t total_packets_dropped; ++ uint64_t total_packets_tx; ++ uint64_t total_packets_rx; ++ uint64_t total_submitted; ++ uint64_t total_completed; ++ uint64_t total_failed; ++}; ++ ++typedef enum copy_mode_t { ++#define COPY_MODE_SW "sw" ++ COPY_MODE_SW_NUM, ++#define COPY_MODE_DMA "hw" ++ COPY_MODE_DMA_NUM, ++ COPY_MODE_INVALID_NUM, ++ COPY_MODE_SIZE_NUM = COPY_MODE_INVALID_NUM ++} copy_mode_t; ++ ++/* mask of enabled ports */ ++static uint32_t dma_enabled_port_mask; ++ ++/* number of RX queues per port */ ++static uint16_t nb_queues = 1; ++ ++/* MAC updating enabled by default. */ ++static int mac_updating = 1; ++ ++/* hardare copy mode enabled by default. */ ++static copy_mode_t copy_mode = COPY_MODE_DMA_NUM; ++ ++/* size of descriptor ring for hardware copy mode or ++ * rte_ring for software copy mode ++ */ ++static unsigned short ring_size = 2048; ++ ++/* interval, in seconds, between stats prints */ ++static unsigned short stats_interval = 1; ++/* global mbuf arrays for tracking DMA bufs */ ++#define MBUF_RING_SIZE 2048 ++#define MBUF_RING_MASK (MBUF_RING_SIZE - 1) ++struct dma_bufs { ++ struct rte_mbuf *bufs[MBUF_RING_SIZE]; ++ struct rte_mbuf *copies[MBUF_RING_SIZE]; ++ uint16_t sent; ++}; ++static struct dma_bufs dma_bufs[RTE_DMADEV_DEFAULT_MAX]; ++ ++/* global transmission config */ ++struct rxtx_transmission_config cfg; ++ ++/* configurable number of RX/TX ring descriptors */ ++static uint16_t nb_rxd = RX_DEFAULT_RINGSIZE; ++static uint16_t nb_txd = TX_DEFAULT_RINGSIZE; ++ ++static volatile bool force_quit; ++ ++static uint32_t dma_batch_sz = MAX_PKT_BURST; ++static uint32_t max_frame_size = RTE_ETHER_MAX_LEN; ++ ++/* ethernet addresses of ports */ ++static struct rte_ether_addr dma_ports_eth_addr[RTE_MAX_ETHPORTS]; ++ ++static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS]; ++struct rte_mempool *dma_pktmbuf_pool; ++ ++/* Print out statistics for one port. */ ++static void ++print_port_stats(uint16_t port_id) ++{ ++ printf("\nStatistics for port %u ------------------------------" ++ "\nPackets sent: %34"PRIu64 ++ "\nPackets received: %30"PRIu64 ++ "\nPackets dropped on tx: %25"PRIu64 ++ "\nPackets dropped on copy: %23"PRIu64, ++ port_id, ++ port_statistics.tx[port_id], ++ port_statistics.rx[port_id], ++ port_statistics.tx_dropped[port_id], ++ port_statistics.copy_dropped[port_id]); ++} ++ ++/* Print out statistics for one dmadev device. 
*/ ++static void ++print_dmadev_stats(uint32_t dev_id, struct rte_dma_stats stats) ++{ ++ printf("\nDMA channel %u", dev_id); ++ printf("\n\t Total submitted ops: %"PRIu64"", stats.submitted); ++ printf("\n\t Total completed ops: %"PRIu64"", stats.completed); ++ printf("\n\t Total failed ops: %"PRIu64"", stats.errors); ++} ++ ++static void ++print_total_stats(struct total_statistics *ts) ++{ ++ printf("\nAggregate statistics ===============================" ++ "\nTotal packets Tx: %22"PRIu64" [pkt/s]" ++ "\nTotal packets Rx: %22"PRIu64" [pkt/s]" ++ "\nTotal packets dropped: %17"PRIu64" [pkt/s]", ++ ts->total_packets_tx / stats_interval, ++ ts->total_packets_rx / stats_interval, ++ ts->total_packets_dropped / stats_interval); ++ ++ if (copy_mode == COPY_MODE_DMA_NUM) { ++ printf("\nTotal submitted ops: %19"PRIu64" [ops/s]" ++ "\nTotal completed ops: %19"PRIu64" [ops/s]" ++ "\nTotal failed ops: %22"PRIu64" [ops/s]", ++ ts->total_submitted / stats_interval, ++ ts->total_completed / stats_interval, ++ ts->total_failed / stats_interval); ++ } ++ ++ printf("\n====================================================\n"); ++} ++ ++/* Print out statistics on packets dropped. */ ++static void ++print_stats(char *prgname) ++{ ++ struct total_statistics ts, delta_ts; ++ struct rte_dma_stats stats = {0}; ++ uint32_t i, port_id, dev_id; ++ char status_string[255]; /* to print at the top of the output */ ++ int status_strlen; ++ ++ const char clr[] = { 27, '[', '2', 'J', '\0' }; ++ const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' }; ++ ++ status_strlen = snprintf(status_string, sizeof(status_string), ++ "%s, ", prgname); ++ status_strlen += snprintf(status_string + status_strlen, ++ sizeof(status_string) - status_strlen, ++ "Worker Threads = %d, ", ++ rte_lcore_count() > 2 ? 2 : 1); ++ status_strlen += snprintf(status_string + status_strlen, ++ sizeof(status_string) - status_strlen, ++ "Copy Mode = %s,\n", copy_mode == COPY_MODE_SW_NUM ? ++ COPY_MODE_SW : COPY_MODE_DMA); ++ status_strlen += snprintf(status_string + status_strlen, ++ sizeof(status_string) - status_strlen, ++ "Updating MAC = %s, ", mac_updating ? ++ "enabled" : "disabled"); ++ status_strlen += snprintf(status_string + status_strlen, ++ sizeof(status_string) - status_strlen, ++ "Rx Queues = %d, ", nb_queues); ++ status_strlen += snprintf(status_string + status_strlen, ++ sizeof(status_string) - status_strlen, ++ "Ring Size = %d", ring_size); ++ ++ memset(&ts, 0, sizeof(struct total_statistics)); ++ ++ while (!force_quit) { ++ /* Sleep for "stats_interval" seconds each round - init sleep allows reading ++ * messages from app startup. 
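++		 * All rates printed below are per-second averages over this
++		 * interval (print_total_stats() divides the deltas by
++		 * stats_interval).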
++ */ ++ sleep(stats_interval); ++ ++ /* Clear screen and move to top left */ ++ printf("%s%s", clr, topLeft); ++ ++ memset(&delta_ts, 0, sizeof(struct total_statistics)); ++ ++ printf("%s\n", status_string); ++ ++ for (i = 0; i < cfg.nb_ports; i++) { ++ port_id = cfg.ports[i].rxtx_port; ++ print_port_stats(port_id); ++ ++ delta_ts.total_packets_dropped += ++ port_statistics.tx_dropped[port_id] ++ + port_statistics.copy_dropped[port_id]; ++ delta_ts.total_packets_tx += ++ port_statistics.tx[port_id]; ++ delta_ts.total_packets_rx += ++ port_statistics.rx[port_id]; ++ ++ if (copy_mode == COPY_MODE_DMA_NUM) { ++ uint32_t j; ++ ++ for (j = 0; j < cfg.ports[i].nb_queues; j++) { ++ dev_id = cfg.ports[i].dmadev_ids[j]; ++ rte_dma_stats_get(dev_id, 0, &stats); ++ print_dmadev_stats(dev_id, stats); ++ ++ delta_ts.total_submitted += stats.submitted; ++ delta_ts.total_completed += stats.completed; ++ delta_ts.total_failed += stats.errors; ++ } ++ } ++ } ++ ++ delta_ts.total_packets_tx -= ts.total_packets_tx; ++ delta_ts.total_packets_rx -= ts.total_packets_rx; ++ delta_ts.total_packets_dropped -= ts.total_packets_dropped; ++ delta_ts.total_submitted -= ts.total_submitted; ++ delta_ts.total_completed -= ts.total_completed; ++ delta_ts.total_failed -= ts.total_failed; ++ ++ printf("\n"); ++ print_total_stats(&delta_ts); ++ ++ fflush(stdout); ++ ++ ts.total_packets_tx += delta_ts.total_packets_tx; ++ ts.total_packets_rx += delta_ts.total_packets_rx; ++ ts.total_packets_dropped += delta_ts.total_packets_dropped; ++ ts.total_submitted += delta_ts.total_submitted; ++ ts.total_completed += delta_ts.total_completed; ++ ts.total_failed += delta_ts.total_failed; ++ } ++} ++ ++static void ++update_mac_addrs(struct rte_mbuf *m, uint32_t dest_portid) ++{ ++ struct rte_ether_hdr *eth; ++ void *tmp; ++ ++ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); ++ ++ /* 02:00:00:00:00:xx - overwriting 2 bytes of source address but ++ * it's acceptable cause it gets overwritten by rte_ether_addr_copy ++ */ ++ tmp = ð->d_addr.addr_bytes[0]; ++ *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40); ++ ++ /* src addr */ ++ rte_ether_addr_copy(&dma_ports_eth_addr[dest_portid], ð->s_addr); ++} ++ ++/* Perform packet copy there is a user-defined function. 8< */ ++static inline void ++pktmbuf_metadata_copy(const struct rte_mbuf *src, struct rte_mbuf *dst) ++{ ++ dst->data_off = src->data_off; ++ memcpy(&dst->rx_descriptor_fields1, &src->rx_descriptor_fields1, ++ offsetof(struct rte_mbuf, buf_len) - ++ offsetof(struct rte_mbuf, rx_descriptor_fields1)); ++} ++ ++/* Copy packet data */ ++static inline void ++pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst) ++{ ++ rte_memcpy(rte_pktmbuf_mtod(dst, char *), ++ rte_pktmbuf_mtod(src, char *), src->data_len); ++} ++/* >8 End of perform packet copy there is a user-defined function. 
*/ ++ ++static uint32_t ++dma_enqueue_packets(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[], ++ uint32_t nb_rx, uint16_t dev_id) ++{ ++ struct dma_bufs *dma = &dma_bufs[dev_id]; ++ int ret; ++ uint32_t i; ++ ++ for (i = 0; i < nb_rx; i++) { ++ /* Perform data copy */ ++ ret = rte_dma_copy(dev_id, 0, ++ rte_pktmbuf_iova(pkts[i]), ++ rte_pktmbuf_iova(pkts_copy[i]), ++ rte_pktmbuf_data_len(pkts[i]), 0); ++ ++ if (ret < 0) ++ break; ++ ++ dma->bufs[ret & MBUF_RING_MASK] = pkts[i]; ++ dma->copies[ret & MBUF_RING_MASK] = pkts_copy[i]; ++ } ++ ++ ret = i; ++ return ret; ++} ++ ++static inline uint32_t ++dma_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[], ++ uint32_t num, uint32_t step, uint16_t dev_id) ++{ ++ uint32_t i, k, m, n; ++ ++ k = 0; ++ for (i = 0; i < num; i += m) { ++ ++ m = RTE_MIN(step, num - i); ++ n = dma_enqueue_packets(pkts + i, pkts_copy + i, m, dev_id); ++ k += n; ++ if (n > 0) ++ rte_dma_submit(dev_id, 0); ++ ++ /* don't try to enqueue more if HW queue is full */ ++ if (n != m) ++ break; ++ } ++ ++ return k; ++} ++ ++static inline uint32_t ++dma_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num, ++ uint16_t dev_id) ++{ ++ struct dma_bufs *dma = &dma_bufs[dev_id]; ++ uint16_t nb_dq, filled; ++ /* Dequeue the mbufs from DMA device. Since all memory ++ * is DPDK pinned memory and therefore all addresses should ++ * be valid, we don't check for copy errors ++ */ ++ nb_dq = rte_dma_completed(dev_id, 0, num, NULL, NULL); ++ ++ /* Return early if no work to do */ ++ if (unlikely(nb_dq == 0)) ++ return nb_dq; ++ ++ /* Populate pkts_copy with the copies bufs from dma->copies for tx */ ++ for (filled = 0; filled < nb_dq; filled++) { ++ src[filled] = dma->bufs[(dma->sent + filled) & MBUF_RING_MASK]; ++ dst[filled] = dma->copies[(dma->sent + filled) & MBUF_RING_MASK]; ++ } ++ dma->sent += nb_dq; ++ ++ return filled; ++ ++} ++ ++/* Receive packets on one port and enqueue to dmadev or rte_ring. 8< */ ++static void ++dma_rx_port(struct rxtx_port_config *rx_config) ++{ ++ int32_t ret; ++ uint32_t nb_rx, nb_enq, i, j; ++ struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; ++ struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST]; ++ ++ for (i = 0; i < rx_config->nb_queues; i++) { ++ ++ nb_rx = rte_eth_rx_burst(rx_config->rxtx_port, i, ++ pkts_burst, MAX_PKT_BURST); ++ ++ if (nb_rx == 0) ++ continue; ++ ++ port_statistics.rx[rx_config->rxtx_port] += nb_rx; ++ ++ ret = rte_mempool_get_bulk(dma_pktmbuf_pool, ++ (void *)pkts_burst_copy, nb_rx); ++ ++ if (unlikely(ret < 0)) ++ rte_exit(EXIT_FAILURE, ++ "Unable to allocate memory.\n"); ++ ++ for (j = 0; j < nb_rx; j++) ++ pktmbuf_metadata_copy(pkts_burst[j], ++ pkts_burst_copy[j]); ++ ++ if (copy_mode == COPY_MODE_DMA_NUM) { ++ /* enqueue packets for hardware copy */ ++ nb_enq = dma_enqueue(pkts_burst, pkts_burst_copy, ++ nb_rx, dma_batch_sz, rx_config->dmadev_ids[i]); ++ ++ /* free any not enqueued packets. 
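++			 * Both the source mbufs and their pre-allocated copies
++			 * are returned to the mempool, and the drop is counted
++			 * per port.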
*/ ++ rte_mempool_put_bulk(dma_pktmbuf_pool, ++ (void *)&pkts_burst[nb_enq], ++ nb_rx - nb_enq); ++ rte_mempool_put_bulk(dma_pktmbuf_pool, ++ (void *)&pkts_burst_copy[nb_enq], ++ nb_rx - nb_enq); ++ ++ port_statistics.copy_dropped[rx_config->rxtx_port] += ++ (nb_rx - nb_enq); ++ ++ /* get completed copies */ ++ nb_rx = dma_dequeue(pkts_burst, pkts_burst_copy, ++ MAX_PKT_BURST, rx_config->dmadev_ids[i]); ++ } else { ++ /* Perform packet software copy, free source packets */ ++ for (j = 0; j < nb_rx; j++) ++ pktmbuf_sw_copy(pkts_burst[j], ++ pkts_burst_copy[j]); ++ } ++ ++ rte_mempool_put_bulk(dma_pktmbuf_pool, ++ (void *)pkts_burst, nb_rx); ++ ++ nb_enq = rte_ring_enqueue_burst(rx_config->rx_to_tx_ring, ++ (void *)pkts_burst_copy, nb_rx, NULL); ++ ++ /* Free any not enqueued packets. */ ++ rte_mempool_put_bulk(dma_pktmbuf_pool, ++ (void *)&pkts_burst_copy[nb_enq], ++ nb_rx - nb_enq); ++ ++ port_statistics.copy_dropped[rx_config->rxtx_port] += ++ (nb_rx - nb_enq); ++ } ++} ++/* >8 End of receive packets on one port and enqueue to dmadev or rte_ring. */ ++ ++/* Transmit packets from dmadev/rte_ring for one port. 8< */ ++static void ++dma_tx_port(struct rxtx_port_config *tx_config) ++{ ++ uint32_t i, j, nb_dq, nb_tx; ++ struct rte_mbuf *mbufs[MAX_PKT_BURST]; ++ ++ for (i = 0; i < tx_config->nb_queues; i++) { ++ ++ /* Dequeue the mbufs from rx_to_tx_ring. */ ++ nb_dq = rte_ring_dequeue_burst(tx_config->rx_to_tx_ring, ++ (void *)mbufs, MAX_PKT_BURST, NULL); ++ if (nb_dq == 0) ++ continue; ++ ++ /* Update macs if enabled */ ++ if (mac_updating) { ++ for (j = 0; j < nb_dq; j++) ++ update_mac_addrs(mbufs[j], ++ tx_config->rxtx_port); ++ } ++ ++ nb_tx = rte_eth_tx_burst(tx_config->rxtx_port, 0, ++ (void *)mbufs, nb_dq); ++ ++ port_statistics.tx[tx_config->rxtx_port] += nb_tx; ++ ++ /* Free any unsent packets. */ ++ if (unlikely(nb_tx < nb_dq)) ++ rte_mempool_put_bulk(dma_pktmbuf_pool, ++ (void *)&mbufs[nb_tx], nb_dq - nb_tx); ++ } ++} ++/* >8 End of transmitting packets from dmadev. */ ++ ++/* Main rx processing loop for dmadev. */ ++static void ++rx_main_loop(void) ++{ ++ uint16_t i; ++ uint16_t nb_ports = cfg.nb_ports; ++ ++ RTE_LOG(INFO, DMA, "Entering main rx loop for copy on lcore %u\n", ++ rte_lcore_id()); ++ ++ while (!force_quit) ++ for (i = 0; i < nb_ports; i++) ++ dma_rx_port(&cfg.ports[i]); ++} ++ ++/* Main tx processing loop for hardware copy. */ ++static void ++tx_main_loop(void) ++{ ++ uint16_t i; ++ uint16_t nb_ports = cfg.nb_ports; ++ ++ RTE_LOG(INFO, DMA, "Entering main tx loop for copy on lcore %u\n", ++ rte_lcore_id()); ++ ++ while (!force_quit) ++ for (i = 0; i < nb_ports; i++) ++ dma_tx_port(&cfg.ports[i]); ++} ++ ++/* Main rx and tx loop if only one worker lcore available */ ++static void ++rxtx_main_loop(void) ++{ ++ uint16_t i; ++ uint16_t nb_ports = cfg.nb_ports; ++ ++ RTE_LOG(INFO, DMA, "Entering main rx and tx loop for copy on" ++ " lcore %u\n", rte_lcore_id()); ++ ++ while (!force_quit) ++ for (i = 0; i < nb_ports; i++) { ++ dma_rx_port(&cfg.ports[i]); ++ dma_tx_port(&cfg.ports[i]); ++ } ++} ++ ++/* Start processing for each lcore. 
8< */ ++static void start_forwarding_cores(void) ++{ ++ uint32_t lcore_id = rte_lcore_id(); ++ ++ RTE_LOG(INFO, DMA, "Entering %s on lcore %u\n", ++ __func__, rte_lcore_id()); ++ ++ if (cfg.nb_lcores == 1) { ++ lcore_id = rte_get_next_lcore(lcore_id, true, true); ++ rte_eal_remote_launch((lcore_function_t *)rxtx_main_loop, ++ NULL, lcore_id); ++ } else if (cfg.nb_lcores > 1) { ++ lcore_id = rte_get_next_lcore(lcore_id, true, true); ++ rte_eal_remote_launch((lcore_function_t *)rx_main_loop, ++ NULL, lcore_id); ++ ++ lcore_id = rte_get_next_lcore(lcore_id, true, true); ++ rte_eal_remote_launch((lcore_function_t *)tx_main_loop, NULL, ++ lcore_id); ++ } ++} ++/* >8 End of starting to processfor each lcore. */ ++ ++/* Display usage */ ++static void ++dma_usage(const char *prgname) ++{ ++ printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n" ++ " -b --dma-batch-size: number of requests per DMA batch\n" ++ " -f --max-frame-size: max frame size\n" ++ " -p --portmask: hexadecimal bitmask of ports to configure\n" ++ " -q NQ: number of RX queues per port (default is 1)\n" ++ " --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n" ++ " When enabled:\n" ++ " - The source MAC address is replaced by the TX port MAC address\n" ++ " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n" ++ " -c --copy-type CT: type of copy: sw|hw\n" ++ " -s --ring-size RS: size of dmadev descriptor ring for hardware copy mode or rte_ring for software copy mode\n" ++ " -i --stats-interval SI: interval, in seconds, between stats prints (default is 1)\n", ++ prgname); ++} ++ ++static int ++dma_parse_portmask(const char *portmask) ++{ ++ char *end = NULL; ++ unsigned long pm; ++ ++ /* Parse hexadecimal string */ ++ pm = strtoul(portmask, &end, 16); ++ if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) ++ return 0; ++ ++ return pm; ++} ++ ++static copy_mode_t ++dma_parse_copy_mode(const char *copy_mode) ++{ ++ if (strcmp(copy_mode, COPY_MODE_SW) == 0) ++ return COPY_MODE_SW_NUM; ++ else if (strcmp(copy_mode, COPY_MODE_DMA) == 0) ++ return COPY_MODE_DMA_NUM; ++ ++ return COPY_MODE_INVALID_NUM; ++} ++ ++/* Parse the argument given in the command line of the application */ ++static int ++dma_parse_args(int argc, char **argv, unsigned int nb_ports) ++{ ++ static const char short_options[] = ++ "b:" /* dma batch size */ ++ "c:" /* copy type (sw|hw) */ ++ "f:" /* max frame size */ ++ "p:" /* portmask */ ++ "q:" /* number of RX queues per port */ ++ "s:" /* ring size */ ++ "i:" /* interval, in seconds, between stats prints */ ++ ; ++ ++ static const struct option lgopts[] = { ++ {CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1}, ++ {CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0}, ++ {CMD_LINE_OPT_PORTMASK, required_argument, NULL, 'p'}, ++ {CMD_LINE_OPT_NB_QUEUE, required_argument, NULL, 'q'}, ++ {CMD_LINE_OPT_COPY_TYPE, required_argument, NULL, 'c'}, ++ {CMD_LINE_OPT_RING_SIZE, required_argument, NULL, 's'}, ++ {CMD_LINE_OPT_BATCH_SIZE, required_argument, NULL, 'b'}, ++ {CMD_LINE_OPT_FRAME_SIZE, required_argument, NULL, 'f'}, ++ {CMD_LINE_OPT_STATS_INTERVAL, required_argument, NULL, 'i'}, ++ {NULL, 0, 0, 0} ++ }; ++ ++ const unsigned int default_port_mask = (1 << nb_ports) - 1; ++ int opt, ret; ++ char **argvopt; ++ int option_index; ++ char *prgname = argv[0]; ++ ++ dma_enabled_port_mask = default_port_mask; ++ argvopt = argv; ++ ++ while ((opt = getopt_long(argc, argvopt, short_options, ++ lgopts, &option_index)) != EOF) { ++ ++ switch (opt) { ++ case 
'b': ++ dma_batch_sz = atoi(optarg); ++ if (dma_batch_sz > MAX_PKT_BURST) { ++ printf("Invalid dma batch size, %s.\n", optarg); ++ dma_usage(prgname); ++ return -1; ++ } ++ break; ++ case 'f': ++ max_frame_size = atoi(optarg); ++ if (max_frame_size > RTE_ETHER_MAX_JUMBO_FRAME_LEN) { ++ printf("Invalid max frame size, %s.\n", optarg); ++ dma_usage(prgname); ++ return -1; ++ } ++ break; ++ ++ /* portmask */ ++ case 'p': ++ dma_enabled_port_mask = dma_parse_portmask(optarg); ++ if (dma_enabled_port_mask & ~default_port_mask || ++ dma_enabled_port_mask <= 0) { ++ printf("Invalid portmask, %s, suggest 0x%x\n", ++ optarg, default_port_mask); ++ dma_usage(prgname); ++ return -1; ++ } ++ break; ++ ++ case 'q': ++ nb_queues = atoi(optarg); ++ if (nb_queues == 0 || nb_queues > MAX_RX_QUEUES_COUNT) { ++ printf("Invalid RX queues number %s. Max %u\n", ++ optarg, MAX_RX_QUEUES_COUNT); ++ dma_usage(prgname); ++ return -1; ++ } ++ break; ++ ++ case 'c': ++ copy_mode = dma_parse_copy_mode(optarg); ++ if (copy_mode == COPY_MODE_INVALID_NUM) { ++ printf("Invalid copy type. Use: sw, hw\n"); ++ dma_usage(prgname); ++ return -1; ++ } ++ break; ++ ++ case 's': ++ ring_size = atoi(optarg); ++ if (ring_size == 0) { ++ printf("Invalid ring size, %s.\n", optarg); ++ dma_usage(prgname); ++ return -1; ++ } ++ /* ring_size must be less-than or equal to MBUF_RING_SIZE ++ * to avoid overwriting bufs ++ */ ++ if (ring_size > MBUF_RING_SIZE) { ++ printf("Max ring_size is %d, setting ring_size to max", ++ MBUF_RING_SIZE); ++ ring_size = MBUF_RING_SIZE; ++ } ++ break; ++ ++ case 'i': ++ stats_interval = atoi(optarg); ++ if (stats_interval == 0) { ++ printf("Invalid stats interval, setting to 1\n"); ++ stats_interval = 1; /* set to default */ ++ } ++ break; ++ ++ /* long options */ ++ case 0: ++ break; ++ ++ default: ++ dma_usage(prgname); ++ return -1; ++ } ++ } ++ ++ printf("MAC updating %s\n", mac_updating ? "enabled" : "disabled"); ++ if (optind >= 0) ++ argv[optind - 1] = prgname; ++ ++ ret = optind - 1; ++ optind = 1; /* reset getopt lib */ ++ return ret; ++} ++ ++/* check link status, return true if at least one port is up */ ++static int ++check_link_status(uint32_t port_mask) ++{ ++ uint16_t portid; ++ struct rte_eth_link link; ++ int ret, link_status = 0; ++ char link_status_text[RTE_ETH_LINK_MAX_STR_LEN]; ++ ++ printf("\nChecking link status\n"); ++ RTE_ETH_FOREACH_DEV(portid) { ++ if ((port_mask & (1 << portid)) == 0) ++ continue; ++ ++ memset(&link, 0, sizeof(link)); ++ ret = rte_eth_link_get(portid, &link); ++ if (ret < 0) { ++ printf("Port %u link get failed: err=%d\n", ++ portid, ret); ++ continue; ++ } ++ ++ /* Print link status */ ++ rte_eth_link_to_str(link_status_text, ++ sizeof(link_status_text), &link); ++ printf("Port %d %s\n", portid, link_status_text); ++ ++ if (link.link_status) ++ link_status = 1; ++ } ++ return link_status; ++} ++ ++/* Configuration of device. 
8< */ ++static void ++configure_dmadev_queue(uint32_t dev_id) ++{ ++ struct rte_dma_info info; ++ struct rte_dma_conf dev_config = { .nb_vchans = 1 }; ++ struct rte_dma_vchan_conf qconf = { ++ .direction = RTE_DMA_DIR_MEM_TO_MEM, ++ .nb_desc = ring_size ++ }; ++ uint16_t vchan = 0; ++ ++ if (rte_dma_configure(dev_id, &dev_config) != 0) ++ rte_exit(EXIT_FAILURE, "Error with rte_dma_configure()\n"); ++ ++ if (rte_dma_vchan_setup(dev_id, vchan, &qconf) != 0) { ++ printf("Error with queue configuration\n"); ++ rte_panic(); ++ } ++ rte_dma_info_get(dev_id, &info); ++ if (info.nb_vchans != 1) { ++ printf("Error, no configured queues reported on device id %u\n", dev_id); ++ rte_panic(); ++ } ++ if (rte_dma_start(dev_id) != 0) ++ rte_exit(EXIT_FAILURE, "Error with rte_dma_start()\n"); ++} ++/* >8 End of configuration of device. */ ++ ++/* Using dmadev API functions. 8< */ ++static void ++assign_dmadevs(void) ++{ ++ uint16_t nb_dmadev = 0; ++ int16_t dev_id = rte_dma_next_dev(0); ++ uint32_t i, j; ++ ++ for (i = 0; i < cfg.nb_ports; i++) { ++ for (j = 0; j < cfg.ports[i].nb_queues; j++) { ++ if (dev_id == -1) ++ goto end; ++ ++ cfg.ports[i].dmadev_ids[j] = dev_id; ++ configure_dmadev_queue(cfg.ports[i].dmadev_ids[j]); ++ dev_id = rte_dma_next_dev(dev_id + 1); ++ ++nb_dmadev; ++ } ++ } ++end: ++ if (nb_dmadev < cfg.nb_ports * cfg.ports[0].nb_queues) ++ rte_exit(EXIT_FAILURE, ++ "Not enough dmadevs (%u) for all queues (%u).\n", ++ nb_dmadev, cfg.nb_ports * cfg.ports[0].nb_queues); ++ RTE_LOG(INFO, DMA, "Number of used dmadevs: %u.\n", nb_dmadev); ++} ++/* >8 End of using dmadev API functions. */ ++ ++/* Assign ring structures for packet exchanging. 8< */ ++static void ++assign_rings(void) ++{ ++ uint32_t i; ++ ++ for (i = 0; i < cfg.nb_ports; i++) { ++ char ring_name[RTE_RING_NAMESIZE]; ++ ++ snprintf(ring_name, sizeof(ring_name), "rx_to_tx_ring_%u", i); ++ /* Create ring for inter core communication */ ++ cfg.ports[i].rx_to_tx_ring = rte_ring_create( ++ ring_name, ring_size, ++ rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ); ++ ++ if (cfg.ports[i].rx_to_tx_ring == NULL) ++ rte_exit(EXIT_FAILURE, "Ring create failed: %s\n", ++ rte_strerror(rte_errno)); ++ } ++} ++/* >8 End of assigning ring structures for packet exchanging. */ ++ ++/* ++ * Initializes a given port using global settings and with the RX buffers ++ * coming from the mbuf_pool passed as a parameter. ++ */ ++static inline void ++port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues) ++{ ++ /* Configuring port to use RSS for multiple RX queues. 8< */ ++ static const struct rte_eth_conf port_conf = { ++ .rxmode = { ++ .mq_mode = RTE_ETH_MQ_RX_RSS, ++ }, ++ .rx_adv_conf = { ++ .rss_conf = { ++ .rss_key = NULL, ++ .rss_hf = RTE_ETH_RSS_PROTO_MASK, ++ } ++ } ++ }; ++ /* >8 End of configuring port to use RSS for multiple RX queues. */ ++ ++ struct rte_eth_rxconf rxq_conf; ++ struct rte_eth_txconf txq_conf; ++ struct rte_eth_conf local_port_conf = port_conf; ++ struct rte_eth_dev_info dev_info; ++ int ret, i; ++ ++ if (max_frame_size > local_port_conf.rxmode.mtu) ++ local_port_conf.rxmode.mtu = max_frame_size; ++ ++ /* Skip ports that are not enabled */ ++ if ((dma_enabled_port_mask & (1 << portid)) == 0) { ++ printf("Skipping disabled port %u\n", portid); ++ return; ++ } ++ ++ /* Init port */ ++ printf("Initializing port %u... 
", portid); ++ fflush(stdout); ++ ret = rte_eth_dev_info_get(portid, &dev_info); ++ if (ret < 0) ++ rte_exit(EXIT_FAILURE, "Cannot get device info: %s, port=%u\n", ++ rte_strerror(-ret), portid); ++ ++ local_port_conf.rx_adv_conf.rss_conf.rss_hf &= ++ dev_info.flow_type_rss_offloads; ++ ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf); ++ if (ret < 0) ++ rte_exit(EXIT_FAILURE, "Cannot configure device:" ++ " err=%d, port=%u\n", ret, portid); ++ ++ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, ++ &nb_txd); ++ if (ret < 0) ++ rte_exit(EXIT_FAILURE, ++ "Cannot adjust number of descriptors: err=%d, port=%u\n", ++ ret, portid); ++ ++ rte_eth_macaddr_get(portid, &dma_ports_eth_addr[portid]); ++ ++ /* Init RX queues */ ++ rxq_conf = dev_info.default_rxconf; ++ rxq_conf.offloads = local_port_conf.rxmode.offloads; ++ for (i = 0; i < nb_queues; i++) { ++ ret = rte_eth_rx_queue_setup(portid, i, nb_rxd, ++ rte_eth_dev_socket_id(portid), &rxq_conf, ++ mbuf_pool); ++ if (ret < 0) ++ rte_exit(EXIT_FAILURE, ++ "rte_eth_rx_queue_setup:err=%d,port=%u, queue_id=%u\n", ++ ret, portid, i); ++ } ++ ++ /* Init one TX queue on each port */ ++ txq_conf = dev_info.default_txconf; ++ txq_conf.offloads = local_port_conf.txmode.offloads; ++ ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, ++ rte_eth_dev_socket_id(portid), ++ &txq_conf); ++ if (ret < 0) ++ rte_exit(EXIT_FAILURE, ++ "rte_eth_tx_queue_setup:err=%d,port=%u\n", ++ ret, portid); ++ ++ /* Initialize TX buffers */ ++ tx_buffer[portid] = rte_zmalloc_socket("tx_buffer", ++ RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0, ++ rte_eth_dev_socket_id(portid)); ++ if (tx_buffer[portid] == NULL) ++ rte_exit(EXIT_FAILURE, ++ "Cannot allocate buffer for tx on port %u\n", ++ portid); ++ ++ rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST); ++ ++ ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid], ++ rte_eth_tx_buffer_count_callback, ++ &port_statistics.tx_dropped[portid]); ++ if (ret < 0) ++ rte_exit(EXIT_FAILURE, ++ "Cannot set error callback for tx buffer on port %u\n", ++ portid); ++ ++ /* Start device. 8< */ ++ ret = rte_eth_dev_start(portid); ++ if (ret < 0) ++ rte_exit(EXIT_FAILURE, ++ "rte_eth_dev_start:err=%d, port=%u\n", ++ ret, portid); ++ /* >8 End of starting device. */ ++ ++ /* RX port is set in promiscuous mode. 8< */ ++ rte_eth_promiscuous_enable(portid); ++ /* >8 End of RX port is set in promiscuous mode. */ ++ ++ printf("Port %u, MAC address: " RTE_ETHER_ADDR_PRT_FMT "\n\n", ++ portid, ++ RTE_ETHER_ADDR_BYTES(&dma_ports_eth_addr[portid])); ++ ++ cfg.ports[cfg.nb_ports].rxtx_port = portid; ++ cfg.ports[cfg.nb_ports++].nb_queues = nb_queues; ++} ++ ++/* Get a device dump for each device being used by the application */ ++static void ++dmadev_dump(void) ++{ ++ uint32_t i, j; ++ ++ if (copy_mode != COPY_MODE_DMA_NUM) ++ return; ++ ++ for (i = 0; i < cfg.nb_ports; i++) ++ for (j = 0; j < cfg.ports[i].nb_queues; j++) ++ rte_dma_dump(cfg.ports[i].dmadev_ids[j], stdout); ++} ++ ++static void ++signal_handler(int signum) ++{ ++ if (signum == SIGINT || signum == SIGTERM) { ++ printf("\n\nSignal %d received, preparing to exit...\n", ++ signum); ++ force_quit = true; ++ } else if (signum == SIGUSR1) { ++ dmadev_dump(); ++ } ++} ++ ++int ++main(int argc, char **argv) ++{ ++ int ret; ++ uint16_t nb_ports, portid; ++ uint32_t i; ++ unsigned int nb_mbufs; ++ size_t sz; ++ ++ /* Init EAL. 8< */ ++ ret = rte_eal_init(argc, argv); ++ if (ret < 0) ++ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n"); ++ /* >8 End of init EAL. 
*/ ++ argc -= ret; ++ argv += ret; ++ ++ force_quit = false; ++ signal(SIGINT, signal_handler); ++ signal(SIGTERM, signal_handler); ++ signal(SIGUSR1, signal_handler); ++ ++ nb_ports = rte_eth_dev_count_avail(); ++ if (nb_ports == 0) ++ rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); ++ ++ /* Parse application arguments (after the EAL ones) */ ++ ret = dma_parse_args(argc, argv, nb_ports); ++ if (ret < 0) ++ rte_exit(EXIT_FAILURE, "Invalid DMA arguments\n"); ++ ++ /* Allocates mempool to hold the mbufs. 8< */ ++ nb_mbufs = RTE_MAX(nb_ports * (nb_queues * (nb_rxd + nb_txd + ++ 4 * MAX_PKT_BURST + ring_size) + ring_size + ++ rte_lcore_count() * MEMPOOL_CACHE_SIZE), ++ MIN_POOL_SIZE); ++ ++ /* Create the mbuf pool */ ++ sz = max_frame_size + RTE_PKTMBUF_HEADROOM; ++ sz = RTE_MAX(sz, (size_t)RTE_MBUF_DEFAULT_BUF_SIZE); ++ dma_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs, ++ MEMPOOL_CACHE_SIZE, 0, sz, rte_socket_id()); ++ if (dma_pktmbuf_pool == NULL) ++ rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n"); ++ /* >8 End of allocates mempool to hold the mbufs. */ ++ ++ /* Initialize each port. 8< */ ++ cfg.nb_ports = 0; ++ RTE_ETH_FOREACH_DEV(portid) ++ port_init(portid, dma_pktmbuf_pool, nb_queues); ++ /* >8 End of initializing each port. */ ++ ++ /* Initialize port xstats */ ++ memset(&port_statistics, 0, sizeof(port_statistics)); ++ ++ /* Assigning each port resources. 8< */ ++ while (!check_link_status(dma_enabled_port_mask) && !force_quit) ++ sleep(1); ++ ++ /* Check if there is enough lcores for all ports. */ ++ cfg.nb_lcores = rte_lcore_count() - 1; ++ if (cfg.nb_lcores < 1) ++ rte_exit(EXIT_FAILURE, ++ "There should be at least one worker lcore.\n"); ++ ++ if (copy_mode == COPY_MODE_DMA_NUM) ++ assign_dmadevs(); ++ ++ assign_rings(); ++ /* >8 End of assigning each port resources. */ ++ ++ start_forwarding_cores(); ++ /* main core prints stats while other cores forward */ ++ print_stats(argv[0]); ++ ++ /* force_quit is true when we get here */ ++ rte_eal_mp_wait_lcore(); ++ ++ uint32_t j; ++ for (i = 0; i < cfg.nb_ports; i++) { ++ printf("Closing port %d\n", cfg.ports[i].rxtx_port); ++ ret = rte_eth_dev_stop(cfg.ports[i].rxtx_port); ++ if (ret != 0) ++ RTE_LOG(ERR, DMA, "rte_eth_dev_stop: err=%s, port=%u\n", ++ rte_strerror(-ret), cfg.ports[i].rxtx_port); ++ ++ rte_eth_dev_close(cfg.ports[i].rxtx_port); ++ if (copy_mode == COPY_MODE_DMA_NUM) { ++ for (j = 0; j < cfg.ports[i].nb_queues; j++) { ++ printf("Stopping dmadev %d\n", ++ cfg.ports[i].dmadev_ids[j]); ++ rte_dma_stop(cfg.ports[i].dmadev_ids[j]); ++ } ++ } else /* copy_mode == COPY_MODE_SW_NUM */ ++ rte_ring_free(cfg.ports[i].rx_to_tx_ring); ++ } ++ ++ /* clean up the EAL */ ++ rte_eal_cleanup(); ++ ++ printf("Bye...\n"); ++ return 0; ++} +diff --git a/examples/dma/meson.build b/examples/dma/meson.build +new file mode 100644 +index 000000000..f70b5d349 +--- /dev/null ++++ b/examples/dma/meson.build +@@ -0,0 +1,15 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright(c) 2019-2021 Intel Corporation ++ ++# meson file, for building this example as part of a main DPDK build. 
++# ++# To build this example as a standalone application with an already-installed ++# DPDK instance, use 'make' ++ ++allow_experimental_apis = true ++ ++deps += 'dmadev' ++ ++sources = files( ++ 'dmafwd.c' ++) +diff --git a/examples/meson.build b/examples/meson.build +index 46ec80919..6c57db163 100644 +--- a/examples/meson.build ++++ b/examples/meson.build +@@ -12,7 +12,7 @@ execinfo = cc.find_library('execinfo', required: false) + all_examples = [ + 'bbdev_app', 'bond', + 'cmdline', +- 'distributor', 'ethtool', ++ 'distributor', 'dma', 'ethtool', + 'eventdev_pipeline', + 'fips_validation', 'flow_classify', + 'flow_filtering', 'helloworld', +diff --git a/lib/librte_dmadev/meson.build b/lib/librte_dmadev/meson.build +new file mode 100644 +index 000000000..8d2ed5261 +--- /dev/null ++++ b/lib/librte_dmadev/meson.build +@@ -0,0 +1,5 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright(c) 2021 HiSilicon Limited. ++ ++sources = files('rte_dmadev.c') ++headers = files('rte_dmadev.h', 'rte_dmadev_core.h', 'rte_dmadev_pmd.h') +diff --git a/lib/librte_dmadev/rte_dmadev.c b/lib/librte_dmadev/rte_dmadev.c +new file mode 100644 +index 000000000..7097fe41a +--- /dev/null ++++ b/lib/librte_dmadev/rte_dmadev.c +@@ -0,0 +1,866 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2021 HiSilicon Limited ++ * Copyright(c) 2021 Intel Corporation ++ */ ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "rte_dmadev.h" ++#include "rte_dmadev_pmd.h" ++ ++static int16_t dma_devices_max; ++ ++struct rte_dma_fp_object *rte_dma_fp_objs; ++static struct rte_dma_dev *rte_dma_devices; ++static struct { ++ /* Hold the dev_max information of the primary process. This field is ++ * set by the primary process and is read by the secondary process. ++ */ ++ int16_t dev_max; ++ struct rte_dma_dev_data data[0]; ++} *dma_devices_shared_data; ++ ++RTE_LOG_REGISTER(rte_dma_logtype, lib.dma, INFO); ++#define RTE_DMA_LOG(level, ...) \ ++ rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \ ++ RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,))) ++ ++int ++rte_dma_dev_max(size_t dev_max) ++{ ++ /* This function may be called before rte_eal_init(), so no rte library ++ * function can be called in this function. 
++ */ ++ if (dev_max == 0 || dev_max > INT16_MAX) ++ return -EINVAL; ++ ++ if (dma_devices_max > 0) ++ return -EINVAL; ++ ++ dma_devices_max = dev_max; ++ ++ return 0; ++} ++ ++int16_t ++rte_dma_next_dev(int16_t start_dev_id) ++{ ++ int16_t dev_id = start_dev_id; ++ while (dev_id < dma_devices_max && rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED) ++ dev_id++; ++ ++ if (dev_id < dma_devices_max) ++ return dev_id; ++ ++ return -1; ++} ++ ++static int ++dma_check_name(const char *name) ++{ ++ size_t name_len; ++ ++ if (name == NULL) { ++ RTE_DMA_LOG(ERR, "Name can't be NULL"); ++ return -EINVAL; ++ } ++ ++ name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN); ++ if (name_len == 0) { ++ RTE_DMA_LOG(ERR, "Zero length DMA device name"); ++ return -EINVAL; ++ } ++ if (name_len >= RTE_DEV_NAME_MAX_LEN) { ++ RTE_DMA_LOG(ERR, "DMA device name is too long"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int16_t ++dma_find_free_id(void) ++{ ++ int16_t i; ++ ++ if (rte_dma_devices == NULL || dma_devices_shared_data == NULL) ++ return -1; ++ ++ for (i = 0; i < dma_devices_max; i++) { ++ if (dma_devices_shared_data->data[i].dev_name[0] == '\0') ++ return i; ++ } ++ ++ return -1; ++} ++ ++static struct rte_dma_dev* ++dma_find_by_name(const char *name) ++{ ++ int16_t i; ++ ++ if (rte_dma_devices == NULL) ++ return NULL; ++ ++ for (i = 0; i < dma_devices_max; i++) { ++ if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) && ++ (!strcmp(name, rte_dma_devices[i].data->dev_name))) ++ return &rte_dma_devices[i]; ++ } ++ ++ return NULL; ++} ++ ++static void dma_fp_object_dummy(struct rte_dma_fp_object *obj); ++ ++static int ++dma_fp_data_prepare(void) ++{ ++ size_t size; ++ void *ptr; ++ int i; ++ ++ if (rte_dma_fp_objs != NULL) ++ return 0; ++ ++ /* Fast-path object must align cacheline, but the return value of malloc ++ * may not be aligned to the cache line. Therefore, extra memory is ++ * applied for realignment. ++ * note: We do not call posix_memalign/aligned_alloc because it is ++ * version dependent on libc. 
++ */ ++ size = dma_devices_max * sizeof(struct rte_dma_fp_object) + ++ RTE_CACHE_LINE_SIZE; ++ ptr = malloc(size); ++ if (ptr == NULL) ++ return -ENOMEM; ++ memset(ptr, 0, size); ++ ++ rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE); ++ for (i = 0; i < dma_devices_max; i++) ++ dma_fp_object_dummy(&rte_dma_fp_objs[i]); ++ ++ return 0; ++} ++ ++static int ++dma_dev_data_prepare(void) ++{ ++ size_t size; ++ ++ if (rte_dma_devices != NULL) ++ return 0; ++ ++ size = dma_devices_max * sizeof(struct rte_dma_dev); ++ rte_dma_devices = malloc(size); ++ if (rte_dma_devices == NULL) ++ return -ENOMEM; ++ memset(rte_dma_devices, 0, size); ++ ++ return 0; ++} ++ ++static int ++dma_shared_data_prepare(void) ++{ ++ const char *mz_name = "rte_dma_dev_data"; ++ const struct rte_memzone *mz; ++ size_t size; ++ ++ if (dma_devices_shared_data != NULL) ++ return 0; ++ ++ size = sizeof(*dma_devices_shared_data) + ++ sizeof(struct rte_dma_dev_data) * dma_devices_max; ++ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) ++ mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0); ++ else ++ mz = rte_memzone_lookup(mz_name); ++ if (mz == NULL) ++ return -ENOMEM; ++ ++ dma_devices_shared_data = mz->addr; ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) { ++ memset(dma_devices_shared_data, 0, size); ++ dma_devices_shared_data->dev_max = dma_devices_max; ++ } else { ++ dma_devices_max = dma_devices_shared_data->dev_max; ++ } ++ ++ return 0; ++} ++ ++static int ++dma_data_prepare(void) ++{ ++ int ret; ++ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) { ++ if (dma_devices_max == 0) ++ dma_devices_max = RTE_DMADEV_DEFAULT_MAX; ++ ret = dma_fp_data_prepare(); ++ if (ret) ++ return ret; ++ ret = dma_dev_data_prepare(); ++ if (ret) ++ return ret; ++ ret = dma_shared_data_prepare(); ++ if (ret) ++ return ret; ++ } else { ++ ret = dma_shared_data_prepare(); ++ if (ret) ++ return ret; ++ ret = dma_fp_data_prepare(); ++ if (ret) ++ return ret; ++ ret = dma_dev_data_prepare(); ++ if (ret) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static struct rte_dma_dev * ++dma_allocate_primary(const char *name, int numa_node, size_t private_data_size) ++{ ++ struct rte_dma_dev *dev; ++ void *dev_private; ++ int16_t dev_id; ++ int ret; ++ ++ ret = dma_data_prepare(); ++ if (ret < 0) { ++ RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data"); ++ return NULL; ++ } ++ ++ dev = dma_find_by_name(name); ++ if (dev != NULL) { ++ RTE_DMA_LOG(ERR, "DMA device already allocated"); ++ return NULL; ++ } ++ ++ dev_private = rte_zmalloc_socket(name, private_data_size, ++ RTE_CACHE_LINE_SIZE, numa_node); ++ if (dev_private == NULL) { ++ RTE_DMA_LOG(ERR, "Cannot allocate private data"); ++ return NULL; ++ } ++ ++ dev_id = dma_find_free_id(); ++ if (dev_id < 0) { ++ RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices"); ++ rte_free(dev_private); ++ return NULL; ++ } ++ ++ dev = &rte_dma_devices[dev_id]; ++ dev->data = &dma_devices_shared_data->data[dev_id]; ++ rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name)); ++ dev->data->dev_id = dev_id; ++ dev->data->numa_node = numa_node; ++ dev->data->dev_private = dev_private; ++ ++ return dev; ++} ++ ++static struct rte_dma_dev * ++dma_attach_secondary(const char *name) ++{ ++ struct rte_dma_dev *dev; ++ int16_t i; ++ int ret; ++ ++ ret = dma_data_prepare(); ++ if (ret < 0) { ++ RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data"); ++ return NULL; ++ } ++ ++ for (i = 0; i < dma_devices_max; i++) { ++ if (!strcmp(dma_devices_shared_data->data[i].dev_name, name)) ++ break; ++ } 
++ if (i == dma_devices_max) { ++ RTE_DMA_LOG(ERR, ++ "Device %s is not driven by the primary process", ++ name); ++ return NULL; ++ } ++ ++ dev = &rte_dma_devices[i]; ++ dev->data = &dma_devices_shared_data->data[i]; ++ ++ return dev; ++} ++ ++static struct rte_dma_dev * ++dma_allocate(const char *name, int numa_node, size_t private_data_size) ++{ ++ struct rte_dma_dev *dev; ++ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) ++ dev = dma_allocate_primary(name, numa_node, private_data_size); ++ else ++ dev = dma_attach_secondary(name); ++ ++ if (dev) { ++ dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id]; ++ dma_fp_object_dummy(dev->fp_obj); ++ } ++ ++ return dev; ++} ++ ++static void ++dma_release(struct rte_dma_dev *dev) ++{ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) { ++ rte_free(dev->data->dev_private); ++ memset(dev->data, 0, sizeof(struct rte_dma_dev_data)); ++ } ++ ++ dma_fp_object_dummy(dev->fp_obj); ++ memset(dev, 0, sizeof(struct rte_dma_dev)); ++} ++ ++struct rte_dma_dev * ++rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size) ++{ ++ struct rte_dma_dev *dev; ++ ++ if (dma_check_name(name) != 0 || private_data_size == 0) ++ return NULL; ++ ++ dev = dma_allocate(name, numa_node, private_data_size); ++ if (dev == NULL) ++ return NULL; ++ ++ dev->state = RTE_DMA_DEV_REGISTERED; ++ ++ return dev; ++} ++ ++int ++rte_dma_pmd_release(const char *name) ++{ ++ struct rte_dma_dev *dev; ++ ++ if (dma_check_name(name) != 0) ++ return -EINVAL; ++ ++ dev = dma_find_by_name(name); ++ if (dev == NULL) ++ return -EINVAL; ++ ++ if (dev->state == RTE_DMA_DEV_READY) ++ return rte_dma_close(dev->data->dev_id); ++ ++ dma_release(dev); ++ return 0; ++} ++ ++int ++rte_dma_get_dev_id_by_name(const char *name) ++{ ++ struct rte_dma_dev *dev; ++ ++ if (dma_check_name(name) != 0) ++ return -EINVAL; ++ ++ dev = dma_find_by_name(name); ++ if (dev == NULL) ++ return -EINVAL; ++ ++ return dev->data->dev_id; ++} ++ ++bool ++rte_dma_is_valid(int16_t dev_id) ++{ ++ return (dev_id >= 0) && (dev_id < dma_devices_max) && ++ rte_dma_devices != NULL && ++ rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED; ++} ++ ++uint16_t ++rte_dma_count_avail(void) ++{ ++ uint16_t count = 0; ++ uint16_t i; ++ ++ if (rte_dma_devices == NULL) ++ return count; ++ ++ for (i = 0; i < dma_devices_max; i++) { ++ if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) ++ count++; ++ } ++ ++ return count; ++} ++ ++int ++rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info) ++{ ++ const struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; ++ int ret; ++ ++ if (!rte_dma_is_valid(dev_id) || dev_info == NULL) ++ return -EINVAL; ++ ++ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP); ++ memset(dev_info, 0, sizeof(struct rte_dma_info)); ++ ret = (*dev->dev_ops->dev_info_get)(dev, dev_info, ++ sizeof(struct rte_dma_info)); ++ if (ret != 0) ++ return ret; ++ ++ dev_info->dev_name = dev->data->dev_name; ++ dev_info->numa_node = dev->device->numa_node; ++ dev_info->nb_vchans = dev->data->dev_conf.nb_vchans; ++ ++ return 0; ++} ++ ++int ++rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf) ++{ ++ struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; ++ struct rte_dma_info dev_info; ++ int ret; ++ ++ if (!rte_dma_is_valid(dev_id) || dev_conf == NULL) ++ return -EINVAL; ++ ++ if (dev->data->dev_started != 0) { ++ RTE_DMA_LOG(ERR, ++ "Device %d must be stopped to allow configuration", ++ dev_id); ++ return -EBUSY; ++ } ++ ++ ret = rte_dma_info_get(dev_id, &dev_info); ++ if (ret != 
0) { ++ RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id); ++ return -EINVAL; ++ } ++ if (dev_conf->nb_vchans == 0) { ++ RTE_DMA_LOG(ERR, ++ "Device %d configure zero vchans", dev_id); ++ return -EINVAL; ++ } ++ if (dev_conf->nb_vchans > dev_info.max_vchans) { ++ RTE_DMA_LOG(ERR, ++ "Device %d configure too many vchans", dev_id); ++ return -EINVAL; ++ } ++ if (dev_conf->enable_silent && ++ !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) { ++ RTE_DMA_LOG(ERR, "Device %d don't support silent", dev_id); ++ return -EINVAL; ++ } ++ ++ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); ++ ret = (*dev->dev_ops->dev_configure)(dev, dev_conf, ++ sizeof(struct rte_dma_conf)); ++ if (ret == 0) ++ memcpy(&dev->data->dev_conf, dev_conf, ++ sizeof(struct rte_dma_conf)); ++ ++ return ret; ++} ++ ++int ++rte_dma_start(int16_t dev_id) ++{ ++ struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; ++ int ret; ++ ++ if (!rte_dma_is_valid(dev_id)) ++ return -EINVAL; ++ ++ if (dev->data->dev_conf.nb_vchans == 0) { ++ RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id); ++ return -EINVAL; ++ } ++ ++ if (dev->data->dev_started != 0) { ++ RTE_DMA_LOG(WARNING, "Device %d already started", dev_id); ++ return 0; ++ } ++ ++ if (dev->dev_ops->dev_start == NULL) ++ goto mark_started; ++ ++ ret = (*dev->dev_ops->dev_start)(dev); ++ if (ret != 0) ++ return ret; ++ ++mark_started: ++ dev->data->dev_started = 1; ++ return 0; ++} ++ ++int ++rte_dma_stop(int16_t dev_id) ++{ ++ struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; ++ int ret; ++ ++ if (!rte_dma_is_valid(dev_id)) ++ return -EINVAL; ++ ++ if (dev->data->dev_started == 0) { ++ RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id); ++ return 0; ++ } ++ ++ if (dev->dev_ops->dev_stop == NULL) ++ goto mark_stopped; ++ ++ ret = (*dev->dev_ops->dev_stop)(dev); ++ if (ret != 0) ++ return ret; ++ ++mark_stopped: ++ dev->data->dev_started = 0; ++ return 0; ++} ++ ++int ++rte_dma_close(int16_t dev_id) ++{ ++ struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; ++ int ret; ++ ++ if (!rte_dma_is_valid(dev_id)) ++ return -EINVAL; ++ ++ /* Device must be stopped before it can be closed */ ++ if (dev->data->dev_started == 1) { ++ RTE_DMA_LOG(ERR, ++ "Device %d must be stopped before closing", dev_id); ++ return -EBUSY; ++ } ++ ++ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); ++ ret = (*dev->dev_ops->dev_close)(dev); ++ if (ret == 0) ++ dma_release(dev); ++ ++ return ret; ++} ++ ++int ++rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan, ++ const struct rte_dma_vchan_conf *conf) ++{ ++ struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; ++ struct rte_dma_info dev_info; ++ bool src_is_dev, dst_is_dev; ++ int ret; ++ ++ if (!rte_dma_is_valid(dev_id) || conf == NULL) ++ return -EINVAL; ++ ++ if (dev->data->dev_started != 0) { ++ RTE_DMA_LOG(ERR, ++ "Device %d must be stopped to allow configuration", ++ dev_id); ++ return -EBUSY; ++ } ++ ++ ret = rte_dma_info_get(dev_id, &dev_info); ++ if (ret != 0) { ++ RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id); ++ return -EINVAL; ++ } ++ if (dev->data->dev_conf.nb_vchans == 0) { ++ RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id); ++ return -EINVAL; ++ } ++ if (vchan >= dev_info.nb_vchans) { ++ RTE_DMA_LOG(ERR, "Device %d vchan out range!", dev_id); ++ return -EINVAL; ++ } ++ if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM && ++ conf->direction != RTE_DMA_DIR_MEM_TO_DEV && ++ conf->direction != RTE_DMA_DIR_DEV_TO_MEM && ++ conf->direction != RTE_DMA_DIR_DEV_TO_DEV) { ++ 
RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id); ++ return -EINVAL; ++ } ++ if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM && ++ !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) { ++ RTE_DMA_LOG(ERR, ++ "Device %d don't support mem2mem transfer", dev_id); ++ return -EINVAL; ++ } ++ if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV && ++ !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) { ++ RTE_DMA_LOG(ERR, ++ "Device %d don't support mem2dev transfer", dev_id); ++ return -EINVAL; ++ } ++ if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM && ++ !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) { ++ RTE_DMA_LOG(ERR, ++ "Device %d don't support dev2mem transfer", dev_id); ++ return -EINVAL; ++ } ++ if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV && ++ !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) { ++ RTE_DMA_LOG(ERR, ++ "Device %d don't support dev2dev transfer", dev_id); ++ return -EINVAL; ++ } ++ if (conf->nb_desc < dev_info.min_desc || ++ conf->nb_desc > dev_info.max_desc) { ++ RTE_DMA_LOG(ERR, ++ "Device %d number of descriptors invalid", dev_id); ++ return -EINVAL; ++ } ++ src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM || ++ conf->direction == RTE_DMA_DIR_DEV_TO_DEV; ++ if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) || ++ (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) { ++ RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id); ++ return -EINVAL; ++ } ++ dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV || ++ conf->direction == RTE_DMA_DIR_DEV_TO_DEV; ++ if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) || ++ (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) { ++ RTE_DMA_LOG(ERR, ++ "Device %d destination port type invalid", dev_id); ++ return -EINVAL; ++ } ++ ++ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP); ++ return (*dev->dev_ops->vchan_setup)(dev, vchan, conf, ++ sizeof(struct rte_dma_vchan_conf)); ++} ++ ++int ++rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats) ++{ ++ const struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; ++ ++ if (!rte_dma_is_valid(dev_id) || stats == NULL) ++ return -EINVAL; ++ ++ if (vchan >= dev->data->dev_conf.nb_vchans && ++ vchan != RTE_DMA_ALL_VCHAN) { ++ RTE_DMA_LOG(ERR, ++ "Device %d vchan %u out of range", dev_id, vchan); ++ return -EINVAL; ++ } ++ ++ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); ++ memset(stats, 0, sizeof(struct rte_dma_stats)); ++ return (*dev->dev_ops->stats_get)(dev, vchan, stats, ++ sizeof(struct rte_dma_stats)); ++} ++ ++int ++rte_dma_stats_reset(int16_t dev_id, uint16_t vchan) ++{ ++ struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; ++ ++ if (!rte_dma_is_valid(dev_id)) ++ return -EINVAL; ++ ++ if (vchan >= dev->data->dev_conf.nb_vchans && ++ vchan != RTE_DMA_ALL_VCHAN) { ++ RTE_DMA_LOG(ERR, ++ "Device %d vchan %u out of range", dev_id, vchan); ++ return -EINVAL; ++ } ++ ++ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); ++ return (*dev->dev_ops->stats_reset)(dev, vchan); ++} ++ ++int ++rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status) ++{ ++ struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; ++ ++ if (!rte_dma_is_valid(dev_id)) ++ return -EINVAL; ++ ++ if (vchan >= dev->data->dev_conf.nb_vchans) { ++ RTE_DMA_LOG(ERR, "Device %u vchan %u out of range\n", dev_id, vchan); ++ return -EINVAL; ++ } ++ ++ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_status, -ENOTSUP); ++ return (*dev->dev_ops->vchan_status)(dev, vchan, status); ++} ++ 
++static const char * ++dma_capability_name(uint64_t capability) ++{ ++ static const struct { ++ uint64_t capability; ++ const char *name; ++ } capa_names[] = { ++ { RTE_DMA_CAPA_MEM_TO_MEM, "mem2mem" }, ++ { RTE_DMA_CAPA_MEM_TO_DEV, "mem2dev" }, ++ { RTE_DMA_CAPA_DEV_TO_MEM, "dev2mem" }, ++ { RTE_DMA_CAPA_DEV_TO_DEV, "dev2dev" }, ++ { RTE_DMA_CAPA_SVA, "sva" }, ++ { RTE_DMA_CAPA_SILENT, "silent" }, ++ { RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" }, ++ { RTE_DMA_CAPA_OPS_COPY, "copy" }, ++ { RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" }, ++ { RTE_DMA_CAPA_OPS_FILL, "fill" }, ++ }; ++ ++ const char *name = "unknown"; ++ uint32_t i; ++ ++ for (i = 0; i < RTE_DIM(capa_names); i++) { ++ if (capability == capa_names[i].capability) { ++ name = capa_names[i].name; ++ break; ++ } ++ } ++ ++ return name; ++} ++ ++static void ++dma_dump_capability(FILE *f, uint64_t dev_capa) ++{ ++ uint64_t capa; ++ ++ (void)fprintf(f, " dev_capa: 0x%" PRIx64 " -", dev_capa); ++ while (dev_capa > 0) { ++ capa = 1ull << __builtin_ctzll(dev_capa); ++ (void)fprintf(f, " %s", dma_capability_name(capa)); ++ dev_capa &= ~capa; ++ } ++ (void)fprintf(f, "\n"); ++} ++ ++int ++rte_dma_dump(int16_t dev_id, FILE *f) ++{ ++ const struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; ++ struct rte_dma_info dev_info; ++ int ret; ++ ++ if (!rte_dma_is_valid(dev_id) || f == NULL) ++ return -EINVAL; ++ ++ ret = rte_dma_info_get(dev_id, &dev_info); ++ if (ret != 0) { ++ RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id); ++ return -EINVAL; ++ } ++ ++ (void)fprintf(f, "DMA Dev %d, '%s' [%s]\n", ++ dev->data->dev_id, ++ dev->data->dev_name, ++ dev->data->dev_started ? "started" : "stopped"); ++ dma_dump_capability(f, dev_info.dev_capa); ++ (void)fprintf(f, " max_vchans_supported: %u\n", dev_info.max_vchans); ++ (void)fprintf(f, " nb_vchans_configured: %u\n", dev_info.nb_vchans); ++ (void)fprintf(f, " silent_mode: %s\n", ++ dev->data->dev_conf.enable_silent ? 
"on" : "off"); ++ ++ if (dev->dev_ops->dev_dump != NULL) ++ return (*dev->dev_ops->dev_dump)(dev, f); ++ ++ return 0; ++} ++ ++static int ++dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan, ++ __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst, ++ __rte_unused uint32_t length, __rte_unused uint64_t flags) ++{ ++ RTE_DMA_LOG(ERR, "copy is not configured or not supported."); ++ return -EINVAL; ++} ++ ++static int ++dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan, ++ __rte_unused const struct rte_dma_sge *src, ++ __rte_unused const struct rte_dma_sge *dst, ++ __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst, ++ __rte_unused uint64_t flags) ++{ ++ RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported."); ++ return -EINVAL; ++} ++ ++static int ++dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan, ++ __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst, ++ __rte_unused uint32_t length, __rte_unused uint64_t flags) ++{ ++ RTE_DMA_LOG(ERR, "fill is not configured or not supported."); ++ return -EINVAL; ++} ++ ++static int ++dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan) ++{ ++ RTE_DMA_LOG(ERR, "submit is not configured or not supported."); ++ return -EINVAL; ++} ++ ++static uint16_t ++dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan, ++ __rte_unused const uint16_t nb_cpls, ++ __rte_unused uint16_t *last_idx, __rte_unused bool *has_error) ++{ ++ RTE_DMA_LOG(ERR, "completed is not configured or not supported."); ++ return 0; ++} ++ ++static uint16_t ++dummy_completed_status(__rte_unused void *dev_private, ++ __rte_unused uint16_t vchan, ++ __rte_unused const uint16_t nb_cpls, ++ __rte_unused uint16_t *last_idx, ++ __rte_unused enum rte_dma_status_code *status) ++{ ++ RTE_DMA_LOG(ERR, ++ "completed_status is not configured or not supported."); ++ return 0; ++} ++ ++static uint16_t ++dummy_burst_capacity(__rte_unused const void *dev_private, ++ __rte_unused uint16_t vchan) ++{ ++ RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported."); ++ return 0; ++} ++ ++static void ++dma_fp_object_dummy(struct rte_dma_fp_object *obj) ++{ ++ obj->dev_private = NULL; ++ obj->copy = dummy_copy; ++ obj->copy_sg = dummy_copy_sg; ++ obj->fill = dummy_fill; ++ obj->submit = dummy_submit; ++ obj->completed = dummy_completed; ++ obj->completed_status = dummy_completed_status; ++ obj->burst_capacity = dummy_burst_capacity; ++} +diff --git a/lib/librte_dmadev/rte_dmadev.h b/lib/librte_dmadev/rte_dmadev.h +new file mode 100644 +index 000000000..9942c6ec2 +--- /dev/null ++++ b/lib/librte_dmadev/rte_dmadev.h +@@ -0,0 +1,1138 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2021 HiSilicon Limited ++ * Copyright(c) 2021 Intel Corporation ++ * Copyright(c) 2021 Marvell International Ltd ++ * Copyright(c) 2021 SmartShare Systems ++ */ ++ ++#ifndef RTE_DMADEV_H ++#define RTE_DMADEV_H ++ ++/** ++ * @file rte_dmadev.h ++ * ++ * DMA (Direct Memory Access) device API. 
++ * ++ * The DMA framework is built on the following model: ++ * ++ * --------------- --------------- --------------- ++ * | virtual DMA | | virtual DMA | | virtual DMA | ++ * | channel | | channel | | channel | ++ * --------------- --------------- --------------- ++ * | | | ++ * ------------------ | ++ * | | ++ * ------------ ------------ ++ * | dmadev | | dmadev | ++ * ------------ ------------ ++ * | | ++ * ------------------ ------------------ ++ * | HW DMA channel | | HW DMA channel | ++ * ------------------ ------------------ ++ * | | ++ * -------------------------------- ++ * | ++ * --------------------- ++ * | HW DMA Controller | ++ * --------------------- ++ * ++ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues), ++ * each HW-DMA-channel should be represented by a dmadev. ++ * ++ * The dmadev could create multiple virtual DMA channels, each virtual DMA ++ * channel represents a different transfer context. The DMA operation request ++ * must be submitted to the virtual DMA channel. e.g. Application could create ++ * virtual DMA channel 0 for memory-to-memory transfer scenario, and create ++ * virtual DMA channel 1 for memory-to-device transfer scenario. ++ * ++ * This framework uses 'int16_t dev_id' as the device identifier of a dmadev, ++ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev. ++ * ++ * The functions exported by the dmadev API to setup a device designated by its ++ * device identifier must be invoked in the following order: ++ * - rte_dma_configure() ++ * - rte_dma_vchan_setup() ++ * - rte_dma_start() ++ * ++ * Then, the application can invoke dataplane functions to process jobs. ++ * ++ * If the application wants to change the configuration (i.e. invoke ++ * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke ++ * rte_dma_stop() first to stop the device and then do the reconfiguration ++ * before invoking rte_dma_start() again. The dataplane functions should not ++ * be invoked when the device is stopped. ++ * ++ * Finally, an application can close a dmadev by invoking the rte_dma_close() ++ * function. ++ * ++ * The dataplane APIs include two parts: ++ * The first part is the submission of operation requests: ++ * - rte_dma_copy() ++ * - rte_dma_copy_sg() ++ * - rte_dma_fill() ++ * - rte_dma_submit() ++ * ++ * These APIs could work with different virtual DMA channels which have ++ * different contexts. ++ * ++ * The first three APIs are used to submit the operation request to the virtual ++ * DMA channel, if the submission is successful, a positive ++ * ring_idx <= UINT16_MAX is returned, otherwise a negative number is returned. ++ * ++ * The last API is used to issue doorbell to hardware, and also there are flags ++ * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs could do the ++ * same work. ++ * @note When enqueuing a set of jobs to the device, having a separate submit ++ * outside a loop makes for clearer code than having a check for the last ++ * iteration inside the loop to set a special submit flag. However, for cases ++ * where one item alone is to be submitted or there is a small set of jobs to ++ * be submitted sequentially, having a submit flag provides a lower-overhead ++ * way of doing the submission while still keeping the code clean. ++ * ++ * The second part is to obtain the result of requests: ++ * - rte_dma_completed() ++ * - return the number of operation requests completed successfully. 
++ * - rte_dma_completed_status() ++ * - return the number of operation requests completed. ++ * ++ * @note If the dmadev works in silent mode (@see RTE_DMA_CAPA_SILENT), ++ * application does not invoke the above two completed APIs. ++ * ++ * About the ring_idx which enqueue APIs (e.g. rte_dma_copy(), rte_dma_fill()) ++ * return, the rules are as follows: ++ * - ring_idx for each virtual DMA channel are independent. ++ * - For a virtual DMA channel, the ring_idx is monotonically incremented, ++ * when it reach UINT16_MAX, it wraps back to zero. ++ * - This ring_idx can be used by applications to track per-operation ++ * metadata in an application-defined circular ring. ++ * - The initial ring_idx of a virtual DMA channel is zero, after the ++ * device is stopped, the ring_idx needs to be reset to zero. ++ * ++ * One example: ++ * - step-1: start one dmadev ++ * - step-2: enqueue a copy operation, the ring_idx return is 0 ++ * - step-3: enqueue a copy operation again, the ring_idx return is 1 ++ * - ... ++ * - step-101: stop the dmadev ++ * - step-102: start the dmadev ++ * - step-103: enqueue a copy operation, the ring_idx return is 0 ++ * - ... ++ * - step-x+0: enqueue a fill operation, the ring_idx return is 65535 ++ * - step-x+1: enqueue a copy operation, the ring_idx return is 0 ++ * - ... ++ * ++ * The DMA operation address used in enqueue APIs (i.e. rte_dma_copy(), ++ * rte_dma_copy_sg(), rte_dma_fill()) is defined as rte_iova_t type. ++ * ++ * The dmadev supports two types of address: memory address and device address. ++ * ++ * - memory address: the source and destination address of the memory-to-memory ++ * transfer type, or the source address of the memory-to-device transfer type, ++ * or the destination address of the device-to-memory transfer type. ++ * @note If the device support SVA (@see RTE_DMA_CAPA_SVA), the memory address ++ * can be any VA address, otherwise it must be an IOVA address. ++ * ++ * - device address: the source and destination address of the device-to-device ++ * transfer type, or the source address of the device-to-memory transfer type, ++ * or the destination address of the memory-to-device transfer type. ++ * ++ * About MT-safe, all the functions of the dmadev API implemented by a PMD are ++ * lock-free functions which assume to not be invoked in parallel on different ++ * logical cores to work on the same target dmadev object. ++ * @note Different virtual DMA channels on the same dmadev *DO NOT* support ++ * parallel invocation because these virtual DMA channels share the same ++ * HW-DMA-channel. ++ */ ++ ++#include ++ ++#include ++#include ++#include ++#include ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++/** Maximum number of devices if rte_dma_dev_max() is not called. */ ++#define RTE_DMADEV_DEFAULT_MAX 64 ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Configure the maximum number of dmadevs. ++ * @note This function can be invoked before the primary process rte_eal_init() ++ * to change the maximum number of dmadevs. If not invoked, the maximum number ++ * of dmadevs is @see RTE_DMADEV_DEFAULT_MAX ++ * ++ * @param dev_max ++ * maximum number of dmadevs. ++ * ++ * @return ++ * 0 on success. Otherwise negative value is returned. ++ */ ++__rte_experimental ++int rte_dma_dev_max(size_t dev_max); ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Get the device identifier for the named DMA device. ++ * ++ * @param name ++ * DMA device name. 
++ * ++ * @return ++ * Returns DMA device identifier on success. ++ * - <0: Failure to find named DMA device. ++ */ ++__rte_experimental ++int rte_dma_get_dev_id_by_name(const char *name); ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Check whether the dev_id is valid. ++ * ++ * @param dev_id ++ * DMA device index. ++ * ++ * @return ++ * - If the device index is valid (true) or not (false). ++ */ ++__rte_experimental ++bool rte_dma_is_valid(int16_t dev_id); ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Get the total number of DMA devices that have been successfully ++ * initialised. ++ * ++ * @return ++ * The total number of usable DMA devices. ++ */ ++__rte_experimental ++uint16_t rte_dma_count_avail(void); ++ ++/** ++ * Iterates over valid dmadev instances. ++ * ++ * @param start_dev_id ++ * The id of the next possible dmadev. ++ * @return ++ * Next valid dmadev, UINT16_MAX if there is none. ++ */ ++__rte_experimental ++int16_t rte_dma_next_dev(int16_t start_dev_id); ++ ++/** Utility macro to iterate over all available dmadevs */ ++#define RTE_DMA_FOREACH_DEV(p) \ ++ for (p = rte_dma_next_dev(0); \ ++ p != -1; \ ++ p = rte_dma_next_dev(p + 1)) ++ ++ ++/**@{@name DMA capability ++ * @see struct rte_dma_info::dev_capa ++ */ ++/** Support memory-to-memory transfer */ ++#define RTE_DMA_CAPA_MEM_TO_MEM RTE_BIT64(0) ++/** Support memory-to-device transfer. */ ++#define RTE_DMA_CAPA_MEM_TO_DEV RTE_BIT64(1) ++/** Support device-to-memory transfer. */ ++#define RTE_DMA_CAPA_DEV_TO_MEM RTE_BIT64(2) ++/** Support device-to-device transfer. */ ++#define RTE_DMA_CAPA_DEV_TO_DEV RTE_BIT64(3) ++/** Support SVA which could use VA as DMA address. ++ * If device support SVA then application could pass any VA address like memory ++ * from rte_malloc(), rte_memzone(), malloc, stack memory. ++ * If device don't support SVA, then application should pass IOVA address which ++ * from rte_malloc(), rte_memzone(). ++ */ ++#define RTE_DMA_CAPA_SVA RTE_BIT64(4) ++/** Support work in silent mode. ++ * In this mode, application don't required to invoke rte_dma_completed*() ++ * API. ++ * @see struct rte_dma_conf::silent_mode ++ */ ++#define RTE_DMA_CAPA_SILENT RTE_BIT64(5) ++/** Supports error handling ++ * ++ * With this bit set, invalid input addresses will be reported as operation failures ++ * to the user but other operations can continue. ++ * Without this bit set, invalid data is not handled by either HW or driver, so user ++ * must ensure that all memory addresses are valid and accessible by HW. ++ */ ++#define RTE_DMA_CAPA_HANDLES_ERRORS RTE_BIT64(6) ++/** Support copy operation. ++ * This capability start with index of 32, so that it could leave gap between ++ * normal capability and ops capability. ++ */ ++#define RTE_DMA_CAPA_OPS_COPY RTE_BIT64(32) ++/** Support scatter-gather list copy operation. */ ++#define RTE_DMA_CAPA_OPS_COPY_SG RTE_BIT64(33) ++/** Support fill operation. */ ++#define RTE_DMA_CAPA_OPS_FILL RTE_BIT64(34) ++/**@}*/ ++ ++/** ++ * A structure used to retrieve the information of a DMA device. ++ * ++ * @see rte_dma_info_get ++ */ ++struct rte_dma_info { ++ const char *dev_name; /**< Unique device name. */ ++ /** Device capabilities (RTE_DMA_CAPA_*). */ ++ uint64_t dev_capa; ++ /** Maximum number of virtual DMA channels supported. */ ++ uint16_t max_vchans; ++ /** Maximum allowed number of virtual DMA channel descriptors. 
*/ ++ uint16_t max_desc; ++ /** Minimum allowed number of virtual DMA channel descriptors. */ ++ uint16_t min_desc; ++ /** Maximum number of source or destination scatter-gather entry ++ * supported. ++ * If the device does not support COPY_SG capability, this value can be ++ * zero. ++ * If the device supports COPY_SG capability, then rte_dma_copy_sg() ++ * parameter nb_src/nb_dst should not exceed this value. ++ */ ++ uint16_t max_sges; ++ /** NUMA node connection, -1 if unknown. */ ++ int16_t numa_node; ++ /** Number of virtual DMA channel configured. */ ++ uint16_t nb_vchans; ++}; ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Retrieve information of a DMA device. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param[out] dev_info ++ * A pointer to a structure of type *rte_dma_info* to be filled with the ++ * information of the device. ++ * ++ * @return ++ * 0 on success. Otherwise negative value is returned. ++ */ ++__rte_experimental ++int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info); ++ ++/** ++ * A structure used to configure a DMA device. ++ * ++ * @see rte_dma_configure ++ */ ++struct rte_dma_conf { ++ /** The number of virtual DMA channels to set up for the DMA device. ++ * This value cannot be greater than the field 'max_vchans' of struct ++ * rte_dma_info which get from rte_dma_info_get(). ++ */ ++ uint16_t nb_vchans; ++ /** Indicates whether to enable silent mode. ++ * false-default mode, true-silent mode. ++ * This value can be set to true only when the SILENT capability is ++ * supported. ++ * ++ * @see RTE_DMA_CAPA_SILENT ++ */ ++ bool enable_silent; ++}; ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Configure a DMA device. ++ * ++ * This function must be invoked first before any other function in the ++ * API. This function can also be re-invoked when a device is in the ++ * stopped state. ++ * ++ * @param dev_id ++ * The identifier of the device to configure. ++ * @param dev_conf ++ * The DMA device configuration structure encapsulated into rte_dma_conf ++ * object. ++ * ++ * @return ++ * 0 on success. Otherwise negative value is returned. ++ */ ++__rte_experimental ++int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf); ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Start a DMA device. ++ * ++ * The device start step is the last one and consists of setting the DMA ++ * to start accepting jobs. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * ++ * @return ++ * 0 on success. Otherwise negative value is returned. ++ */ ++__rte_experimental ++int rte_dma_start(int16_t dev_id); ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Stop a DMA device. ++ * ++ * The device can be restarted with a call to rte_dma_start(). ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * ++ * @return ++ * 0 on success. Otherwise negative value is returned. ++ */ ++__rte_experimental ++int rte_dma_stop(int16_t dev_id); ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Close a DMA device. ++ * ++ * The device cannot be restarted after this call. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * ++ * @return ++ * 0 on success. Otherwise negative value is returned. 
++ */ ++__rte_experimental ++int rte_dma_close(int16_t dev_id); ++ ++/** ++ * DMA transfer direction defines. ++ * ++ * @see struct rte_dma_vchan_conf::direction ++ */ ++enum rte_dma_direction { ++ /** DMA transfer direction - from memory to memory. ++ * ++ * @see struct rte_dma_vchan_conf::direction ++ */ ++ RTE_DMA_DIR_MEM_TO_MEM, ++ /** DMA transfer direction - from memory to device. ++ * In a typical scenario, the SoCs are installed on host servers as ++ * iNICs through the PCIe interface. In this case, the SoCs works in ++ * EP(endpoint) mode, it could initiate a DMA move request from memory ++ * (which is SoCs memory) to device (which is host memory). ++ * ++ * @see struct rte_dma_vchan_conf::direction ++ */ ++ RTE_DMA_DIR_MEM_TO_DEV, ++ /** DMA transfer direction - from device to memory. ++ * In a typical scenario, the SoCs are installed on host servers as ++ * iNICs through the PCIe interface. In this case, the SoCs works in ++ * EP(endpoint) mode, it could initiate a DMA move request from device ++ * (which is host memory) to memory (which is SoCs memory). ++ * ++ * @see struct rte_dma_vchan_conf::direction ++ */ ++ RTE_DMA_DIR_DEV_TO_MEM, ++ /** DMA transfer direction - from device to device. ++ * In a typical scenario, the SoCs are installed on host servers as ++ * iNICs through the PCIe interface. In this case, the SoCs works in ++ * EP(endpoint) mode, it could initiate a DMA move request from device ++ * (which is host memory) to the device (which is another host memory). ++ * ++ * @see struct rte_dma_vchan_conf::direction ++ */ ++ RTE_DMA_DIR_DEV_TO_DEV, ++}; ++ ++/** ++ * DMA access port type defines. ++ * ++ * @see struct rte_dma_port_param::port_type ++ */ ++enum rte_dma_port_type { ++ RTE_DMA_PORT_NONE, ++ RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */ ++}; ++ ++/** ++ * A structure used to descript DMA access port parameters. ++ * ++ * @see struct rte_dma_vchan_conf::src_port ++ * @see struct rte_dma_vchan_conf::dst_port ++ */ ++struct rte_dma_port_param { ++ /** The device access port type. ++ * ++ * @see enum rte_dma_port_type ++ */ ++ enum rte_dma_port_type port_type; ++ RTE_STD_C11 ++ union { ++ /** PCIe access port parameters. ++ * ++ * The following model shows SoC's PCIe module connects to ++ * multiple PCIe hosts and multiple endpoints. The PCIe module ++ * has an integrated DMA controller. ++ * ++ * If the DMA wants to access the memory of host A, it can be ++ * initiated by PF1 in core0, or by VF0 of PF0 in core0. ++ * ++ * \code{.unparsed} ++ * System Bus ++ * | ----------PCIe module---------- ++ * | Bus ++ * | Interface ++ * | ----- ------------------ ++ * | | | | PCIe Core0 | ++ * | | | | | ----------- ++ * | | | | PF-0 -- VF-0 | | Host A | ++ * | | |--------| |- VF-1 |--------| Root | ++ * | | | | PF-1 | | Complex | ++ * | | | | PF-2 | ----------- ++ * | | | ------------------ ++ * | | | ++ * | | | ------------------ ++ * | | | | PCIe Core1 | ++ * | | | | | ----------- ++ * | | | | PF-0 -- VF-0 | | Host B | ++ * |-----| |--------| PF-1 -- VF-0 |--------| Root | ++ * | | | | |- VF-1 | | Complex | ++ * | | | | PF-2 | ----------- ++ * | | | ------------------ ++ * | | | ++ * | | | ------------------ ++ * | |DMA| | | ------ ++ * | | | | |--------| EP | ++ * | | |--------| PCIe Core2 | ------ ++ * | | | | | ------ ++ * | | | | |--------| EP | ++ * | | | | | ------ ++ * | ----- ------------------ ++ * ++ * \endcode ++ * ++ * @note If some fields can not be supported by the ++ * hardware/driver, then the driver ignores those fields. 
++ * Please check driver-specific documentation for limitations ++ * and capablites. ++ */ ++ __extension__ ++ struct { ++ uint64_t coreid : 4; /**< PCIe core id used. */ ++ uint64_t pfid : 8; /**< PF id used. */ ++ uint64_t vfen : 1; /**< VF enable bit. */ ++ uint64_t vfid : 16; /**< VF id used. */ ++ /** The pasid filed in TLP packet. */ ++ uint64_t pasid : 20; ++ /** The attributes filed in TLP packet. */ ++ uint64_t attr : 3; ++ /** The processing hint filed in TLP packet. */ ++ uint64_t ph : 2; ++ /** The steering tag filed in TLP packet. */ ++ uint64_t st : 16; ++ } pcie; ++ }; ++ uint64_t reserved[2]; /**< Reserved for future fields. */ ++}; ++ ++/** ++ * A structure used to configure a virtual DMA channel. ++ * ++ * @see rte_dma_vchan_setup ++ */ ++struct rte_dma_vchan_conf { ++ /** Transfer direction ++ * ++ * @see enum rte_dma_direction ++ */ ++ enum rte_dma_direction direction; ++ /** Number of descriptor for the virtual DMA channel */ ++ uint16_t nb_desc; ++ /** 1) Used to describes the device access port parameter in the ++ * device-to-memory transfer scenario. ++ * 2) Used to describes the source device access port parameter in the ++ * device-to-device transfer scenario. ++ * ++ * @see struct rte_dma_port_param ++ */ ++ struct rte_dma_port_param src_port; ++ /** 1) Used to describes the device access port parameter in the ++ * memory-to-device transfer scenario. ++ * 2) Used to describes the destination device access port parameter in ++ * the device-to-device transfer scenario. ++ * ++ * @see struct rte_dma_port_param ++ */ ++ struct rte_dma_port_param dst_port; ++}; ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Allocate and set up a virtual DMA channel. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param vchan ++ * The identifier of virtual DMA channel. The value must be in the range ++ * [0, nb_vchans - 1] previously supplied to rte_dma_configure(). ++ * @param conf ++ * The virtual DMA channel configuration structure encapsulated into ++ * rte_dma_vchan_conf object. ++ * ++ * @return ++ * 0 on success. Otherwise negative value is returned. ++ */ ++__rte_experimental ++int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan, ++ const struct rte_dma_vchan_conf *conf); ++ ++/** ++ * A structure used to retrieve statistics. ++ * ++ * @see rte_dma_stats_get ++ */ ++struct rte_dma_stats { ++ /** Count of operations which were submitted to hardware. */ ++ uint64_t submitted; ++ /** Count of operations which were completed, including successful and ++ * failed completions. ++ */ ++ uint64_t completed; ++ /** Count of operations which failed to complete. */ ++ uint64_t errors; ++}; ++ ++/** ++ * Special ID, which is used to represent all virtual DMA channels. ++ * ++ * @see rte_dma_stats_get ++ * @see rte_dma_stats_reset ++ */ ++#define RTE_DMA_ALL_VCHAN 0xFFFFu ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Retrieve basic statistics of a or all virtual DMA channel(s). ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param vchan ++ * The identifier of virtual DMA channel. ++ * If equal RTE_DMA_ALL_VCHAN means all channels. ++ * @param[out] stats ++ * The basic statistics structure encapsulated into rte_dma_stats ++ * object. ++ * ++ * @return ++ * 0 on success. Otherwise negative value is returned. 
++ */ ++__rte_experimental ++int rte_dma_stats_get(int16_t dev_id, uint16_t vchan, ++ struct rte_dma_stats *stats); ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Reset basic statistics of a or all virtual DMA channel(s). ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param vchan ++ * The identifier of virtual DMA channel. ++ * If equal RTE_DMA_ALL_VCHAN means all channels. ++ * ++ * @return ++ * 0 on success. Otherwise negative value is returned. ++ */ ++__rte_experimental ++int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan); ++ ++/** ++ * device vchannel status ++ * ++ * Enum with the options for the channel status, either idle, active or halted due to error ++ * @see rte_dma_vchan_status ++ */ ++enum rte_dma_vchan_status { ++ RTE_DMA_VCHAN_IDLE, /**< not processing, awaiting ops */ ++ RTE_DMA_VCHAN_ACTIVE, /**< currently processing jobs */ ++ RTE_DMA_VCHAN_HALTED_ERROR, /**< not processing due to error, cannot accept new ops */ ++}; ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Determine if all jobs have completed on a device channel. ++ * This function is primarily designed for testing use, as it allows a process to check if ++ * all jobs are completed, without actually gathering completions from those jobs. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param vchan ++ * The identifier of virtual DMA channel. ++ * @param[out] status ++ * The vchan status ++ * @return ++ * 0 - call completed successfully ++ * < 0 - error code indicating there was a problem calling the API ++ */ ++__rte_experimental ++int ++rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status); ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Dump DMA device info. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param f ++ * The file to write the output to. ++ * ++ * @return ++ * 0 on success. Otherwise negative value is returned. ++ */ ++__rte_experimental ++int rte_dma_dump(int16_t dev_id, FILE *f); ++ ++/** ++ * DMA transfer result status code defines. ++ * ++ * @see rte_dma_completed_status ++ */ ++enum rte_dma_status_code { ++ /** The operation completed successfully. */ ++ RTE_DMA_STATUS_SUCCESSFUL, ++ /** The operation failed to complete due abort by user. ++ * This is mainly used when processing dev_stop, user could modidy the ++ * descriptors (e.g. change one bit to tell hardware abort this job), ++ * it allows outstanding requests to be complete as much as possible, ++ * so reduce the time to stop the device. ++ */ ++ RTE_DMA_STATUS_USER_ABORT, ++ /** The operation failed to complete due to following scenarios: ++ * The jobs in a particular batch are not attempted because they ++ * appeared after a fence where a previous job failed. In some HW ++ * implementation it's possible for jobs from later batches would be ++ * completed, though, so report the status from the not attempted jobs ++ * before reporting those newer completed jobs. ++ */ ++ RTE_DMA_STATUS_NOT_ATTEMPTED, ++ /** The operation failed to complete due invalid source address. */ ++ RTE_DMA_STATUS_INVALID_SRC_ADDR, ++ /** The operation failed to complete due invalid destination address. */ ++ RTE_DMA_STATUS_INVALID_DST_ADDR, ++ /** The operation failed to complete due invalid source or destination ++ * address, cover the case that only knows the address error, but not ++ * sure which address error. 
++ */ ++ RTE_DMA_STATUS_INVALID_ADDR, ++ /** The operation failed to complete due invalid length. */ ++ RTE_DMA_STATUS_INVALID_LENGTH, ++ /** The operation failed to complete due invalid opcode. ++ * The DMA descriptor could have multiple format, which are ++ * distinguished by the opcode field. ++ */ ++ RTE_DMA_STATUS_INVALID_OPCODE, ++ /** The operation failed to complete due bus read error. */ ++ RTE_DMA_STATUS_BUS_READ_ERROR, ++ /** The operation failed to complete due bus write error. */ ++ RTE_DMA_STATUS_BUS_WRITE_ERROR, ++ /** The operation failed to complete due bus error, cover the case that ++ * only knows the bus error, but not sure which direction error. ++ */ ++ RTE_DMA_STATUS_BUS_ERROR, ++ /** The operation failed to complete due data poison. */ ++ RTE_DMA_STATUS_DATA_POISION, ++ /** The operation failed to complete due descriptor read error. */ ++ RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR, ++ /** The operation failed to complete due device link error. ++ * Used to indicates that the link error in the memory-to-device/ ++ * device-to-memory/device-to-device transfer scenario. ++ */ ++ RTE_DMA_STATUS_DEV_LINK_ERROR, ++ /** The operation failed to complete due lookup page fault. */ ++ RTE_DMA_STATUS_PAGE_FAULT, ++ /** The operation failed to complete due unknown reason. ++ * The initial value is 256, which reserves space for future errors. ++ */ ++ RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100, ++}; ++ ++/** ++ * A structure used to hold scatter-gather DMA operation request entry. ++ * ++ * @see rte_dma_copy_sg ++ */ ++struct rte_dma_sge { ++ rte_iova_t addr; /**< The DMA operation address. */ ++ uint32_t length; /**< The DMA operation length. */ ++}; ++ ++#include "rte_dmadev_core.h" ++ ++/**@{@name DMA operation flag ++ * @see rte_dma_copy() ++ * @see rte_dma_copy_sg() ++ * @see rte_dma_fill() ++ */ ++/** Fence flag. ++ * It means the operation with this flag must be processed only after all ++ * previous operations are completed. ++ * If the specify DMA HW works in-order (it means it has default fence between ++ * operations), this flag could be NOP. ++ */ ++#define RTE_DMA_OP_FLAG_FENCE RTE_BIT64(0) ++/** Submit flag. ++ * It means the operation with this flag must issue doorbell to hardware after ++ * enqueued jobs. ++ */ ++#define RTE_DMA_OP_FLAG_SUBMIT RTE_BIT64(1) ++/** Write data to low level cache hint. ++ * Used for performance optimization, this is just a hint, and there is no ++ * capability bit for this, driver should not return error if this flag was set. ++ */ ++#define RTE_DMA_OP_FLAG_LLC RTE_BIT64(2) ++/**@}*/ ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Enqueue a copy operation onto the virtual DMA channel. ++ * ++ * This queues up a copy operation to be performed by hardware, if the 'flags' ++ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin ++ * this operation, otherwise do not trigger doorbell. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param vchan ++ * The identifier of virtual DMA channel. ++ * @param src ++ * The address of the source buffer. ++ * @param dst ++ * The address of the destination buffer. ++ * @param length ++ * The length of the data to be copied. ++ * @param flags ++ * An flags for this operation. ++ * @see RTE_DMA_OP_FLAG_* ++ * ++ * @return ++ * - 0..UINT16_MAX: index of enqueued job. ++ * - -ENOSPC: if no space left to enqueue. ++ * - other values < 0 on failure. 
++ */ ++__rte_experimental ++static inline int ++rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst, ++ uint32_t length, uint64_t flags) ++{ ++ struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; ++ ++#ifdef RTE_DMADEV_DEBUG ++ if (!rte_dma_is_valid(dev_id) || length == 0) ++ return -EINVAL; ++ RTE_FUNC_PTR_OR_ERR_RET(*obj->copy, -ENOTSUP); ++#endif ++ ++ return (*obj->copy)(obj->dev_private, vchan, src, dst, length, flags); ++} ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel. ++ * ++ * This queues up a scatter-gather list copy operation to be performed by ++ * hardware, if the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT then ++ * trigger doorbell to begin this operation, otherwise do not trigger doorbell. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param vchan ++ * The identifier of virtual DMA channel. ++ * @param src ++ * The pointer of source scatter-gather entry array. ++ * @param dst ++ * The pointer of destination scatter-gather entry array. ++ * @param nb_src ++ * The number of source scatter-gather entry. ++ * @see struct rte_dma_info::max_sges ++ * @param nb_dst ++ * The number of destination scatter-gather entry. ++ * @see struct rte_dma_info::max_sges ++ * @param flags ++ * An flags for this operation. ++ * @see RTE_DMA_OP_FLAG_* ++ * ++ * @return ++ * - 0..UINT16_MAX: index of enqueued job. ++ * - -ENOSPC: if no space left to enqueue. ++ * - other values < 0 on failure. ++ */ ++__rte_experimental ++static inline int ++rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src, ++ struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst, ++ uint64_t flags) ++{ ++ struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; ++ ++#ifdef RTE_DMADEV_DEBUG ++ if (!rte_dma_is_valid(dev_id) || src == NULL || dst == NULL || ++ nb_src == 0 || nb_dst == 0) ++ return -EINVAL; ++ RTE_FUNC_PTR_OR_ERR_RET(*obj->copy_sg, -ENOTSUP); ++#endif ++ ++ return (*obj->copy_sg)(obj->dev_private, vchan, src, dst, nb_src, ++ nb_dst, flags); ++} ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Enqueue a fill operation onto the virtual DMA channel. ++ * ++ * This queues up a fill operation to be performed by hardware, if the 'flags' ++ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin ++ * this operation, otherwise do not trigger doorbell. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param vchan ++ * The identifier of virtual DMA channel. ++ * @param pattern ++ * The pattern to populate the destination buffer with. ++ * @param dst ++ * The address of the destination buffer. ++ * @param length ++ * The length of the destination buffer. ++ * @param flags ++ * An flags for this operation. ++ * @see RTE_DMA_OP_FLAG_* ++ * ++ * @return ++ * - 0..UINT16_MAX: index of enqueued job. ++ * - -ENOSPC: if no space left to enqueue. ++ * - other values < 0 on failure. 
++ */ ++__rte_experimental ++static inline int ++rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern, ++ rte_iova_t dst, uint32_t length, uint64_t flags) ++{ ++ struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; ++ ++#ifdef RTE_DMADEV_DEBUG ++ if (!rte_dma_is_valid(dev_id) || length == 0) ++ return -EINVAL; ++ RTE_FUNC_PTR_OR_ERR_RET(*obj->fill, -ENOTSUP); ++#endif ++ ++ return (*obj->fill)(obj->dev_private, vchan, pattern, dst, length, ++ flags); ++} ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Trigger hardware to begin performing enqueued operations. ++ * ++ * This API is used to write the "doorbell" to the hardware to trigger it ++ * to begin the operations previously enqueued by rte_dma_copy/fill(). ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param vchan ++ * The identifier of virtual DMA channel. ++ * ++ * @return ++ * 0 on success. Otherwise negative value is returned. ++ */ ++__rte_experimental ++static inline int ++rte_dma_submit(int16_t dev_id, uint16_t vchan) ++{ ++ struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; ++ ++#ifdef RTE_DMADEV_DEBUG ++ if (!rte_dma_is_valid(dev_id)) ++ return -EINVAL; ++ RTE_FUNC_PTR_OR_ERR_RET(*obj->submit, -ENOTSUP); ++#endif ++ ++ return (*obj->submit)(obj->dev_private, vchan); ++} ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Return the number of operations that have been successfully completed. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param vchan ++ * The identifier of virtual DMA channel. ++ * @param nb_cpls ++ * The maximum number of completed operations that can be processed. ++ * @param[out] last_idx ++ * The last completed operation's ring_idx. ++ * If not required, NULL can be passed in. ++ * @param[out] has_error ++ * Indicates if there are transfer error. ++ * If not required, NULL can be passed in. ++ * ++ * @return ++ * The number of operations that successfully completed. This return value ++ * must be less than or equal to the value of nb_cpls. ++ */ ++__rte_experimental ++static inline uint16_t ++rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls, ++ uint16_t *last_idx, bool *has_error) ++{ ++ struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; ++ uint16_t idx; ++ bool err; ++ ++#ifdef RTE_DMADEV_DEBUG ++ if (!rte_dma_is_valid(dev_id) || nb_cpls == 0) ++ return 0; ++ RTE_FUNC_PTR_OR_ERR_RET(*obj->completed, 0); ++#endif ++ ++ /* Ensure the pointer values are non-null to simplify drivers. ++ * In most cases these should be compile time evaluated, since this is ++ * an inline function. ++ * - If NULL is explicitly passed as parameter, then compiler knows the ++ * value is NULL ++ * - If address of local variable is passed as parameter, then compiler ++ * can know it's non-NULL. ++ */ ++ if (last_idx == NULL) ++ last_idx = &idx; ++ if (has_error == NULL) ++ has_error = &err; ++ ++ *has_error = false; ++ return (*obj->completed)(obj->dev_private, vchan, nb_cpls, last_idx, ++ has_error); ++} ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Return the number of operations that have been completed, and the operations ++ * result may succeed or fail. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param vchan ++ * The identifier of virtual DMA channel. ++ * @param nb_cpls ++ * Indicates the size of status array. 
++ * @param[out] last_idx ++ * The last completed operation's ring_idx. ++ * If not required, NULL can be passed in. ++ * @param[out] status ++ * This is a pointer to an array of length 'nb_cpls' that holds the completion ++ * status code of each operation. ++ * @see enum rte_dma_status_code ++ * ++ * @return ++ * The number of operations that completed. This return value must be less ++ * than or equal to the value of nb_cpls. ++ * If this number is greater than zero (assuming n), then n values in the ++ * status array are also set. ++ */ ++__rte_experimental ++static inline uint16_t ++rte_dma_completed_status(int16_t dev_id, uint16_t vchan, ++ const uint16_t nb_cpls, uint16_t *last_idx, ++ enum rte_dma_status_code *status) ++{ ++ struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; ++ uint16_t idx; ++ ++#ifdef RTE_DMADEV_DEBUG ++ if (!rte_dma_is_valid(dev_id) || nb_cpls == 0 || status == NULL) ++ return 0; ++ RTE_FUNC_PTR_OR_ERR_RET(*obj->completed_status, 0); ++#endif ++ ++ if (last_idx == NULL) ++ last_idx = &idx; ++ ++ return (*obj->completed_status)(obj->dev_private, vchan, nb_cpls, ++ last_idx, status); ++} ++ ++/** ++ * @warning ++ * @b EXPERIMENTAL: this API may change without prior notice. ++ * ++ * Check remaining capacity in descriptor ring for the current burst. ++ * ++ * @param dev_id ++ * The identifier of the device. ++ * @param vchan ++ * The identifier of virtual DMA channel. ++ * ++ * @return ++ * - Remaining space in the descriptor ring for the current burst. ++ * - 0 on error ++ */ ++__rte_experimental ++static inline uint16_t ++rte_dma_burst_capacity(int16_t dev_id, uint16_t vchan) ++{ ++ struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; ++ ++#ifdef RTE_DMADEV_DEBUG ++ if (!rte_dma_is_valid(dev_id)) ++ return 0; ++ RTE_FUNC_PTR_OR_ERR_RET(*obj->burst_capacity, 0); ++#endif ++ return (*obj->burst_capacity)(obj->dev_private, vchan); ++} ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif /* RTE_DMADEV_H */ +diff --git a/lib/librte_dmadev/rte_dmadev_core.h b/lib/librte_dmadev/rte_dmadev_core.h +new file mode 100644 +index 000000000..e42d8739a +--- /dev/null ++++ b/lib/librte_dmadev/rte_dmadev_core.h +@@ -0,0 +1,80 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2021 HiSilicon Limited ++ * Copyright(c) 2021 Intel Corporation ++ */ ++ ++#ifndef RTE_DMADEV_CORE_H ++#define RTE_DMADEV_CORE_H ++ ++/** ++ * @file ++ * ++ * DMA Device internal header. ++ * ++ * This header contains internal data types which are used by dataplane inline ++ * function. ++ * ++ * Applications should not use these functions directly. ++ */ ++ ++/** @internal Used to enqueue a copy operation. */ ++typedef int (*rte_dma_copy_t)(void *dev_private, uint16_t vchan, ++ rte_iova_t src, rte_iova_t dst, ++ uint32_t length, uint64_t flags); ++ ++/** @internal Used to enqueue a scatter-gather list copy operation. */ ++typedef int (*rte_dma_copy_sg_t)(void *dev_private, uint16_t vchan, ++ const struct rte_dma_sge *src, ++ const struct rte_dma_sge *dst, ++ uint16_t nb_src, uint16_t nb_dst, ++ uint64_t flags); ++ ++/** @internal Used to enqueue a fill operation. */ ++typedef int (*rte_dma_fill_t)(void *dev_private, uint16_t vchan, ++ uint64_t pattern, rte_iova_t dst, ++ uint32_t length, uint64_t flags); ++ ++/** @internal Used to trigger hardware to begin working. */ ++typedef int (*rte_dma_submit_t)(void *dev_private, uint16_t vchan); ++ ++/** @internal Used to return number of successful completed operations. 
*/ ++typedef uint16_t (*rte_dma_completed_t)(void *dev_private, ++ uint16_t vchan, const uint16_t nb_cpls, ++ uint16_t *last_idx, bool *has_error); ++ ++/** @internal Used to return number of completed operations. */ ++typedef uint16_t (*rte_dma_completed_status_t)(void *dev_private, ++ uint16_t vchan, const uint16_t nb_cpls, ++ uint16_t *last_idx, enum rte_dma_status_code *status); ++ ++/** @internal Used to check the remaining space in descriptor ring. */ ++typedef uint16_t (*rte_dma_burst_capacity_t)(const void *dev_private, uint16_t vchan); ++ ++/** ++ * @internal ++ * Fast-path dmadev functions and related data are hold in a flat array. ++ * One entry per dmadev. ++ * ++ * This structure occupy exactly 128B which reserve space for future IO ++ * functions. ++ * ++ * The 'dev_private' field was placed in the first cache line to optimize ++ * performance because the PMD driver mainly depends on this field. ++ */ ++struct rte_dma_fp_object { ++ /** PMD-specific private data. The driver should copy ++ * rte_dma_dev.data->dev_private to this field during initialization. ++ */ ++ void *dev_private; ++ rte_dma_copy_t copy; ++ rte_dma_copy_sg_t copy_sg; ++ rte_dma_fill_t fill; ++ rte_dma_submit_t submit; ++ rte_dma_completed_t completed; ++ rte_dma_completed_status_t completed_status; ++ rte_dma_burst_capacity_t burst_capacity; ++} __rte_aligned(128); ++ ++extern struct rte_dma_fp_object *rte_dma_fp_objs; ++ ++#endif /* RTE_DMADEV_CORE_H */ +diff --git a/lib/librte_dmadev/rte_dmadev_pmd.h b/lib/librte_dmadev/rte_dmadev_pmd.h +new file mode 100644 +index 000000000..5316ad5b5 +--- /dev/null ++++ b/lib/librte_dmadev/rte_dmadev_pmd.h +@@ -0,0 +1,171 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2021 HiSilicon Limited ++ */ ++ ++#ifndef RTE_DMADEV_PMD_H ++#define RTE_DMADEV_PMD_H ++ ++/** ++ * @file ++ * ++ * DMA Device PMD interface ++ * ++ * Driver facing interface for a DMA device. These are not to be called directly ++ * by any application. ++ */ ++ ++#include "rte_dmadev.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++struct rte_dma_dev; ++ ++/** @internal Used to get device information of a device. */ ++typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev, ++ struct rte_dma_info *dev_info, ++ uint32_t info_sz); ++ ++/** @internal Used to configure a device. */ ++typedef int (*rte_dma_configure_t)(struct rte_dma_dev *dev, ++ const struct rte_dma_conf *dev_conf, ++ uint32_t conf_sz); ++ ++/** @internal Used to start a configured device. */ ++typedef int (*rte_dma_start_t)(struct rte_dma_dev *dev); ++ ++/** @internal Used to stop a configured device. */ ++typedef int (*rte_dma_stop_t)(struct rte_dma_dev *dev); ++ ++/** @internal Used to close a configured device. */ ++typedef int (*rte_dma_close_t)(struct rte_dma_dev *dev); ++ ++/** @internal Used to allocate and set up a virtual DMA channel. */ ++typedef int (*rte_dma_vchan_setup_t)(struct rte_dma_dev *dev, uint16_t vchan, ++ const struct rte_dma_vchan_conf *conf, ++ uint32_t conf_sz); ++ ++/** @internal Used to retrieve basic statistics. */ ++typedef int (*rte_dma_stats_get_t)(const struct rte_dma_dev *dev, ++ uint16_t vchan, struct rte_dma_stats *stats, ++ uint32_t stats_sz); ++ ++/** @internal Used to reset basic statistics. */ ++typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan); ++ ++/** @internal Used to check if a virtual channel has finished all jobs. 
*/ ++typedef int (*rte_dma_vchan_status_t)(const struct rte_dma_dev *dev, uint16_t vchan, ++ enum rte_dma_vchan_status *status); ++ ++/** @internal Used to dump internal information. */ ++typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f); ++ ++/** ++ * DMA device operations function pointer table. ++ * ++ * @see struct rte_dma_dev:dev_ops ++ */ ++struct rte_dma_dev_ops { ++ rte_dma_info_get_t dev_info_get; ++ rte_dma_configure_t dev_configure; ++ rte_dma_start_t dev_start; ++ rte_dma_stop_t dev_stop; ++ rte_dma_close_t dev_close; ++ ++ rte_dma_vchan_setup_t vchan_setup; ++ ++ rte_dma_stats_get_t stats_get; ++ rte_dma_stats_reset_t stats_reset; ++ ++ rte_dma_vchan_status_t vchan_status; ++ rte_dma_dump_t dev_dump; ++}; ++ ++/** ++ * @internal ++ * The data part, with no function pointers, associated with each DMA device. ++ * ++ * This structure is safe to place in shared memory to be common among different ++ * processes in a multi-process configuration. ++ * ++ * @see struct rte_dma_dev::data ++ */ ++struct rte_dma_dev_data { ++ char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */ ++ int16_t dev_id; /**< Device [external] identifier. */ ++ int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */ ++ void *dev_private; /**< PMD-specific private data. */ ++ struct rte_dma_conf dev_conf; /**< DMA device configuration. */ ++ __extension__ ++ uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */ ++ uint64_t reserved[2]; /**< Reserved for future fields */ ++} __rte_cache_aligned; ++ ++/** ++ * Possible states of a DMA device. ++ * ++ * @see struct rte_dma_dev::state ++ */ ++enum rte_dma_dev_state { ++ RTE_DMA_DEV_UNUSED = 0, /**< Device is unused. */ ++ /** Device is registered, but not ready to be used. */ ++ RTE_DMA_DEV_REGISTERED, ++ /** Device is ready for use. This is set by the PMD. */ ++ RTE_DMA_DEV_READY, ++}; ++ ++/** ++ * @internal ++ * The generic data structure associated with each DMA device. ++ */ ++struct rte_dma_dev { ++ /** Device info which supplied during device initialization. */ ++ struct rte_device *device; ++ struct rte_dma_dev_data *data; /**< Pointer to shared device data. */ ++ /**< Fast-path functions and related data. */ ++ struct rte_dma_fp_object *fp_obj; ++ /** Functions implemented by PMD. */ ++ const struct rte_dma_dev_ops *dev_ops; ++ enum rte_dma_dev_state state; /**< Flag indicating the device state. */ ++ uint64_t reserved[2]; /**< Reserved for future fields. */ ++} __rte_cache_aligned; ++ ++/** ++ * @internal ++ * Allocate a new dmadev slot for an DMA device and return the pointer to that ++ * slot for the driver to use. ++ * ++ * @param name ++ * DMA device name. ++ * @param numa_node ++ * Driver's private data's NUMA node. ++ * @param private_data_size ++ * Driver's private data size. ++ * ++ * @return ++ * A pointer to the DMA device slot case of success, ++ * NULL otherwise. ++ */ ++__rte_internal ++struct rte_dma_dev *rte_dma_pmd_allocate(const char *name, int numa_node, ++ size_t private_data_size); ++ ++/** ++ * @internal ++ * Release the specified dmadev. ++ * ++ * @param name ++ * DMA device name. ++ * ++ * @return ++ * - 0 on success, negative on error. 
++ */ ++__rte_internal ++int rte_dma_pmd_release(const char *name); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif /* RTE_DMADEV_PMD_H */ +diff --git a/lib/librte_dmadev/version.map b/lib/librte_dmadev/version.map +new file mode 100644 +index 000000000..7031d6b33 +--- /dev/null ++++ b/lib/librte_dmadev/version.map +@@ -0,0 +1,31 @@ ++EXPERIMENTAL { ++ global: ++ ++ rte_dma_close; ++ rte_dma_configure; ++ rte_dma_count_avail; ++ rte_dma_dev_max; ++ rte_dma_dump; ++ rte_dma_get_dev_id_by_name; ++ rte_dma_info_get; ++ rte_dma_is_valid; ++ rte_dma_next_dev; ++ rte_dma_start; ++ rte_dma_stats_get; ++ rte_dma_stats_reset; ++ rte_dma_stop; ++ rte_dma_vchan_setup; ++ rte_dma_vchan_status; ++ ++ local: *; ++}; ++ ++INTERNAL { ++ global: ++ ++ rte_dma_fp_objs; ++ rte_dma_pmd_allocate; ++ rte_dma_pmd_release; ++ ++ local: *; ++}; +diff --git a/lib/meson.build b/lib/meson.build +index ed00f8914..86106240d 100644 +--- a/lib/meson.build ++++ b/lib/meson.build +@@ -25,7 +25,7 @@ libraries = [ + 'gro', 'gso', 'ip_frag', 'jobstats', + 'kni', 'latencystats', 'lpm', 'member', + 'power', 'pdump', 'rawdev', 'regexdev', +- 'rib', 'reorder', 'sched', 'security', 'stack', 'vhost', ++ 'rib', 'reorder', 'sched', 'security', 'stack', 'vhost', 'dmadev', + # ipsec lib depends on net, crypto and security + 'ipsec', + #fib lib depends on rib +-- +2.23.0 + diff --git a/0236-net-hns3-rename-multicast-address-function.patch b/0236-net-hns3-rename-multicast-address-function.patch new file mode 100644 index 0000000000000000000000000000000000000000..cf3067b9d5cbcbbb406b2594abf058b6bd49e919 --- /dev/null +++ b/0236-net-hns3-rename-multicast-address-function.patch @@ -0,0 +1,75 @@ +From d7faa4a4fee44a2fdaa003e3ab8df477d710f76c Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:19:52 +0800 +Subject: [PATCH 03/33] net/hns3: rename multicast address function + +This patch renames hns3_add_mc_addr() to hns3_add_mc_mac_addr(). + +Signed-off-by: Huisong Li +--- + drivers/net/hns3/hns3_ethdev.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 2ae4cb9b7..b67386b1f 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -95,8 +95,8 @@ static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, + static int hns3_update_link_info(struct rte_eth_dev *eth_dev); + static bool hns3_update_link_status(struct hns3_hw *hw); + +-static int hns3_add_mc_addr(struct hns3_hw *hw, +- struct rte_ether_addr *mac_addr); ++static int hns3_add_mc_mac_addr(struct hns3_hw *hw, ++ struct rte_ether_addr *mac_addr); + static int hns3_remove_mc_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr); + static int hns3_restore_fec(struct hns3_hw *hw); +@@ -1630,7 +1630,7 @@ hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + } + } + +- ret = hns3_add_mc_addr(hw, mac_addr); ++ ret = hns3_add_mc_mac_addr(hw, mac_addr); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -1826,7 +1826,7 @@ hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) + continue; + if (rte_is_multicast_ether_addr(addr)) + ret = del ? hns3_remove_mc_addr(hw, addr) : +- hns3_add_mc_addr(hw, addr); ++ hns3_add_mc_mac_addr(hw, addr); + else + ret = del ? 
hns3_remove_uc_addr_common(hw, addr) : + hns3_add_uc_addr_common(hw, addr); +@@ -1872,7 +1872,7 @@ hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr) + } + + static int +-hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) ++hns3_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { + struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM]; + struct hns3_mac_vlan_tbl_entry_cmd req; +@@ -2156,7 +2156,7 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + /* Add mc mac addresses */ + for (i = 0; i < add_addr_num; i++) { + addr = &add_addr_list[i]; +- ret = hns3_add_mc_addr(hw, addr); ++ ret = hns3_add_mc_mac_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; +@@ -2188,7 +2188,7 @@ hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) + if (del) + ret = hns3_remove_mc_addr(hw, addr); + else +- ret = hns3_add_mc_addr(hw, addr); ++ ret = hns3_add_mc_mac_addr(hw, addr); + if (ret) { + err = ret; + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +-- +2.33.0 + diff --git a/0237-net-hns3-rename-unicast-address-function.patch b/0237-net-hns3-rename-unicast-address-function.patch new file mode 100644 index 0000000000000000000000000000000000000000..14441afab8e0809ef4ed1520b82b8aaa34d1f934 --- /dev/null +++ b/0237-net-hns3-rename-unicast-address-function.patch @@ -0,0 +1,65 @@ +From 5182a373038fc21368ffb61450e5e63d63471d4f Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:19:53 +0800 +Subject: [PATCH 04/33] net/hns3: rename unicast address function + +This patch renames hns3_add_uc_addr() to hns3_add_uc_mac_addr(). + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index b67386b1f..83472a83b 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1540,7 +1540,7 @@ hns3_remove_mac_vlan_tbl(struct hns3_hw *hw, + } + + static int +-hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) ++hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_mac_vlan_tbl_entry_cmd req; +@@ -1678,7 +1678,7 @@ hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + if (rte_is_multicast_ether_addr(mac_addr)) + ret = hns3_add_mc_addr_common(hw, mac_addr); + else +- ret = hns3_add_uc_addr_common(hw, mac_addr); ++ ret = hns3_add_uc_mac_addr(hw, mac_addr); + + if (ret) { + rte_spinlock_unlock(&hw->lock); +@@ -1768,7 +1768,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + return ret; + } + +- ret = hns3_add_uc_addr_common(hw, mac_addr); ++ ret = hns3_add_uc_mac_addr(hw, mac_addr); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -1799,7 +1799,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + } + + err_add_uc_addr: +- ret_val = hns3_add_uc_addr_common(hw, oaddr); ++ ret_val = hns3_add_uc_mac_addr(hw, oaddr); + if (ret_val) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr); + hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d", +@@ -1829,7 +1829,7 @@ hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) + hns3_add_mc_mac_addr(hw, addr); + else + ret = del ? 
hns3_remove_uc_addr_common(hw, addr) : +- hns3_add_uc_addr_common(hw, addr); ++ hns3_add_uc_mac_addr(hw, addr); + + if (ret) { + err = ret; +-- +2.33.0 + diff --git a/0238-net-hns3-rename-multicast-address-removal-function.patch b/0238-net-hns3-rename-multicast-address-removal-function.patch new file mode 100644 index 0000000000000000000000000000000000000000..15f12796ab4dedd1db68a47e76a8375b7bf66498 --- /dev/null +++ b/0238-net-hns3-rename-multicast-address-removal-function.patch @@ -0,0 +1,76 @@ +From 11e3b4820eba69f93a623565609fa3e48de6dbdb Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:19:54 +0800 +Subject: [PATCH 05/33] net/hns3: rename multicast address removal function + +This patch renames hns3_remove_mc_addr() to hns3_remove_mc_mac_addr(). + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 83472a83b..e0ec99811 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -97,8 +97,8 @@ static bool hns3_update_link_status(struct hns3_hw *hw); + + static int hns3_add_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr); +-static int hns3_remove_mc_addr(struct hns3_hw *hw, +- struct rte_ether_addr *mac_addr); ++static int hns3_remove_mc_mac_addr(struct hns3_hw *hw, ++ struct rte_ether_addr *mac_addr); + static int hns3_restore_fec(struct hns3_hw *hw); + static int hns3_query_dev_fec_info(struct hns3_hw *hw); + static int hns3_do_stop(struct hns3_adapter *hns); +@@ -1646,7 +1646,7 @@ hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + +- ret = hns3_remove_mc_addr(hw, mac_addr); ++ ret = hns3_remove_mc_mac_addr(hw, mac_addr); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -1825,7 +1825,7 @@ hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) + if (rte_is_zero_ether_addr(addr)) + continue; + if (rte_is_multicast_ether_addr(addr)) +- ret = del ? hns3_remove_mc_addr(hw, addr) : ++ ret = del ? hns3_remove_mc_mac_addr(hw, addr) : + hns3_add_mc_mac_addr(hw, addr); + else + ret = del ? 
hns3_remove_uc_addr_common(hw, addr) : +@@ -1921,7 +1921,7 @@ hns3_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + } + + static int +-hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) ++hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { + struct hns3_mac_vlan_tbl_entry_cmd req; + struct hns3_cmd_desc desc[3]; +@@ -2145,7 +2145,7 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + for (i = 0; i < rm_addr_num; i++) { + num = rm_addr_num - i - 1; + addr = &rm_addr_list[num]; +- ret = hns3_remove_mc_addr(hw, addr); ++ ret = hns3_remove_mc_mac_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; +@@ -2186,7 +2186,7 @@ hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) + if (!rte_is_multicast_ether_addr(addr)) + continue; + if (del) +- ret = hns3_remove_mc_addr(hw, addr); ++ ret = hns3_remove_mc_mac_addr(hw, addr); + else + ret = hns3_add_mc_mac_addr(hw, addr); + if (ret) { +-- +2.33.0 + diff --git a/0239-net-hns3-extract-common-interface-to-check-duplicate.patch b/0239-net-hns3-extract-common-interface-to-check-duplicate.patch new file mode 100644 index 0000000000000000000000000000000000000000..e8d00ea8c57d531cf37a5170b677dd70f2dfdc9c --- /dev/null +++ b/0239-net-hns3-extract-common-interface-to-check-duplicate.patch @@ -0,0 +1,114 @@ +From e7ad3ead98f61e7f759293ad05dfe48627c72e2c Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:19:55 +0800 +Subject: [PATCH 06/33] net/hns3: extract common interface to check duplicates + +Extract a common interface for PF and VF to check whether the configured +multicast MAC address from rte_eth_dev_mac_addr_add() is the same as the +multicast MAC address from rte_eth_dev_set_mc_addr_list(). 
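
As a rough illustration only (not part of this patch, and the wrapper name below is
made up), the shared helper lets both the PF and VF rte_eth_dev_mac_addr_add() paths
reject such duplicates with one check:

	/* Illustrative sketch; hns3_check_mc_addr_added() is hypothetical. */
	static int
	hns3_check_mc_addr_added(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
	{
		if (rte_is_multicast_ether_addr(mac_addr) &&
		    hns3_find_duplicate_mc_addr(hw, mac_addr))
			return -EINVAL;
		return 0;
	}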
+ +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 25 ++++++++++++++++++------- + drivers/net/hns3/hns3_ethdev.h | 4 ++++ + drivers/net/hns3/hns3_ethdev_vf.c | 16 ++-------------- + 3 files changed, 24 insertions(+), 21 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index e0ec99811..f1346ee9f 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1609,27 +1609,38 @@ hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + return ret; + } + +-static int +-hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) ++bool ++hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr) + { + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *addr; +- int ret; + int i; + + for (i = 0; i < hw->mc_addrs_num; i++) { + addr = &hw->mc_addrs[i]; +- /* Check if there are duplicate addresses */ +- if (rte_is_same_ether_addr(addr, mac_addr)) { ++ /* Check if there are duplicate addresses in mc_addrs[] */ ++ if (rte_is_same_ether_addr(addr, mc_addr)) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- addr); ++ addr); + hns3_err(hw, "failed to add mc mac addr, same addrs" + "(%s) is added by the set_mc_mac_addr_list " + "API", mac_str); +- return -EINVAL; ++ return true; + } + } + ++ return false; ++} ++ ++static int ++hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) ++{ ++ char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ int ret; ++ ++ if (hns3_find_duplicate_mc_addr(hw, mac_addr)) ++ return -EINVAL; ++ + ret = hns3_add_mc_mac_addr(hw, mac_addr); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 84f5a9f29..a97406198 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -1049,6 +1049,10 @@ void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + uint32_t link_speed, uint8_t link_duplex); + void hns3_parse_devargs(struct rte_eth_dev *dev); + void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); ++ ++bool hns3_find_duplicate_mc_addr(struct hns3_hw *hw, ++ struct rte_ether_addr *mc_addr); ++ + int hns3_restore_ptp(struct hns3_adapter *hns); + int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev, + struct rte_eth_conf *conf); +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index 29313c2f7..f60849606 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -208,22 +208,10 @@ static int + hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- struct rte_ether_addr *addr; + int ret; +- int i; + +- for (i = 0; i < hw->mc_addrs_num; i++) { +- addr = &hw->mc_addrs[i]; +- /* Check if there are duplicate addresses */ +- if (rte_is_same_ether_addr(addr, mac_addr)) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, "failed to add mc mac addr, same addrs" +- "(%s) is added by the set_mc_mac_addr_list " +- "API", mac_str); +- return -EINVAL; +- } +- } ++ if (hns3_find_duplicate_mc_addr(hw, mac_addr)) ++ return -EINVAL; + + ret = hns3vf_add_mc_mac_addr(hw, mac_addr); + if (ret) { +-- +2.33.0 + diff --git a/0240-net-hns3-remove-redundant-multicast-MAC-interface.patch b/0240-net-hns3-remove-redundant-multicast-MAC-interface.patch new file 
mode 100644 index 0000000000000000000000000000000000000000..f23b4e174144fe2a6d30334da385114628b7c665 --- /dev/null +++ b/0240-net-hns3-remove-redundant-multicast-MAC-interface.patch @@ -0,0 +1,117 @@ +From b84b0e518dc64f3aada5b30511db1c6f6fdb0694 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:19:56 +0800 +Subject: [PATCH 07/33] net/hns3: remove redundant multicast MAC interface + +This patch removes hns3_add_mc_addr_common() in PF and +hns3vf_add_mc_addr_common() in VF. + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 31 ++++++++----------------------- + drivers/net/hns3/hns3_ethdev_vf.c | 30 ++++++++---------------------- + 2 files changed, 16 insertions(+), 45 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index f1346ee9f..7f4419c54 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1632,25 +1632,6 @@ hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr) + return false; + } + +-static int +-hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +-{ +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- int ret; +- +- if (hns3_find_duplicate_mc_addr(hw, mac_addr)) +- return -EINVAL; +- +- ret = hns3_add_mc_mac_addr(hw, mac_addr); +- if (ret) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- mac_addr); +- hns3_err(hw, "failed to add mc mac addr(%s), ret = %d", +- mac_str, ret); +- } +- return ret; +-} +- + static int + hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { +@@ -1686,11 +1667,15 @@ hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + * using the rte_eth_dev_mac_addr_add API function to set MC mac address + * may affect the specifications of UC mac addresses. + */ +- if (rte_is_multicast_ether_addr(mac_addr)) +- ret = hns3_add_mc_addr_common(hw, mac_addr); +- else ++ if (rte_is_multicast_ether_addr(mac_addr)) { ++ if (hns3_find_duplicate_mc_addr(hw, mac_addr)) { ++ rte_spinlock_unlock(&hw->lock); ++ return -EINVAL; ++ } ++ ret = hns3_add_mc_mac_addr(hw, mac_addr); ++ } else { + ret = hns3_add_uc_mac_addr(hw, mac_addr); +- ++ } + if (ret) { + rte_spinlock_unlock(&hw->lock); + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index f60849606..92673d29b 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -204,25 +204,6 @@ hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + return ret; + } + +-static int +-hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +-{ +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- int ret; +- +- if (hns3_find_duplicate_mc_addr(hw, mac_addr)) +- return -EINVAL; +- +- ret = hns3vf_add_mc_mac_addr(hw, mac_addr); +- if (ret) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- mac_addr); +- hns3_err(hw, "failed to add mc mac addr(%s), ret = %d", +- mac_str, ret); +- } +- return ret; +-} +- + static int + hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + __rte_unused uint32_t idx, +@@ -243,10 +224,15 @@ hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + * using the rte_eth_dev_mac_addr_add API function to set MC mac address + * may affect the specifications of UC mac addresses. 
+ */ +- if (rte_is_multicast_ether_addr(mac_addr)) +- ret = hns3vf_add_mc_addr_common(hw, mac_addr); +- else ++ if (rte_is_multicast_ether_addr(mac_addr)) { ++ if (hns3_find_duplicate_mc_addr(hw, mac_addr)) { ++ rte_spinlock_unlock(&hw->lock); ++ return -EINVAL; ++ } ++ ret = hns3vf_add_mc_mac_addr(hw, mac_addr); ++ } else { + ret = hns3vf_add_uc_mac_addr(hw, mac_addr); ++ } + + rte_spinlock_unlock(&hw->lock); + if (ret) { +-- +2.33.0 + diff --git a/0241-net-hns3-rename-unicast-address-removal-function.patch b/0241-net-hns3-rename-unicast-address-removal-function.patch new file mode 100644 index 0000000000000000000000000000000000000000..03175c30dff5ae0a52d9d96a54cb91e4f973ce15 --- /dev/null +++ b/0241-net-hns3-rename-unicast-address-removal-function.patch @@ -0,0 +1,66 @@ +From d89d75e54b10a18d40306e3dcc8921275cc9b81b Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:19:57 +0800 +Subject: [PATCH 08/33] net/hns3: rename unicast address removal function + +This patch renames hns3_remove_uc_addr_common() to +hns3_remove_uc_mac_addr() in PF. + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 7f4419c54..485995a43 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1691,7 +1691,7 @@ hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + } + + static int +-hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) ++hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { + struct hns3_mac_vlan_tbl_entry_cmd req; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +@@ -1732,7 +1732,7 @@ hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) + if (rte_is_multicast_ether_addr(mac_addr)) + ret = hns3_remove_mc_addr_common(hw, mac_addr); + else +- ret = hns3_remove_uc_addr_common(hw, mac_addr); ++ ret = hns3_remove_uc_mac_addr(hw, mac_addr); + rte_spinlock_unlock(&hw->lock); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +@@ -1753,7 +1753,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + + rte_spinlock_lock(&hw->lock); + oaddr = (struct rte_ether_addr *)hw->mac.mac_addr; +- ret = hns3_remove_uc_addr_common(hw, oaddr); ++ ret = hns3_remove_uc_mac_addr(hw, oaddr); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + oaddr); +@@ -1785,7 +1785,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + return 0; + + err_pause_addr_cfg: +- ret_val = hns3_remove_uc_addr_common(hw, mac_addr); ++ ret_val = hns3_remove_uc_mac_addr(hw, mac_addr); + if (ret_val) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -1824,7 +1824,7 @@ hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) + ret = del ? hns3_remove_mc_mac_addr(hw, addr) : + hns3_add_mc_mac_addr(hw, addr); + else +- ret = del ? hns3_remove_uc_addr_common(hw, addr) : ++ ret = del ? 
hns3_remove_uc_mac_addr(hw, addr) : + hns3_add_uc_mac_addr(hw, addr); + + if (ret) { +-- +2.33.0 + diff --git a/0242-net-hns3-remove-redundant-multicast-removal-interfac.patch b/0242-net-hns3-remove-redundant-multicast-removal-interfac.patch new file mode 100644 index 0000000000000000000000000000000000000000..f2aa98c7f15121c7dcb966983938eaf93da7d52e --- /dev/null +++ b/0242-net-hns3-remove-redundant-multicast-removal-interfac.patch @@ -0,0 +1,53 @@ +From 3e96668301de4168ef51a6b43535b851c25290da Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:19:58 +0800 +Subject: [PATCH 09/33] net/hns3: remove redundant multicast removal interface + +This patch removes redundant hns3_remove_mc_addr_common(), which can be +replaced by hns3_remove_mc_mac_addr(). + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 18 +----------------- + 1 file changed, 1 insertion(+), 17 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 485995a43..a2d365a28 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1632,22 +1632,6 @@ hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr) + return false; + } + +-static int +-hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +-{ +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- int ret; +- +- ret = hns3_remove_mc_mac_addr(hw, mac_addr); +- if (ret) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- mac_addr); +- hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d", +- mac_str, ret); +- } +- return ret; +-} +- + static int + hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + __rte_unused uint32_t idx, __rte_unused uint32_t pool) +@@ -1730,7 +1714,7 @@ hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) + rte_spinlock_lock(&hw->lock); + + if (rte_is_multicast_ether_addr(mac_addr)) +- ret = hns3_remove_mc_addr_common(hw, mac_addr); ++ ret = hns3_remove_mc_mac_addr(hw, mac_addr); + else + ret = hns3_remove_uc_mac_addr(hw, mac_addr); + rte_spinlock_unlock(&hw->lock); +-- +2.33.0 + diff --git a/0243-net-hns3-add-HW-ops-structure-to-operate-hardware.patch b/0243-net-hns3-add-HW-ops-structure-to-operate-hardware.patch new file mode 100644 index 0000000000000000000000000000000000000000..36f62c9d8fac31763a4fecf44bb0ca0085b0e973 --- /dev/null +++ b/0243-net-hns3-add-HW-ops-structure-to-operate-hardware.patch @@ -0,0 +1,106 @@ +From ce3a4cda823aabc34c9166022b0cdb102723ef2a Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:19:59 +0800 +Subject: [PATCH 10/33] net/hns3: add HW ops structure to operate hardware + +This patch adds hns3_hw_ops structure to operate hardware in PF and VF +driver. 
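
A rough sketch of the intent (not part of this patch; the helper below is hypothetical):
once hw->ops is filled in by the PF or VF init path, common code can drive either backend
through the same function pointers, for example:

	/* Illustrative only: PF and VF differ solely in the callbacks
	 * installed in hw->ops.
	 */
	static int
	hns3_cfg_one_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *addr, bool del)
	{
		return del ? hw->ops.del_mc_mac_addr(hw, addr) :
			     hw->ops.add_mc_mac_addr(hw, addr);
	}

The follow-on patches in this series move the common MAC configuration paths onto
hw->ops in this way.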
+ +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 10 ++++++++++ + drivers/net/hns3/hns3_ethdev.h | 13 +++++++++++++ + drivers/net/hns3/hns3_ethdev_vf.c | 10 ++++++++++ + 3 files changed, 33 insertions(+) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index a2d365a28..48c6483e1 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -7478,6 +7478,15 @@ static const struct hns3_reset_ops hns3_reset_ops = { + .start_service = hns3_start_service, + }; + ++static void ++hns3_init_hw_ops(struct hns3_hw *hw) ++{ ++ hw->ops.add_mc_mac_addr = hns3_add_mc_mac_addr; ++ hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr; ++ hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr; ++ hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr; ++} ++ + static int + hns3_dev_init(struct rte_eth_dev *eth_dev) + { +@@ -7530,6 +7539,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) + goto err_init_reset; + hw->reset.ops = &hns3_reset_ops; + ++ hns3_init_hw_ops(hw); + ret = hns3_init_pf(eth_dev); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret); +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index a97406198..73947e194 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -428,6 +428,17 @@ struct hns3_reset_data { + struct hns3_wait_data *wait_data; + }; + ++struct hns3_hw_ops { ++ int (*add_mc_mac_addr)(struct hns3_hw *hw, ++ struct rte_ether_addr *mac_addr); ++ int (*del_mc_mac_addr)(struct hns3_hw *hw, ++ struct rte_ether_addr *mac_addr); ++ int (*add_uc_mac_addr)(struct hns3_hw *hw, ++ struct rte_ether_addr *mac_addr); ++ int (*del_uc_mac_addr)(struct hns3_hw *hw, ++ struct rte_ether_addr *mac_addr); ++}; ++ + #define HNS3_INTR_MAPPING_VEC_RSV_ONE 0 + #define HNS3_INTR_MAPPING_VEC_ALL 1 + +@@ -638,6 +649,8 @@ struct hns3_hw { + struct hns3_rss_filter_list flow_rss_list; /* flow RSS rule list */ + struct hns3_flow_mem_list flow_list; + ++ struct hns3_hw_ops ops; ++ + /* + * PMD setup and configuration is not thread safe. 
Since it is not + * performance sensitive, it is better to guarantee thread-safety +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index 92673d29b..1020b42e1 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -2920,6 +2920,15 @@ static const struct hns3_reset_ops hns3vf_reset_ops = { + .start_service = hns3vf_start_service, + }; + ++static void ++hns3vf_init_hw_ops(struct hns3_hw *hw) ++{ ++ hw->ops.add_mc_mac_addr = hns3vf_add_mc_mac_addr; ++ hw->ops.del_mc_mac_addr = hns3vf_remove_mc_mac_addr; ++ hw->ops.add_uc_mac_addr = hns3vf_add_uc_mac_addr; ++ hw->ops.del_uc_mac_addr = hns3vf_remove_uc_mac_addr; ++} ++ + static int + hns3vf_dev_init(struct rte_eth_dev *eth_dev) + { +@@ -2964,6 +2973,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) + goto err_init_reset; + hw->reset.ops = &hns3vf_reset_ops; + ++ hns3vf_init_hw_ops(hw); + ret = hns3vf_init_vf(eth_dev); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret); +-- +2.33.0 + diff --git a/0244-net-hns3-use-HW-ops-to-config-MAC-features.patch b/0244-net-hns3-use-HW-ops-to-config-MAC-features.patch new file mode 100644 index 0000000000000000000000000000000000000000..2529e4dcc2dea25319f0f9354c09075a3c9e87b8 --- /dev/null +++ b/0244-net-hns3-use-HW-ops-to-config-MAC-features.patch @@ -0,0 +1,213 @@ +From 6e06ab138687a620035dbc3643c115d2199f5058 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:20:00 +0800 +Subject: [PATCH 11/33] net/hns3: use HW ops to config MAC features + +This patch uses APIs in hns3_hw_ops to configure MAC related features. + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 32 +++++++++++++++---------------- + drivers/net/hns3/hns3_ethdev_vf.c | 27 +++++++++++++------------- + 2 files changed, 30 insertions(+), 29 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 48c6483e1..00016d58e 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1656,9 +1656,9 @@ hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + rte_spinlock_unlock(&hw->lock); + return -EINVAL; + } +- ret = hns3_add_mc_mac_addr(hw, mac_addr); ++ ret = hw->ops.add_mc_mac_addr(hw, mac_addr); + } else { +- ret = hns3_add_uc_mac_addr(hw, mac_addr); ++ ret = hw->ops.add_uc_mac_addr(hw, mac_addr); + } + if (ret) { + rte_spinlock_unlock(&hw->lock); +@@ -1714,9 +1714,9 @@ hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) + rte_spinlock_lock(&hw->lock); + + if (rte_is_multicast_ether_addr(mac_addr)) +- ret = hns3_remove_mc_mac_addr(hw, mac_addr); ++ ret = hw->ops.del_mc_mac_addr(hw, mac_addr); + else +- ret = hns3_remove_uc_mac_addr(hw, mac_addr); ++ ret = hw->ops.del_uc_mac_addr(hw, mac_addr); + rte_spinlock_unlock(&hw->lock); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +@@ -1737,7 +1737,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + + rte_spinlock_lock(&hw->lock); + oaddr = (struct rte_ether_addr *)hw->mac.mac_addr; +- ret = hns3_remove_uc_mac_addr(hw, oaddr); ++ ret = hw->ops.del_uc_mac_addr(hw, oaddr); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + oaddr); +@@ -1748,7 +1748,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + return ret; + } + +- ret = hns3_add_uc_mac_addr(hw, mac_addr); ++ ret = hw->ops.add_uc_mac_addr(hw, mac_addr); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ 
-1769,7 +1769,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + return 0; + + err_pause_addr_cfg: +- ret_val = hns3_remove_uc_mac_addr(hw, mac_addr); ++ ret_val = hw->ops.del_uc_mac_addr(hw, mac_addr); + if (ret_val) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -1779,7 +1779,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + } + + err_add_uc_addr: +- ret_val = hns3_add_uc_mac_addr(hw, oaddr); ++ ret_val = hw->ops.add_uc_mac_addr(hw, oaddr); + if (ret_val) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr); + hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d", +@@ -1805,11 +1805,11 @@ hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) + if (rte_is_zero_ether_addr(addr)) + continue; + if (rte_is_multicast_ether_addr(addr)) +- ret = del ? hns3_remove_mc_mac_addr(hw, addr) : +- hns3_add_mc_mac_addr(hw, addr); ++ ret = del ? ops->del_mc_mac_addr(hw, addr) : ++ ops->add_mc_mac_addr(hw, addr); + else +- ret = del ? hns3_remove_uc_mac_addr(hw, addr) : +- hns3_add_uc_mac_addr(hw, addr); ++ ret = del ? ops->del_uc_mac_addr(hw, addr) : ++ ops->add_uc_mac_addr(hw, addr); + + if (ret) { + err = ret; +@@ -2125,7 +2125,7 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + for (i = 0; i < rm_addr_num; i++) { + num = rm_addr_num - i - 1; + addr = &rm_addr_list[num]; +- ret = hns3_remove_mc_mac_addr(hw, addr); ++ ret = hw->ops.del_mc_mac_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; +@@ -2136,7 +2136,7 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + /* Add mc mac addresses */ + for (i = 0; i < add_addr_num; i++) { + addr = &add_addr_list[i]; +- ret = hns3_add_mc_mac_addr(hw, addr); ++ ret = hw->ops.add_mc_mac_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; +@@ -2166,9 +2166,9 @@ hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) + if (!rte_is_multicast_ether_addr(addr)) + continue; + if (del) +- ret = hns3_remove_mc_mac_addr(hw, addr); ++ ret = hw->ops.del_mc_mac_addr(hw, addr); + else +- ret = hns3_add_mc_mac_addr(hw, addr); ++ ret = hw->ops.add_mc_mac_addr(hw, addr); + if (ret) { + err = ret; + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index 1020b42e1..f9c5e3b4f 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -229,9 +229,9 @@ hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + rte_spinlock_unlock(&hw->lock); + return -EINVAL; + } +- ret = hns3vf_add_mc_mac_addr(hw, mac_addr); ++ ret = hw->ops.add_mc_mac_addr(hw, mac_addr); + } else { +- ret = hns3vf_add_uc_mac_addr(hw, mac_addr); ++ ret = hw->ops.add_uc_mac_addr(hw, mac_addr); + } + + rte_spinlock_unlock(&hw->lock); +@@ -257,9 +257,9 @@ hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) + rte_spinlock_lock(&hw->lock); + + if (rte_is_multicast_ether_addr(mac_addr)) +- ret = hns3vf_remove_mc_mac_addr(hw, mac_addr); ++ ret = hw->ops.del_mc_mac_addr(hw, mac_addr); + else +- ret = hns3vf_remove_uc_mac_addr(hw, mac_addr); ++ ret = hw->ops.del_uc_mac_addr(hw, mac_addr); + + rte_spinlock_unlock(&hw->lock); + if (ret) { +@@ -326,9 +326,10 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + static int + hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del) + { ++ char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct hns3_hw *hw = &hns->hw; ++ struct hns3_hw_ops *ops = &hw->ops; + struct rte_ether_addr 
*addr; +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int err = 0; + int ret; + int i; +@@ -338,11 +339,11 @@ hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del) + if (rte_is_zero_ether_addr(addr)) + continue; + if (rte_is_multicast_ether_addr(addr)) +- ret = del ? hns3vf_remove_mc_mac_addr(hw, addr) : +- hns3vf_add_mc_mac_addr(hw, addr); ++ ret = del ? ops->del_mc_mac_addr(hw, addr) : ++ ops->add_mc_mac_addr(hw, addr); + else +- ret = del ? hns3vf_remove_uc_mac_addr(hw, addr) : +- hns3vf_add_uc_mac_addr(hw, addr); ++ ret = del ? ops->del_uc_mac_addr(hw, addr) : ++ ops->add_uc_mac_addr(hw, addr); + + if (ret) { + err = ret; +@@ -484,7 +485,7 @@ hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev, + for (i = 0; i < cur_addr_num; i++) { + num = cur_addr_num - i - 1; + addr = &hw->mc_addrs[num]; +- ret = hns3vf_remove_mc_mac_addr(hw, addr); ++ ret = hw->ops.del_mc_mac_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; +@@ -496,7 +497,7 @@ hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev, + set_addr_num = (int)nb_mc_addr; + for (i = 0; i < set_addr_num; i++) { + addr = &mc_addr_set[i]; +- ret = hns3vf_add_mc_mac_addr(hw, addr); ++ ret = hw->ops.add_mc_mac_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; +@@ -525,9 +526,9 @@ hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) + if (!rte_is_multicast_ether_addr(addr)) + continue; + if (del) +- ret = hns3vf_remove_mc_mac_addr(hw, addr); ++ ret = hw->ops.del_mc_mac_addr(hw, addr); + else +- ret = hns3vf_add_mc_mac_addr(hw, addr); ++ ret = hw->ops.add_mc_mac_addr(hw, addr); + if (ret) { + err = ret; + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +-- +2.33.0 + diff --git a/0245-net-hns3-unify-MAC-and-multicast-address-configurati.patch b/0245-net-hns3-unify-MAC-and-multicast-address-configurati.patch new file mode 100644 index 0000000000000000000000000000000000000000..7319dd15fc9f83ad3bc17398e455a913a23ce086 --- /dev/null +++ b/0245-net-hns3-unify-MAC-and-multicast-address-configurati.patch @@ -0,0 +1,251 @@ +From 67d013484d9b521fd174e8485f7ebed333195bca Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:20:01 +0800 +Subject: [PATCH 12/33] net/hns3: unify MAC and multicast address configuration + +Currently, the interface logic for adding and deleting all MAC address +and multicast address in PF and VF driver is the same. This patch +extracts two common interfaces to configure them separately. + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 32 ++++++------- + drivers/net/hns3/hns3_ethdev.h | 2 + + drivers/net/hns3/hns3_ethdev_vf.c | 77 +++---------------------------- + 3 files changed, 25 insertions(+), 86 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 00016d58e..bdd29220a 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1790,17 +1790,20 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + return ret; + } + +-static int ++int + hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) + { + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct hns3_hw *hw = &hns->hw; ++ struct hns3_hw_ops *ops = &hw->ops; + struct rte_ether_addr *addr; +- int err = 0; +- int ret; ++ uint16_t mac_addrs_capa; ++ int ret = 0; + int i; + +- for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) { ++ mac_addrs_capa = ++ hns->is_vf ? 
HNS3_VF_UC_MACADDR_NUM : HNS3_UC_MACADDR_NUM; ++ for (i = 0; i < mac_addrs_capa; i++) { + addr = &hw->data->mac_addrs[i]; + if (rte_is_zero_ether_addr(addr)) + continue; +@@ -1812,15 +1815,14 @@ hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) + ops->add_uc_mac_addr(hw, addr); + + if (ret) { +- err = ret; + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, "failed to %s mac addr(%s) index:%d " +- "ret = %d.", del ? "remove" : "restore", +- mac_str, i, ret); ++ addr); ++ hns3_err(hw, "failed to %s mac addr(%s) index:%d ret = %d.", ++ del ? "remove" : "restore", mac_str, i, ret); + } + } +- return err; ++ ++ return ret; + } + + static void +@@ -2151,14 +2153,13 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + return 0; + } + +-static int ++int + hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) + { + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct hns3_hw *hw = &hns->hw; + struct rte_ether_addr *addr; +- int err = 0; +- int ret; ++ int ret = 0; + int i; + + for (i = 0; i < hw->mc_addrs_num; i++) { +@@ -2170,14 +2171,13 @@ hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) + else + ret = hw->ops.add_mc_mac_addr(hw, addr); + if (ret) { +- err = ret; + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + addr); +- hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d", ++ hns3_dbg(hw, "failed to %s mc mac addr: %s ret = %d", + del ? "Remove" : "Restore", mac_str, ret); + } + } +- return err; ++ return ret; + } + + static int +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 73947e194..942e8419c 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -1065,6 +1065,8 @@ void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); + + bool hns3_find_duplicate_mc_addr(struct hns3_hw *hw, + struct rte_ether_addr *mc_addr); ++int hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del); ++int hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del); + + int hns3_restore_ptp(struct hns3_adapter *hns); + int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev, +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index f9c5e3b4f..cce4d3450 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -323,40 +323,6 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + return ret; + } + +-static int +-hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del) +-{ +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- struct hns3_hw *hw = &hns->hw; +- struct hns3_hw_ops *ops = &hw->ops; +- struct rte_ether_addr *addr; +- int err = 0; +- int ret; +- int i; +- +- for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) { +- addr = &hw->data->mac_addrs[i]; +- if (rte_is_zero_ether_addr(addr)) +- continue; +- if (rte_is_multicast_ether_addr(addr)) +- ret = del ? ops->del_mc_mac_addr(hw, addr) : +- ops->add_mc_mac_addr(hw, addr); +- else +- ret = del ? ops->del_uc_mac_addr(hw, addr) : +- ops->add_uc_mac_addr(hw, addr); +- +- if (ret) { +- err = ret; +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, "failed to %s mac addr(%s) index:%d " +- "ret = %d.", del ? 
"remove" : "restore", +- mac_str, i, ret); +- } +- } +- return err; +-} +- + static int + hns3vf_add_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr) +@@ -511,35 +477,6 @@ hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev, + return 0; + } + +-static int +-hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) +-{ +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- struct hns3_hw *hw = &hns->hw; +- struct rte_ether_addr *addr; +- int err = 0; +- int ret; +- int i; +- +- for (i = 0; i < hw->mc_addrs_num; i++) { +- addr = &hw->mc_addrs[i]; +- if (!rte_is_multicast_ether_addr(addr)) +- continue; +- if (del) +- ret = hw->ops.del_mc_mac_addr(hw, addr); +- else +- ret = hw->ops.add_mc_mac_addr(hw, addr); +- if (ret) { +- err = ret; +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d", +- del ? "Remove" : "Restore", mac_str, ret); +- } +- } +- return err; +-} +- + static int + hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, + bool en_uc_pmc, bool en_mc_pmc) +@@ -2077,7 +2014,7 @@ hns3vf_do_stop(struct hns3_adapter *hns) + hns3_dev_release_mbufs(hns); + + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { +- hns3vf_configure_mac_addr(hns, true); ++ hns3_configure_all_mac_addr(hns, true); + ret = hns3_reset_all_tqps(hns); + if (ret) { + hns3_err(hw, "failed to reset all queues ret = %d", +@@ -2172,7 +2109,7 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) + hns3_reset_abort(hns); + hw->adapter_state = HNS3_NIC_CLOSED; + rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev); +- hns3vf_configure_all_mc_mac_addr(hns, true); ++ hns3_configure_all_mc_mac_addr(hns, true); + hns3vf_remove_all_vlan_table(hns); + hns3vf_uninit_vf(eth_dev); + hns3_free_all_queues(eth_dev); +@@ -2598,7 +2535,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) + * required to delete the entries. + */ + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) +- hns3vf_configure_all_mc_mac_addr(hns, true); ++ hns3_configure_all_mc_mac_addr(hns, true); + rte_spinlock_unlock(&hw->lock); + + return 0; +@@ -2684,11 +2621,11 @@ hns3vf_restore_conf(struct hns3_adapter *hns) + if (ret) + return ret; + +- ret = hns3vf_configure_mac_addr(hns, false); ++ ret = hns3_configure_all_mac_addr(hns, false); + if (ret) + return ret; + +- ret = hns3vf_configure_all_mc_mac_addr(hns, false); ++ ret = hns3_configure_all_mc_mac_addr(hns, false); + if (ret) + goto err_mc_mac; + +@@ -2729,9 +2666,9 @@ hns3vf_restore_conf(struct hns3_adapter *hns) + return 0; + + err_vlan_table: +- hns3vf_configure_all_mc_mac_addr(hns, true); ++ hns3_configure_all_mc_mac_addr(hns, true); + err_mc_mac: +- hns3vf_configure_mac_addr(hns, true); ++ hns3_configure_all_mac_addr(hns, true); + return ret; + } + +-- +2.33.0 + diff --git a/0246-net-hns3-unify-MAC-address-add-and-remove.patch b/0246-net-hns3-unify-MAC-address-add-and-remove.patch new file mode 100644 index 0000000000000000000000000000000000000000..d6b4a5d97f377c65790c90ac581d919dab271b7b --- /dev/null +++ b/0246-net-hns3-unify-MAC-address-add-and-remove.patch @@ -0,0 +1,177 @@ +From f1e4c77136cc5d65606ad07cd7204c0994c14904 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:20:02 +0800 +Subject: [PATCH 13/33] net/hns3: unify MAC address add and remove + +The code logic of adding and removing MAC address in PF and VF is the +same. +This patch extracts two common interfaces to add and remove them +separately. 
+ +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 11 ++--- + drivers/net/hns3/hns3_ethdev.h | 5 ++- + drivers/net/hns3/hns3_ethdev_vf.c | 70 +------------------------------ + 3 files changed, 9 insertions(+), 77 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index bdd29220a..bf49d5f75 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1609,7 +1609,7 @@ hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + return ret; + } + +-bool ++static bool + hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr) + { + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +@@ -1632,7 +1632,7 @@ hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr) + return false; + } + +-static int ++int + hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + __rte_unused uint32_t idx, __rte_unused uint32_t pool) + { +@@ -1660,17 +1660,14 @@ hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + } else { + ret = hw->ops.add_uc_mac_addr(hw, mac_addr); + } ++ rte_spinlock_unlock(&hw->lock); + if (ret) { +- rte_spinlock_unlock(&hw->lock); + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, + ret); +- return ret; + } + +- rte_spinlock_unlock(&hw->lock); +- + return ret; + } + +@@ -1702,7 +1699,7 @@ hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + return ret; + } + +-static void ++void + hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 942e8419c..276ac8b54 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -1063,10 +1063,11 @@ void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + void hns3_parse_devargs(struct rte_eth_dev *dev); + void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); + +-bool hns3_find_duplicate_mc_addr(struct hns3_hw *hw, +- struct rte_ether_addr *mc_addr); + int hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del); + int hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del); ++int hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, ++ __rte_unused uint32_t idx, __rte_unused uint32_t pool); ++void hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx); + + int hns3_restore_ptp(struct hns3_adapter *hns); + int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev, +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index cce4d3450..fb7eda21d 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -204,72 +204,6 @@ hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + return ret; + } + +-static int +-hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, +- __rte_unused uint32_t idx, +- __rte_unused uint32_t pool) +-{ +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- int ret; +- +- rte_spinlock_lock(&hw->lock); +- +- /* +- * In hns3 network engine adding UC and MC mac address with different +- * commands with firmware. 
We need to determine whether the input +- * address is a UC or a MC address to call different commands. +- * By the way, it is recommended calling the API function named +- * rte_eth_dev_set_mc_addr_list to set the MC mac address, because +- * using the rte_eth_dev_mac_addr_add API function to set MC mac address +- * may affect the specifications of UC mac addresses. +- */ +- if (rte_is_multicast_ether_addr(mac_addr)) { +- if (hns3_find_duplicate_mc_addr(hw, mac_addr)) { +- rte_spinlock_unlock(&hw->lock); +- return -EINVAL; +- } +- ret = hw->ops.add_mc_mac_addr(hw, mac_addr); +- } else { +- ret = hw->ops.add_uc_mac_addr(hw, mac_addr); +- } +- +- rte_spinlock_unlock(&hw->lock); +- if (ret) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- mac_addr); +- hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, +- ret); +- } +- +- return ret; +-} +- +-static void +-hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) +-{ +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- /* index will be checked by upper level rte interface */ +- struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx]; +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- int ret; +- +- rte_spinlock_lock(&hw->lock); +- +- if (rte_is_multicast_ether_addr(mac_addr)) +- ret = hw->ops.del_mc_mac_addr(hw, mac_addr); +- else +- ret = hw->ops.del_uc_mac_addr(hw, mac_addr); +- +- rte_spinlock_unlock(&hw->lock); +- if (ret) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- mac_addr); +- hns3_err(hw, "failed to remove mac addr(%s), ret = %d", +- mac_str, ret); +- } +-} +- + static int + hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) +@@ -2831,8 +2765,8 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = { + .txq_info_get = hns3_txq_info_get, + .rx_burst_mode_get = hns3_rx_burst_mode_get, + .tx_burst_mode_get = hns3_tx_burst_mode_get, +- .mac_addr_add = hns3vf_add_mac_addr, +- .mac_addr_remove = hns3vf_remove_mac_addr, ++ .mac_addr_add = hns3_add_mac_addr, ++ .mac_addr_remove = hns3_remove_mac_addr, + .mac_addr_set = hns3vf_set_default_mac_addr, + .set_mc_addr_list = hns3vf_set_mc_mac_addr_list, + .link_update = hns3vf_dev_link_update, +-- +2.33.0 + diff --git a/0247-net-hns3-unify-multicast-address-check.patch b/0247-net-hns3-unify-multicast-address-check.patch new file mode 100644 index 0000000000000000000000000000000000000000..0cb7785169a44e0d0ed14c422e34ee4dd44a9119 --- /dev/null +++ b/0247-net-hns3-unify-multicast-address-check.patch @@ -0,0 +1,157 @@ +From 35da8e628f28e84f1f7b1dff10a984a44bae44e0 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:20:03 +0800 +Subject: [PATCH 14/33] net/hns3: unify multicast address check + +This patch uniforms a common function to check multicast address +validity for PF and VF. 
+ +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 12 ++++-- + drivers/net/hns3/hns3_ethdev.h | 4 +- + drivers/net/hns3/hns3_ethdev_vf.c | 66 +------------------------------ + 3 files changed, 12 insertions(+), 70 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index bf49d5f75..97129c428 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1947,13 +1947,15 @@ hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + return ret; + } + +-static int ++int + hns3_set_mc_addr_chk_param(struct hns3_hw *hw, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) + { ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *addr; ++ uint16_t mac_addrs_capa; + uint32_t i; + uint32_t j; + +@@ -1993,12 +1995,14 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw, + * Check if there are duplicate addresses between mac_addrs + * and mc_addr_set + */ +- for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) { ++ mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : ++ HNS3_UC_MACADDR_NUM; ++ for (j = 0; j < mac_addrs_capa; j++) { + if (rte_is_same_ether_addr(addr, + &hw->data->mac_addrs[j])) { + hns3_ether_format_addr(mac_str, +- RTE_ETHER_ADDR_FMT_SIZE, +- addr); ++ RTE_ETHER_ADDR_FMT_SIZE, ++ addr); + hns3_err(hw, "failed to set mc mac addr, " + "addrs invalid. addrs(%s) has already " + "configured in mac_addr add API", +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 276ac8b54..1606a6407 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -1062,7 +1062,9 @@ void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + uint32_t link_speed, uint8_t link_duplex); + void hns3_parse_devargs(struct rte_eth_dev *dev); + void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); +- ++int hns3_set_mc_addr_chk_param(struct hns3_hw *hw, ++ struct rte_ether_addr *mc_addr_set, ++ uint32_t nb_mc_addr); + int hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del); + int hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del); + int hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index fb7eda21d..835e783c3 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -299,70 +299,6 @@ hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, + return ret; + } + +-static int +-hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw, +- struct rte_ether_addr *mc_addr_set, +- uint32_t nb_mc_addr) +-{ +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- struct rte_ether_addr *addr; +- uint32_t i; +- uint32_t j; +- +- if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { +- hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " +- "invalid. 
valid range: 0~%d", +- nb_mc_addr, HNS3_MC_MACADDR_NUM); +- return -EINVAL; +- } +- +- /* Check if input mac addresses are valid */ +- for (i = 0; i < nb_mc_addr; i++) { +- addr = &mc_addr_set[i]; +- if (!rte_is_multicast_ether_addr(addr)) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, +- "failed to set mc mac addr, addr(%s) invalid.", +- mac_str); +- return -EINVAL; +- } +- +- /* Check if there are duplicate addresses */ +- for (j = i + 1; j < nb_mc_addr; j++) { +- if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { +- hns3_ether_format_addr(mac_str, +- RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, "failed to set mc mac addr, " +- "addrs invalid. two same addrs(%s).", +- mac_str); +- return -EINVAL; +- } +- } +- +- /* +- * Check if there are duplicate addresses between mac_addrs +- * and mc_addr_set +- */ +- for (j = 0; j < HNS3_VF_UC_MACADDR_NUM; j++) { +- if (rte_is_same_ether_addr(addr, +- &hw->data->mac_addrs[j])) { +- hns3_ether_format_addr(mac_str, +- RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, "failed to set mc mac addr, " +- "addrs invalid. addrs(%s) has already " +- "configured in mac_addr add API", +- mac_str); +- return -EINVAL; +- } +- } +- } +- +- return 0; +-} +- + static int + hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, +@@ -376,7 +312,7 @@ hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev, + int ret; + int i; + +- ret = hns3vf_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); ++ ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); + if (ret) + return ret; + +-- +2.33.0 + diff --git a/0248-net-hns3-refactor-multicast-MAC-address-set-for-PF.patch b/0248-net-hns3-refactor-multicast-MAC-address-set-for-PF.patch new file mode 100644 index 0000000000000000000000000000000000000000..0679a9ee233a866188d982e977b103bed73456a0 --- /dev/null +++ b/0248-net-hns3-refactor-multicast-MAC-address-set-for-PF.patch @@ -0,0 +1,171 @@ +From 4696316a47ed084bdfddc5a7fd12ad743643b602 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:20:04 +0800 +Subject: [PATCH 15/33] net/hns3: refactor multicast MAC address set for PF + +Currently, when configuring a group of multicast MAC addresses, the PF +driver reorder mc_addr array in hw struct to remove multicast MAC +addresses that are not in mc_addr_set array from user and then adds new +multicast MAC addresses. Actually, it can be simplified by removing all +previous MAC addresses and then adding new MAC addresses. 
+ +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 112 ++++----------------------------- + 1 file changed, 11 insertions(+), 101 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 97129c428..dd239f6e7 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -2015,94 +2015,15 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw, + return 0; + } + +-static void +-hns3_set_mc_addr_calc_addr(struct hns3_hw *hw, +- struct rte_ether_addr *mc_addr_set, +- int mc_addr_num, +- struct rte_ether_addr *reserved_addr_list, +- int *reserved_addr_num, +- struct rte_ether_addr *add_addr_list, +- int *add_addr_num, +- struct rte_ether_addr *rm_addr_list, +- int *rm_addr_num) +-{ +- struct rte_ether_addr *addr; +- int current_addr_num; +- int reserved_num = 0; +- int add_num = 0; +- int rm_num = 0; +- int num; +- int i; +- int j; +- bool same_addr; +- +- /* Calculate the mc mac address list that should be removed */ +- current_addr_num = hw->mc_addrs_num; +- for (i = 0; i < current_addr_num; i++) { +- addr = &hw->mc_addrs[i]; +- same_addr = false; +- for (j = 0; j < mc_addr_num; j++) { +- if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { +- same_addr = true; +- break; +- } +- } +- +- if (!same_addr) { +- rte_ether_addr_copy(addr, &rm_addr_list[rm_num]); +- rm_num++; +- } else { +- rte_ether_addr_copy(addr, +- &reserved_addr_list[reserved_num]); +- reserved_num++; +- } +- } +- +- /* Calculate the mc mac address list that should be added */ +- for (i = 0; i < mc_addr_num; i++) { +- addr = &mc_addr_set[i]; +- same_addr = false; +- for (j = 0; j < current_addr_num; j++) { +- if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) { +- same_addr = true; +- break; +- } +- } +- +- if (!same_addr) { +- rte_ether_addr_copy(addr, &add_addr_list[add_num]); +- add_num++; +- } +- } +- +- /* Reorder the mc mac address list maintained by driver */ +- for (i = 0; i < reserved_num; i++) +- rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]); +- +- for (i = 0; i < rm_num; i++) { +- num = reserved_num + i; +- rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]); +- } +- +- *reserved_addr_num = reserved_num; +- *add_addr_num = add_num; +- *rm_addr_num = rm_num; +-} +- + static int + hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM]; +- struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM]; +- struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM]; + struct rte_ether_addr *addr; +- int reserved_addr_num; +- int add_addr_num; +- int rm_addr_num; +- int mc_addr_num; ++ int cur_addr_num; ++ int set_addr_num; + int num; + int ret; + int i; +@@ -2113,40 +2034,29 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + return ret; + + rte_spinlock_lock(&hw->lock); +- +- /* +- * Calculate the mc mac address lists those should be removed and be +- * added, Reorder the mc mac address list maintained by driver. 
+- */ +- mc_addr_num = (int)nb_mc_addr; +- hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num, +- reserved_addr_list, &reserved_addr_num, +- add_addr_list, &add_addr_num, +- rm_addr_list, &rm_addr_num); +- +- /* Remove mc mac addresses */ +- for (i = 0; i < rm_addr_num; i++) { +- num = rm_addr_num - i - 1; +- addr = &rm_addr_list[num]; ++ cur_addr_num = hw->mc_addrs_num; ++ for (i = 0; i < cur_addr_num; i++) { ++ num = cur_addr_num - i - 1; ++ addr = &hw->mc_addrs[num]; + ret = hw->ops.del_mc_mac_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; + } ++ + hw->mc_addrs_num--; + } + +- /* Add mc mac addresses */ +- for (i = 0; i < add_addr_num; i++) { +- addr = &add_addr_list[i]; ++ set_addr_num = (int)nb_mc_addr; ++ for (i = 0; i < set_addr_num; i++) { ++ addr = &mc_addr_set[i]; + ret = hw->ops.add_mc_mac_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; + } + +- num = reserved_addr_num + i; +- rte_ether_addr_copy(addr, &hw->mc_addrs[num]); ++ rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]); + hw->mc_addrs_num++; + } + rte_spinlock_unlock(&hw->lock); +-- +2.33.0 + diff --git a/0249-net-hns3-unify-multicast-MAC-address-set-list.patch b/0249-net-hns3-unify-multicast-MAC-address-set-list.patch new file mode 100644 index 0000000000000000000000000000000000000000..8b2f04dcf89f9d7f197e8585e24fd1e69682ef94 --- /dev/null +++ b/0249-net-hns3-unify-multicast-MAC-address-set-list.patch @@ -0,0 +1,132 @@ +From bbe20957bf59a4c2467b768865daa02c74ef907c Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Fri, 22 Oct 2021 17:20:05 +0800 +Subject: [PATCH 16/33] net/hns3: unify multicast MAC address set list + +This patch removes hns3vf_set_mc_mac_addr_list() and uses +hns3_set_mc_mac_addr_list() to do this. 
+ +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 4 +-- + drivers/net/hns3/hns3_ethdev.h | 7 ++--- + drivers/net/hns3/hns3_ethdev_vf.c | 50 +------------------------------ + 3 files changed, 6 insertions(+), 55 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index dd239f6e7..85c50ce67 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1947,7 +1947,7 @@ hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + return ret; + } + +-int ++static int + hns3_set_mc_addr_chk_param(struct hns3_hw *hw, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +@@ -2015,7 +2015,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw, + return 0; + } + +-static int ++int + hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 1606a6407..1f1364304 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -1062,15 +1062,14 @@ void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + uint32_t link_speed, uint8_t link_duplex); + void hns3_parse_devargs(struct rte_eth_dev *dev); + void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); +-int hns3_set_mc_addr_chk_param(struct hns3_hw *hw, +- struct rte_ether_addr *mc_addr_set, +- uint32_t nb_mc_addr); + int hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del); + int hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del); + int hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + __rte_unused uint32_t idx, __rte_unused uint32_t pool); + void hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx); +- ++int hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, ++ struct rte_ether_addr *mc_addr_set, ++ uint32_t nb_mc_addr); + int hns3_restore_ptp(struct hns3_adapter *hns); + int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev, + struct rte_eth_conf *conf); +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index 835e783c3..095f635cc 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -299,54 +299,6 @@ hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, + return ret; + } + +-static int +-hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev, +- struct rte_ether_addr *mc_addr_set, +- uint32_t nb_mc_addr) +-{ +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- struct rte_ether_addr *addr; +- int cur_addr_num; +- int set_addr_num; +- int num; +- int ret; +- int i; +- +- ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); +- if (ret) +- return ret; +- +- rte_spinlock_lock(&hw->lock); +- cur_addr_num = hw->mc_addrs_num; +- for (i = 0; i < cur_addr_num; i++) { +- num = cur_addr_num - i - 1; +- addr = &hw->mc_addrs[num]; +- ret = hw->ops.del_mc_mac_addr(hw, addr); +- if (ret) { +- rte_spinlock_unlock(&hw->lock); +- return ret; +- } +- +- hw->mc_addrs_num--; +- } +- +- set_addr_num = (int)nb_mc_addr; +- for (i = 0; i < set_addr_num; i++) { +- addr = &mc_addr_set[i]; +- ret = hw->ops.add_mc_mac_addr(hw, addr); +- if (ret) { +- rte_spinlock_unlock(&hw->lock); +- return ret; +- } +- +- rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]); +- hw->mc_addrs_num++; +- } +- rte_spinlock_unlock(&hw->lock); +- +- return 0; +-} +- + static int + 
hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, + bool en_uc_pmc, bool en_mc_pmc) +@@ -2704,7 +2656,7 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = { + .mac_addr_add = hns3_add_mac_addr, + .mac_addr_remove = hns3_remove_mac_addr, + .mac_addr_set = hns3vf_set_default_mac_addr, +- .set_mc_addr_list = hns3vf_set_mc_mac_addr_list, ++ .set_mc_addr_list = hns3_set_mc_mac_addr_list, + .link_update = hns3vf_dev_link_update, + .rss_hash_update = hns3_dev_rss_hash_update, + .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, +-- +2.33.0 + diff --git a/0250-bonding-show-Tx-policy-for-802.3AD-mode.patch b/0250-bonding-show-Tx-policy-for-802.3AD-mode.patch new file mode 100644 index 0000000000000000000000000000000000000000..1d8a3a75a73398b5aa0361a328b288e7bd0ae640 --- /dev/null +++ b/0250-bonding-show-Tx-policy-for-802.3AD-mode.patch @@ -0,0 +1,38 @@ +From 6ae8a77985dd6a896c304fa1c344980747e88e66 Mon Sep 17 00:00:00 2001 +From: "Min Hu (Connor)" +Date: Mon, 25 Oct 2021 11:15:11 +0800 +Subject: [PATCH 17/33] bonding: show Tx policy for 802.3AD mode + +As balance xmit policy is supported in bonding mode 4(802.3AD). This +patch adds balance xmit policy show in testpmd commands for mode 4. Like: +testpmd> show bonding config 2 + Bonding mode: 4 + Balance Xmit Policy: BALANCE_XMIT_POLICY_LAYER34 + IEEE802.3AD Aggregator Mode: stable + Slaves (2): [0 1] + Active Slaves (2): [1 0] + Primary: [1] + +Signed-off-by: Min Hu (Connor) +Acked-by: Ferruh Yigit +--- + app/test-pmd/cmdline.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c +index b701129d8..66e3815de 100644 +--- a/app/test-pmd/cmdline.c ++++ b/app/test-pmd/cmdline.c +@@ -6257,7 +6257,8 @@ static void cmd_show_bonding_config_parsed(void *parsed_result, + } else + printf("\tBonding mode: %d\n", bonding_mode); + +- if (bonding_mode == BONDING_MODE_BALANCE) { ++ if (bonding_mode == BONDING_MODE_BALANCE || ++ bonding_mode == BONDING_MODE_8023AD) { + int balance_xmit_policy; + + balance_xmit_policy = rte_eth_bond_xmit_policy_get(port_id); +-- +2.33.0 + diff --git a/0251-net-hns3-fix-secondary-process-reference-count.patch b/0251-net-hns3-fix-secondary-process-reference-count.patch new file mode 100644 index 0000000000000000000000000000000000000000..9e378fd527ab4f9f41ca0c2ba3f65a9956a9af97 --- /dev/null +++ b/0251-net-hns3-fix-secondary-process-reference-count.patch @@ -0,0 +1,116 @@ +From 1058a9cd1fa03e94b7e8634f1f26902ed9a376b1 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Tue, 2 Nov 2021 09:38:26 +0800 +Subject: [PATCH 18/33] net/hns3: fix secondary process reference count + +The "secondary_cnt" will be increased when a secondary process +initialized. But the value of this variable is not decreased when the +secondary process exits, which causes the primary process senses that +the secondary process still exists. As a result, the primary process +fails to send messages to the secondary process after the secondary +process exits. 
+ +Fixes: 23d4b61fee5d ("net/hns3: support multiple process") +Cc: stable@dpdk.org + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 10 +++++++--- + drivers/net/hns3/hns3_ethdev_vf.c | 10 +++++++--- + drivers/net/hns3/hns3_mp.c | 4 +++- + 3 files changed, 17 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 85c50ce67..31d027836 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -5894,8 +5894,10 @@ hns3_dev_close(struct rte_eth_dev *eth_dev) + struct hns3_hw *hw = &hns->hw; + int ret = 0; + +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ++ __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); + return 0; ++ } + + if (hw->adapter_state == HNS3_NIC_STARTED) + ret = hns3_dev_stop(eth_dev); +@@ -7421,7 +7423,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) + "process, ret = %d", ret); + goto err_mp_init_secondary; + } +- hw->secondary_cnt++; ++ __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); + hns3_tx_push_init(eth_dev); + return 0; + } +@@ -7524,8 +7526,10 @@ hns3_dev_uninit(struct rte_eth_dev *eth_dev) + + PMD_INIT_FUNC_TRACE(); + +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ++ __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); + return 0; ++ } + + if (hw->adapter_state < HNS3_NIC_CLOSING) + hns3_dev_close(eth_dev); +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index 095f635cc..76721b0d7 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -1921,8 +1921,10 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) + struct hns3_hw *hw = &hns->hw; + int ret = 0; + +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ++ __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); + return 0; ++ } + + if (hw->adapter_state == HNS3_NIC_STARTED) + ret = hns3vf_dev_stop(eth_dev); +@@ -2710,7 +2712,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) + "process, ret = %d", ret); + goto err_mp_init_secondary; + } +- hw->secondary_cnt++; ++ __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); + hns3_tx_push_init(eth_dev); + return 0; + } +@@ -2812,8 +2814,10 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) + + PMD_INIT_FUNC_TRACE(); + +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ++ __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); + return 0; ++ } + + if (hw->adapter_state < HNS3_NIC_CLOSING) + hns3vf_dev_close(eth_dev); +diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c +index 4891c6e4f..184acfe02 100644 +--- a/drivers/net/hns3/hns3_mp.c ++++ b/drivers/net/hns3/hns3_mp.c +@@ -150,8 +150,10 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum hns3_mp_req_type type) + int ret; + int i; + +- if (rte_eal_process_type() == RTE_PROC_SECONDARY || !hw->secondary_cnt) ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY || ++ __atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0) + return; ++ + if (!mp_req_type_is_valid(type)) { + hns3_err(hw, "port %u unknown request (req_type %d)", + dev->data->port_id, type); +-- +2.33.0 + diff --git a/0252-net-hns3-fix-multi-process-action-register-and-unreg.patch b/0252-net-hns3-fix-multi-process-action-register-and-unreg.patch new file mode 100644 index 
0000000000000000000000000000000000000000..ccbbe817e599ebd51949a8b4dfbf7acc43ea99cc --- /dev/null +++ b/0252-net-hns3-fix-multi-process-action-register-and-unreg.patch @@ -0,0 +1,166 @@ +From c4ae016e0b548882c5f777cd1782b8661a34f252 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Tue, 2 Nov 2021 09:38:27 +0800 +Subject: [PATCH 19/33] net/hns3: fix multi-process action register and + unregister + +The multi-process has the following problems: +1) After a port in primary process is closed, the mp action of the + process is unregistered. Which will cause that other device in the + primary process cannot respond to requests from secondary processes. +2) Because variable "hns3_inited" is set to true without returning an + initial value, the mp action cannot be registered again after it is + unregistered. +3) The mp action of primary and secondary process need to be registered + only once regardless of port numbers in the process. That's what + variable "hns3_inited" does. But the variable is difficult to + understand. + +This patch adds a hns3_process_local_data structure to resolve above +problems. + +Fixes: 9570b1fdbdad ("net/hns3: check multi-process action register result") +Fixes: 23d4b61fee5d ("net/hns3: support multiple process") + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 2 ++ + drivers/net/hns3/hns3_ethdev_vf.c | 2 ++ + drivers/net/hns3/hns3_mp.c | 37 ++++++++++++++++++------------- + drivers/net/hns3/hns3_mp.h | 7 ++++++ + 4 files changed, 33 insertions(+), 15 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 31d027836..2f2d2a605 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -7424,6 +7424,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) + goto err_mp_init_secondary; + } + __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); ++ process_data.eth_dev_cnt++; + hns3_tx_push_init(eth_dev); + return 0; + } +@@ -7435,6 +7436,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) + ret); + goto err_mp_init_primary; + } ++ process_data.eth_dev_cnt++; + + hw->adapter_state = HNS3_NIC_UNINITIALIZED; + hns->is_vf = false; +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index 76721b0d7..108bd61d5 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -2713,6 +2713,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) + goto err_mp_init_secondary; + } + __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); ++ process_data.eth_dev_cnt++; + hns3_tx_push_init(eth_dev); + return 0; + } +@@ -2724,6 +2725,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) + ret); + goto err_mp_init_primary; + } ++ process_data.eth_dev_cnt++; + + hw->adapter_state = HNS3_NIC_UNINITIALIZED; + hns->is_vf = true; +diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c +index 184acfe02..753b93f09 100644 +--- a/drivers/net/hns3/hns3_mp.c ++++ b/drivers/net/hns3/hns3_mp.c +@@ -12,7 +12,8 @@ + #include "hns3_rxtx.h" + #include "hns3_mp.h" + +-static bool hns3_inited; ++/* local data for primary or secondary process. */ ++struct hns3_process_local_data process_data; + + /* + * Initialize IPC message. 
+@@ -230,14 +231,15 @@ int hns3_mp_init_primary(void) + { + int ret; + +- if (!hns3_inited) { +- /* primary is allowed to not support IPC */ +- ret = rte_mp_action_register(HNS3_MP_NAME, mp_primary_handle); +- if (ret && rte_errno != ENOTSUP) +- return ret; ++ if (process_data.init_done) ++ return 0; + +- hns3_inited = true; +- } ++ /* primary is allowed to not support IPC */ ++ ret = rte_mp_action_register(HNS3_MP_NAME, mp_primary_handle); ++ if (ret && rte_errno != ENOTSUP) ++ return ret; ++ ++ process_data.init_done = true; + + return 0; + } +@@ -247,8 +249,12 @@ int hns3_mp_init_primary(void) + */ + void hns3_mp_uninit_primary(void) + { +- if (hns3_inited) ++ process_data.eth_dev_cnt--; ++ ++ if (process_data.eth_dev_cnt == 0) { + rte_mp_action_unregister(HNS3_MP_NAME); ++ process_data.init_done = false; ++ } + } + + /* +@@ -258,13 +264,14 @@ int hns3_mp_init_secondary(void) + { + int ret; + +- if (!hns3_inited) { +- ret = rte_mp_action_register(HNS3_MP_NAME, mp_secondary_handle); +- if (ret) +- return ret; ++ if (process_data.init_done) ++ return 0; + +- hns3_inited = true; +- } ++ ret = rte_mp_action_register(HNS3_MP_NAME, mp_secondary_handle); ++ if (ret) ++ return ret; ++ ++ process_data.init_done = true; + + return 0; + } +diff --git a/drivers/net/hns3/hns3_mp.h b/drivers/net/hns3/hns3_mp.h +index e0e4aeaf6..b49532f98 100644 +--- a/drivers/net/hns3/hns3_mp.h ++++ b/drivers/net/hns3/hns3_mp.h +@@ -5,6 +5,13 @@ + #ifndef _HNS3_MP_H_ + #define _HNS3_MP_H_ + ++/* Local data for primary or secondary process. */ ++struct hns3_process_local_data { ++ bool init_done; /* Process action register completed flag. */ ++ int eth_dev_cnt; /* Ethdev count under the current process. */ ++}; ++extern struct hns3_process_local_data process_data; ++ + void hns3_mp_req_start_rxtx(struct rte_eth_dev *dev); + void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev); + void hns3_mp_req_start_tx(struct rte_eth_dev *dev); +-- +2.33.0 + diff --git a/0253-net-hns3-unregister-MP-action-on-close-for-secondary.patch b/0253-net-hns3-unregister-MP-action-on-close-for-secondary.patch new file mode 100644 index 0000000000000000000000000000000000000000..cf5888e3bb25c43c80d8bae65f03c1a689f5d53b --- /dev/null +++ b/0253-net-hns3-unregister-MP-action-on-close-for-secondary.patch @@ -0,0 +1,128 @@ +From 9bc0df6a4fa9b3ea1decc519c778e48a27037589 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Tue, 2 Nov 2021 09:38:28 +0800 +Subject: [PATCH 20/33] net/hns3: unregister MP action on close for secondary + +This patch fixes lack of unregistering MP action for secondary process +when PMD is closed. 
+ +Fixes: 9570b1fdbdad ("net/hns3: check multi-process action register result") +Fixes: 23d4b61fee5d ("net/hns3: support multiple process") + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 6 ++++-- + drivers/net/hns3/hns3_ethdev_vf.c | 6 ++++-- + drivers/net/hns3/hns3_mp.c | 5 +---- + drivers/net/hns3/hns3_mp.h | 2 +- + 4 files changed, 10 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 2f2d2a605..b4f375bf2 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -5896,6 +5896,7 @@ hns3_dev_close(struct rte_eth_dev *eth_dev) + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); ++ hns3_mp_uninit(); + return 0; + } + +@@ -5912,7 +5913,7 @@ hns3_dev_close(struct rte_eth_dev *eth_dev) + hns3_uninit_pf(eth_dev); + hns3_free_all_queues(eth_dev); + rte_free(hw->reset.wait_data); +- hns3_mp_uninit_primary(); ++ hns3_mp_uninit(); + hns3_warn(hw, "Close port %u finished", hw->data->port_id); + + return ret; +@@ -7507,7 +7508,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) + rte_free(hw->reset.wait_data); + + err_init_reset: +- hns3_mp_uninit_primary(); ++ hns3_mp_uninit(); + + err_mp_init_primary: + err_mp_init_secondary: +@@ -7530,6 +7531,7 @@ hns3_dev_uninit(struct rte_eth_dev *eth_dev) + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); ++ hns3_mp_uninit(); + return 0; + } + +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index 108bd61d5..ac0dcbe36 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -1923,6 +1923,7 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); ++ hns3_mp_uninit(); + return 0; + } + +@@ -1938,7 +1939,7 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) + hns3vf_uninit_vf(eth_dev); + hns3_free_all_queues(eth_dev); + rte_free(hw->reset.wait_data); +- hns3_mp_uninit_primary(); ++ hns3_mp_uninit(); + hns3_warn(hw, "Close port %u finished", hw->data->port_id); + + return ret; +@@ -2794,7 +2795,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) + rte_free(hw->reset.wait_data); + + err_init_reset: +- hns3_mp_uninit_primary(); ++ hns3_mp_uninit(); + + err_mp_init_primary: + err_mp_init_secondary: +@@ -2818,6 +2819,7 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); ++ hns3_mp_uninit(); + return 0; + } + +diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c +index 753b93f09..2ecb16861 100644 +--- a/drivers/net/hns3/hns3_mp.c ++++ b/drivers/net/hns3/hns3_mp.c +@@ -244,10 +244,7 @@ int hns3_mp_init_primary(void) + return 0; + } + +-/* +- * Un-initialize by primary process. 
+- */ +-void hns3_mp_uninit_primary(void) ++void hns3_mp_uninit(void) + { + process_data.eth_dev_cnt--; + +diff --git a/drivers/net/hns3/hns3_mp.h b/drivers/net/hns3/hns3_mp.h +index b49532f98..5738ab74a 100644 +--- a/drivers/net/hns3/hns3_mp.h ++++ b/drivers/net/hns3/hns3_mp.h +@@ -18,7 +18,7 @@ void hns3_mp_req_start_tx(struct rte_eth_dev *dev); + void hns3_mp_req_stop_tx(struct rte_eth_dev *dev); + + int hns3_mp_init_primary(void); +-void hns3_mp_uninit_primary(void); ++void hns3_mp_uninit(void); + int hns3_mp_init_secondary(void); + + #endif /* _HNS3_MP_H_ */ +-- +2.33.0 + diff --git a/0254-net-hns3-refactor-multi-process-initialization.patch b/0254-net-hns3-refactor-multi-process-initialization.patch new file mode 100644 index 0000000000000000000000000000000000000000..485d7bff766f5daf2b46f76e734dd0c4a8ddaa6d --- /dev/null +++ b/0254-net-hns3-refactor-multi-process-initialization.patch @@ -0,0 +1,300 @@ +From 8388c42414d8f33ba97b01cbe4bf4e945a9819b4 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Tue, 2 Nov 2021 09:38:29 +0800 +Subject: [PATCH 21/33] net/hns3: refactor multi-process initialization + +Currently, the logic of the PF and VF initialization codes for multiple +process is the same. A common function can be extracted to initialize +and unload multiple process. + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 34 +++++------------ + drivers/net/hns3/hns3_ethdev_vf.c | 33 +++++----------- + drivers/net/hns3/hns3_mp.c | 62 ++++++++++++++++++++++++------- + drivers/net/hns3/hns3_mp.h | 6 +-- + 4 files changed, 68 insertions(+), 67 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index b4f375bf2..ecf912a9f 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -5895,8 +5895,7 @@ hns3_dev_close(struct rte_eth_dev *eth_dev) + int ret = 0; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { +- __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); +- hns3_mp_uninit(); ++ hns3_mp_uninit(eth_dev); + return 0; + } + +@@ -5913,7 +5912,7 @@ hns3_dev_close(struct rte_eth_dev *eth_dev) + hns3_uninit_pf(eth_dev); + hns3_free_all_queues(eth_dev); + rte_free(hw->reset.wait_data); +- hns3_mp_uninit(); ++ hns3_mp_uninit(eth_dev); + hns3_warn(hw, "Close port %u finished", hw->data->port_id); + + return ret; +@@ -7417,28 +7416,15 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) + hns3_set_rxtx_function(eth_dev); + eth_dev->dev_ops = &hns3_eth_dev_ops; + eth_dev->rx_queue_count = hns3_rx_queue_count; ++ ret = hns3_mp_init(eth_dev); ++ if (ret) ++ goto err_mp_init; ++ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { +- ret = hns3_mp_init_secondary(); +- if (ret) { +- PMD_INIT_LOG(ERR, "Failed to init for secondary " +- "process, ret = %d", ret); +- goto err_mp_init_secondary; +- } +- __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); +- process_data.eth_dev_cnt++; + hns3_tx_push_init(eth_dev); + return 0; + } + +- ret = hns3_mp_init_primary(); +- if (ret) { +- PMD_INIT_LOG(ERR, +- "Failed to init for primary process, ret = %d", +- ret); +- goto err_mp_init_primary; +- } +- process_data.eth_dev_cnt++; +- + hw->adapter_state = HNS3_NIC_UNINITIALIZED; + hns->is_vf = false; + hw->data = eth_dev->data; +@@ -7508,10 +7494,9 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) + rte_free(hw->reset.wait_data); + + err_init_reset: +- hns3_mp_uninit(); ++ hns3_mp_uninit(eth_dev); + +-err_mp_init_primary: +-err_mp_init_secondary: ++err_mp_init: + eth_dev->dev_ops = NULL; 
+ eth_dev->rx_pkt_burst = NULL; + eth_dev->rx_descriptor_status = NULL; +@@ -7530,8 +7515,7 @@ hns3_dev_uninit(struct rte_eth_dev *eth_dev) + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { +- __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); +- hns3_mp_uninit(); ++ hns3_mp_uninit(eth_dev); + return 0; + } + +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index ac0dcbe36..1e0cb1b63 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -1922,8 +1922,7 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) + int ret = 0; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { +- __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); +- hns3_mp_uninit(); ++ hns3_mp_uninit(eth_dev); + return 0; + } + +@@ -1939,7 +1938,7 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) + hns3vf_uninit_vf(eth_dev); + hns3_free_all_queues(eth_dev); + rte_free(hw->reset.wait_data); +- hns3_mp_uninit(); ++ hns3_mp_uninit(eth_dev); + hns3_warn(hw, "Close port %u finished", hw->data->port_id); + + return ret; +@@ -2706,28 +2705,15 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) + hns3_set_rxtx_function(eth_dev); + eth_dev->dev_ops = &hns3vf_eth_dev_ops; + eth_dev->rx_queue_count = hns3_rx_queue_count; ++ ret = hns3_mp_init(eth_dev); ++ if (ret) ++ goto err_mp_init; ++ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { +- ret = hns3_mp_init_secondary(); +- if (ret) { +- PMD_INIT_LOG(ERR, "Failed to init for secondary " +- "process, ret = %d", ret); +- goto err_mp_init_secondary; +- } +- __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); +- process_data.eth_dev_cnt++; + hns3_tx_push_init(eth_dev); + return 0; + } + +- ret = hns3_mp_init_primary(); +- if (ret) { +- PMD_INIT_LOG(ERR, +- "Failed to init for primary process, ret = %d", +- ret); +- goto err_mp_init_primary; +- } +- process_data.eth_dev_cnt++; +- + hw->adapter_state = HNS3_NIC_UNINITIALIZED; + hns->is_vf = true; + hw->data = eth_dev->data; +@@ -2795,10 +2781,9 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) + rte_free(hw->reset.wait_data); + + err_init_reset: +- hns3_mp_uninit(); ++ hns3_mp_uninit(eth_dev); + +-err_mp_init_primary: +-err_mp_init_secondary: ++err_mp_init: + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->rx_descriptor_status = NULL; +@@ -2819,7 +2804,7 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); +- hns3_mp_uninit(); ++ hns3_mp_uninit(eth_dev); + return 0; + } + +diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c +index 2ecb16861..63d22bcd3 100644 +--- a/drivers/net/hns3/hns3_mp.c ++++ b/drivers/net/hns3/hns3_mp.c +@@ -13,7 +13,7 @@ + #include "hns3_mp.h" + + /* local data for primary or secondary process. */ +-struct hns3_process_local_data process_data; ++static struct hns3_process_local_data process_data; + + /* + * Initialize IPC message. +@@ -227,7 +227,8 @@ hns3_mp_req_start_tx(struct rte_eth_dev *dev) + /* + * Initialize by primary process. + */ +-int hns3_mp_init_primary(void) ++static int ++hns3_mp_init_primary(void) + { + int ret; + +@@ -244,20 +245,11 @@ int hns3_mp_init_primary(void) + return 0; + } + +-void hns3_mp_uninit(void) +-{ +- process_data.eth_dev_cnt--; +- +- if (process_data.eth_dev_cnt == 0) { +- rte_mp_action_unregister(HNS3_MP_NAME); +- process_data.init_done = false; +- } +-} +- + /* + * Initialize by secondary process. 
+ */ +-int hns3_mp_init_secondary(void) ++static int ++hns3_mp_init_secondary(void) + { + int ret; + +@@ -265,10 +257,52 @@ int hns3_mp_init_secondary(void) + return 0; + + ret = rte_mp_action_register(HNS3_MP_NAME, mp_secondary_handle); +- if (ret) ++ if (ret && rte_errno != ENOTSUP) + return ret; + + process_data.init_done = true; + + return 0; + } ++ ++int ++hns3_mp_init(struct rte_eth_dev *dev) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ++ ret = hns3_mp_init_secondary(); ++ if (ret) { ++ PMD_INIT_LOG(ERR, "Failed to init for secondary process, ret = %d", ++ ret); ++ return ret; ++ } ++ __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); ++ } else { ++ ret = hns3_mp_init_primary(); ++ if (ret) { ++ PMD_INIT_LOG(ERR, "Failed to init for primary process, ret = %d", ++ ret); ++ return ret; ++ } ++ } ++ ++ process_data.eth_dev_cnt++; ++ ++ return 0; ++} ++ ++void hns3_mp_uninit(struct rte_eth_dev *dev) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED); ++ ++ process_data.eth_dev_cnt--; ++ if (process_data.eth_dev_cnt == 0) { ++ rte_mp_action_unregister(HNS3_MP_NAME); ++ process_data.init_done = false; ++ } ++} +diff --git a/drivers/net/hns3/hns3_mp.h b/drivers/net/hns3/hns3_mp.h +index 5738ab74a..a74221d08 100644 +--- a/drivers/net/hns3/hns3_mp.h ++++ b/drivers/net/hns3/hns3_mp.h +@@ -10,15 +10,13 @@ struct hns3_process_local_data { + bool init_done; /* Process action register completed flag. */ + int eth_dev_cnt; /* Ethdev count under the current process. */ + }; +-extern struct hns3_process_local_data process_data; + + void hns3_mp_req_start_rxtx(struct rte_eth_dev *dev); + void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev); + void hns3_mp_req_start_tx(struct rte_eth_dev *dev); + void hns3_mp_req_stop_tx(struct rte_eth_dev *dev); + +-int hns3_mp_init_primary(void); +-void hns3_mp_uninit(void); +-int hns3_mp_init_secondary(void); ++int hns3_mp_init(struct rte_eth_dev *dev); ++void hns3_mp_uninit(struct rte_eth_dev *dev); + + #endif /* _HNS3_MP_H_ */ +-- +2.33.0 + diff --git a/0255-usertools-devbind-add-Kunpeng-DMA.patch b/0255-usertools-devbind-add-Kunpeng-DMA.patch new file mode 100644 index 0000000000000000000000000000000000000000..51919c67d79fa392b5313873eb9e3ec05c26af3f --- /dev/null +++ b/0255-usertools-devbind-add-Kunpeng-DMA.patch @@ -0,0 +1,41 @@ +From 62c1169fb30dd7407c35377364a7da2336ac1c24 Mon Sep 17 00:00:00 2001 +From: Chengwen Feng +Date: Tue, 2 Nov 2021 20:37:43 +0800 +Subject: [PATCH 22/33] usertools/devbind: add Kunpeng DMA + +Add Kunpeng DMA device ID to dmadev category. 
+ +Signed-off-by: Chengwen Feng +--- + usertools/dpdk-devbind.py | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + mode change 100755 => 100644 usertools/dpdk-devbind.py + +diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py +old mode 100755 +new mode 100644 +index c2ede3d4d..8af3089ae +--- a/usertools/dpdk-devbind.py ++++ b/usertools/dpdk-devbind.py +@@ -45,6 +45,8 @@ + octeontx2_ree = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f4', + 'SVendor': None, 'SDevice': None} + ++hisilicon_dma = {'Class': '08', 'Vendor': '19e5', 'Device': 'a122', ++ 'SVendor': None, 'SDevice': None} + intel_ioat_bdw = {'Class': '08', 'Vendor': '8086', + 'Device': '6f20,6f21,6f22,6f23,6f24,6f25,6f26,6f27,6f2e,6f2f', + 'SVendor': None, 'SDevice': None} +@@ -62,7 +64,8 @@ + network_devices = [network_class, cavium_pkx, avp_vnic, ifpga_class] + baseband_devices = [acceleration_class] + crypto_devices = [encryption_class, intel_processor_class] +-eventdev_devices = [cavium_sso, cavium_tim, octeontx2_sso] ++dma_devices = [hisilicon_dma] ++eventdev_devices = [cavium_sso, cavium_tim, intel_dlb, octeontx2_sso] + mempool_devices = [cavium_fpa, octeontx2_npa] + compress_devices = [cavium_zip] + regex_devices = [octeontx2_ree] +-- +2.33.0 + diff --git a/0256-kni-check-error-code-of-allmulticast-mode-switch.patch b/0256-kni-check-error-code-of-allmulticast-mode-switch.patch new file mode 100644 index 0000000000000000000000000000000000000000..276d8802a4ca2585e20b2b98b551778042e1593a --- /dev/null +++ b/0256-kni-check-error-code-of-allmulticast-mode-switch.patch @@ -0,0 +1,54 @@ +From 72387188c6847df9a88d77c5428604db88441617 Mon Sep 17 00:00:00 2001 +From: Chengwen Feng +Date: Fri, 23 Apr 2021 16:12:42 +0800 +Subject: [PATCH 23/33] kni: check error code of allmulticast mode switch + +Some drivers may return errcode when switch allmulticast mode, +so it's necessary to check the return code. + +Fixes: b34801d1aa2e ("kni: support allmulticast mode set") +Cc: stable@dpdk.org + +Signed-off-by: Chengwen Feng +Signed-off-by: Min Hu (Connor) +Acked-by: Ferruh Yigit +--- + lib/librte_kni/rte_kni.c | 13 ++++++++++--- + 1 file changed, 10 insertions(+), 3 deletions(-) + +diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c +index 837d0217d..54ea792fc 100644 +--- a/lib/librte_kni/rte_kni.c ++++ b/lib/librte_kni/rte_kni.c +@@ -514,6 +514,8 @@ kni_config_promiscusity(uint16_t port_id, uint8_t to_on) + static int + kni_config_allmulticast(uint16_t port_id, uint8_t to_on) + { ++ int ret; ++ + if (!rte_eth_dev_is_valid_port(port_id)) { + RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id); + return -EINVAL; +@@ -523,11 +525,16 @@ kni_config_allmulticast(uint16_t port_id, uint8_t to_on) + port_id, to_on); + + if (to_on) +- rte_eth_allmulticast_enable(port_id); ++ ret = rte_eth_allmulticast_enable(port_id); + else +- rte_eth_allmulticast_disable(port_id); ++ ret = rte_eth_allmulticast_disable(port_id); ++ if (ret != 0) ++ RTE_LOG(ERR, KNI, ++ "Failed to %s allmulticast mode for port %u: %s\n", ++ to_on ? 
"enable" : "disable", port_id, ++ rte_strerror(-ret)); + +- return 0; ++ return ret; + } + + int +-- +2.33.0 + diff --git a/0257-net-hns3-simplify-queue-DMA-address-arithmetic.patch b/0257-net-hns3-simplify-queue-DMA-address-arithmetic.patch new file mode 100644 index 0000000000000000000000000000000000000000..1b7aadc5f6f01212f7da6c078c0a9ec57845093e --- /dev/null +++ b/0257-net-hns3-simplify-queue-DMA-address-arithmetic.patch @@ -0,0 +1,42 @@ +From cc6216dd998af8500649b7eeb520af2b80abdb90 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Sat, 6 Nov 2021 09:42:58 +0800 +Subject: [PATCH 24/33] net/hns3: simplify queue DMA address arithmetic + +The patch obtains the upper 32 bits of the Rx/Tx queue DMA address in one +step instead of two steps. + +Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations") +Cc: stable@dpdk.org + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_rxtx.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c +index bb1723e29..b63024997 100644 +--- a/drivers/net/hns3/hns3_rxtx.c ++++ b/drivers/net/hns3/hns3_rxtx.c +@@ -310,7 +310,7 @@ hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq) + + hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr); + hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG, +- (uint32_t)((dma_addr >> 31) >> 1)); ++ (uint32_t)(dma_addr >> 32)); + + hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG, + hns3_buf_size2type(rx_buf_len)); +@@ -325,7 +325,7 @@ hns3_init_tx_queue_hw(struct hns3_tx_queue *txq) + + hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr); + hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG, +- (uint32_t)((dma_addr >> 31) >> 1)); ++ (uint32_t)(dma_addr >> 32)); + + hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG, + HNS3_CFG_DESC_NUM(txq->nb_tx_desc)); +-- +2.33.0 + diff --git a/0258-net-hns3-remove-redundant-function-declaration.patch b/0258-net-hns3-remove-redundant-function-declaration.patch new file mode 100644 index 0000000000000000000000000000000000000000..ae7f6cb3bdd3f338c9c3cbbba2c087241aa479c5 --- /dev/null +++ b/0258-net-hns3-remove-redundant-function-declaration.patch @@ -0,0 +1,29 @@ +From 0527eaf2489c1657ccf02a9e71f4e684eec5da77 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Sat, 6 Nov 2021 09:42:59 +0800 +Subject: [PATCH 25/33] net/hns3: remove redundant function declaration + +This patch removes a redundant function declaration for +hns3_rx_check_vec_support(). 
+ +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_rxtx.h | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h +index cd7c21c1d..4c8b88352 100644 +--- a/drivers/net/hns3/hns3_rxtx.h ++++ b/drivers/net/hns3/hns3_rxtx.h +@@ -712,7 +712,6 @@ uint16_t hns3_recv_pkts_vec_sve(void *rx_queue, struct rte_mbuf **rx_pkts, + int hns3_rx_burst_mode_get(struct rte_eth_dev *dev, + __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode); +-int hns3_rx_check_vec_support(struct rte_eth_dev *dev); + uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + uint16_t hns3_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, +-- +2.33.0 + diff --git a/0259-net-hns3-modify-an-indent-alignment.patch b/0259-net-hns3-modify-an-indent-alignment.patch new file mode 100644 index 0000000000000000000000000000000000000000..992a318c28acab341058fe33da28b8b29b4bc5bb --- /dev/null +++ b/0259-net-hns3-modify-an-indent-alignment.patch @@ -0,0 +1,30 @@ +From e3b6924c495f721af74a89c001dbb2497dfbcc1d Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Sat, 6 Nov 2021 09:43:00 +0800 +Subject: [PATCH 26/33] net/hns3: modify an indent alignment + +This patch modifies some code alignment issues to make the code style +more consistent. + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_rxtx.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c +index b63024997..d5aa72c8f 100644 +--- a/drivers/net/hns3/hns3_rxtx.c ++++ b/drivers/net/hns3/hns3_rxtx.c +@@ -1895,7 +1895,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, + */ + if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) + rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state == +- HNS3_PORT_BASE_VLAN_ENABLE; ++ HNS3_PORT_BASE_VLAN_ENABLE; + else + rxq->pvid_sw_discard_en = false; + rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false; +-- +2.33.0 + diff --git a/0260-net-hns3-use-unsigned-integer-for-bitwise-operations.patch b/0260-net-hns3-use-unsigned-integer-for-bitwise-operations.patch new file mode 100644 index 0000000000000000000000000000000000000000..a19cd64718f0495ae23aa3997de4d9ea02491206 --- /dev/null +++ b/0260-net-hns3-use-unsigned-integer-for-bitwise-operations.patch @@ -0,0 +1,39 @@ +From 36c73d69efda51972d64318ef8cb1c7fefde482f Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Sat, 6 Nov 2021 09:43:01 +0800 +Subject: [PATCH 27/33] net/hns3: use unsigned integer for bitwise operations + +Bitwise operations should be used only with unsigned integer. This patch +modifies some code that does not meet this rule. 
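+
+For example, the pattern this change enforces looks roughly like the
+following sketch (context abbreviated for illustration):
+
+	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+
+	/*
+	 * The enum is a plain (signed) int, so cast it to an unsigned type
+	 * before masking; the bitwise AND then operates on unsigned operands.
+	 */
+	if (((uint32_t)rx_mq_mode & ETH_MQ_RX_DCB_FLAG) != 0) {
+		/* DCB multi-queue Rx mode was requested. */
+	}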
+ +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index ecf912a9f..03f6da5bc 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -2104,7 +2104,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev) + int max_tc = 0; + int i; + +- if ((rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) || ++ if (((uint32_t)rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG) || + (tx_mq_mode == ETH_MQ_TX_VMDQ_DCB || + tx_mq_mode == ETH_MQ_TX_VMDQ_ONLY)) { + hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.", +@@ -2114,7 +2114,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev) + + dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; + dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; +- if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) { ++ if ((uint32_t)rx_mq_mode & ETH_MQ_RX_DCB_FLAG) { + if (dcb_rx_conf->nb_tcs > pf->tc_max) { + hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.", + dcb_rx_conf->nb_tcs, pf->tc_max); +-- +2.33.0 + diff --git a/0261-net-hns3-extract-common-code-to-its-own-file.patch b/0261-net-hns3-extract-common-code-to-its-own-file.patch new file mode 100644 index 0000000000000000000000000000000000000000..cfc72d6e5bf0738a4500c758e70a74adbd5f6e5f --- /dev/null +++ b/0261-net-hns3-extract-common-code-to-its-own-file.patch @@ -0,0 +1,1143 @@ +From 78bf7630d452218688fa2760ab847ea06e2c01f5 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Sat, 6 Nov 2021 09:43:02 +0800 +Subject: [PATCH 28/33] net/hns3: extract common code to its own file + +This patch extracts a common file to store the common code for PF and VF +driver. + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_cmd.c | 2 +- + drivers/net/hns3/hns3_common.c | 426 +++++++++++++++++++++++++++++ + drivers/net/hns3/hns3_common.h | 48 ++++ + drivers/net/hns3/hns3_ethdev.c | 430 +----------------------------- + drivers/net/hns3/hns3_ethdev.h | 28 +- + drivers/net/hns3/hns3_ethdev_vf.c | 1 + + drivers/net/hns3/hns3_intr.c | 2 +- + drivers/net/hns3/hns3_mbx.c | 2 +- + drivers/net/hns3/hns3_rxtx.c | 2 +- + drivers/net/hns3/meson.build | 4 +- + 10 files changed, 484 insertions(+), 461 deletions(-) + create mode 100644 drivers/net/hns3/hns3_common.c + create mode 100644 drivers/net/hns3/hns3_common.h + +diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c +index 6e49108d2..94cb10c4f 100644 +--- a/drivers/net/hns3/hns3_cmd.c ++++ b/drivers/net/hns3/hns3_cmd.c +@@ -5,7 +5,7 @@ + #include + #include + +-#include "hns3_ethdev.h" ++#include "hns3_common.h" + #include "hns3_regs.h" + #include "hns3_intr.h" + #include "hns3_logs.h" +diff --git a/drivers/net/hns3/hns3_common.c b/drivers/net/hns3/hns3_common.c +new file mode 100644 +index 000000000..85316d342 +--- /dev/null ++++ b/drivers/net/hns3/hns3_common.c +@@ -0,0 +1,426 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(C) 2021 HiSilicon Limited ++ */ ++ ++#include ++ ++#include "hns3_logs.h" ++#include "hns3_common.h" ++ ++static int ++hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) ++{ ++ uint32_t hint = HNS3_IO_FUNC_HINT_NONE; ++ ++ RTE_SET_USED(key); ++ ++ if (strcmp(value, "vec") == 0) ++ hint = HNS3_IO_FUNC_HINT_VEC; ++ else if (strcmp(value, "sve") == 0) ++ hint = HNS3_IO_FUNC_HINT_SVE; ++ else if (strcmp(value, "simple") == 0) ++ hint = HNS3_IO_FUNC_HINT_SIMPLE; ++ else if (strcmp(value, 
"common") == 0) ++ hint = HNS3_IO_FUNC_HINT_COMMON; ++ ++ /* If the hint is valid then update output parameters */ ++ if (hint != HNS3_IO_FUNC_HINT_NONE) ++ *(uint32_t *)extra_args = hint; ++ ++ return 0; ++} ++ ++static const char * ++hns3_get_io_hint_func_name(uint32_t hint) ++{ ++ switch (hint) { ++ case HNS3_IO_FUNC_HINT_VEC: ++ return "vec"; ++ case HNS3_IO_FUNC_HINT_SVE: ++ return "sve"; ++ case HNS3_IO_FUNC_HINT_SIMPLE: ++ return "simple"; ++ case HNS3_IO_FUNC_HINT_COMMON: ++ return "common"; ++ default: ++ return "none"; ++ } ++} ++ ++static int ++hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) ++{ ++ uint64_t val; ++ ++ RTE_SET_USED(key); ++ ++ val = strtoull(value, NULL, 16); ++ *(uint64_t *)extra_args = val; ++ ++ return 0; ++} ++ ++static int ++hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) ++{ ++ uint32_t val; ++ ++ RTE_SET_USED(key); ++ ++ val = strtoul(value, NULL, 10); ++ if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX) ++ *(uint16_t *)extra_args = val; ++ ++ return 0; ++} ++ ++void ++hns3_parse_devargs(struct rte_eth_dev *dev) ++{ ++ uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS; ++ struct hns3_adapter *hns = dev->data->dev_private; ++ uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE; ++ uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE; ++ struct hns3_hw *hw = &hns->hw; ++ uint64_t dev_caps_mask = 0; ++ struct rte_kvargs *kvlist; ++ ++ if (dev->device->devargs == NULL) ++ return; ++ ++ kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL); ++ if (!kvlist) ++ return; ++ ++ (void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT, ++ &hns3_parse_io_hint_func, &rx_func_hint); ++ (void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT, ++ &hns3_parse_io_hint_func, &tx_func_hint); ++ (void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK, ++ &hns3_parse_dev_caps_mask, &dev_caps_mask); ++ (void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS, ++ &hns3_parse_mbx_time_limit, &mbx_time_limit_ms); ++ ++ rte_kvargs_free(kvlist); ++ ++ if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE) ++ hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT, ++ hns3_get_io_hint_func_name(rx_func_hint)); ++ hns->rx_func_hint = rx_func_hint; ++ if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE) ++ hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT, ++ hns3_get_io_hint_func_name(tx_func_hint)); ++ hns->tx_func_hint = tx_func_hint; ++ ++ if (dev_caps_mask != 0) ++ hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".", ++ HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask); ++ hns->dev_caps_mask = dev_caps_mask; ++} ++ ++void ++hns3_clock_gettime(struct timeval *tv) ++{ ++#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ ++#define CLOCK_TYPE CLOCK_MONOTONIC_RAW ++#else ++#define CLOCK_TYPE CLOCK_MONOTONIC ++#endif ++#define NSEC_TO_USEC_DIV 1000 ++ ++ struct timespec spec; ++ (void)clock_gettime(CLOCK_TYPE, &spec); ++ ++ tv->tv_sec = spec.tv_sec; ++ tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV; ++} ++ ++uint64_t ++hns3_clock_calctime_ms(struct timeval *tv) ++{ ++ return (uint64_t)tv->tv_sec * MSEC_PER_SEC + ++ tv->tv_usec / USEC_PER_MSEC; ++} ++ ++uint64_t ++hns3_clock_gettime_ms(void) ++{ ++ struct timeval tv; ++ ++ hns3_clock_gettime(&tv); ++ return hns3_clock_calctime_ms(&tv); ++} ++ ++void hns3_ether_format_addr(char *buf, uint16_t size, ++ const struct rte_ether_addr *ether_addr) ++{ ++ snprintf(buf, size, "%02X:**:**:**:%02X:%02X", ++ ether_addr->addr_bytes[0], ++ ether_addr->addr_bytes[4], ++ 
ether_addr->addr_bytes[5]); ++} ++ ++static int ++hns3_set_mc_addr_chk_param(struct hns3_hw *hw, ++ struct rte_ether_addr *mc_addr_set, ++ uint32_t nb_mc_addr) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct rte_ether_addr *addr; ++ uint16_t mac_addrs_capa; ++ uint32_t i; ++ uint32_t j; ++ ++ if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { ++ hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " ++ "invalid. valid range: 0~%d", ++ nb_mc_addr, HNS3_MC_MACADDR_NUM); ++ return -EINVAL; ++ } ++ ++ /* Check if input mac addresses are valid */ ++ for (i = 0; i < nb_mc_addr; i++) { ++ addr = &mc_addr_set[i]; ++ if (!rte_is_multicast_ether_addr(addr)) { ++ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, ++ addr); ++ hns3_err(hw, ++ "failed to set mc mac addr, addr(%s) invalid.", ++ mac_str); ++ return -EINVAL; ++ } ++ ++ /* Check if there are duplicate addresses */ ++ for (j = i + 1; j < nb_mc_addr; j++) { ++ if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { ++ hns3_ether_format_addr(mac_str, ++ RTE_ETHER_ADDR_FMT_SIZE, ++ addr); ++ hns3_err(hw, "failed to set mc mac addr, " ++ "addrs invalid. two same addrs(%s).", ++ mac_str); ++ return -EINVAL; ++ } ++ } ++ ++ /* ++ * Check if there are duplicate addresses between mac_addrs ++ * and mc_addr_set ++ */ ++ mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : ++ HNS3_UC_MACADDR_NUM; ++ for (j = 0; j < mac_addrs_capa; j++) { ++ if (rte_is_same_ether_addr(addr, ++ &hw->data->mac_addrs[j])) { ++ hns3_ether_format_addr(mac_str, ++ RTE_ETHER_ADDR_FMT_SIZE, ++ addr); ++ hns3_err(hw, "failed to set mc mac addr, " ++ "addrs invalid. addrs(%s) has already " ++ "configured in mac_addr add API", ++ mac_str); ++ return -EINVAL; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++int ++hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, ++ struct rte_ether_addr *mc_addr_set, ++ uint32_t nb_mc_addr) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct rte_ether_addr *addr; ++ int cur_addr_num; ++ int set_addr_num; ++ int num; ++ int ret; ++ int i; ++ ++ /* Check if input parameters are valid */ ++ ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); ++ if (ret) ++ return ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ cur_addr_num = hw->mc_addrs_num; ++ for (i = 0; i < cur_addr_num; i++) { ++ num = cur_addr_num - i - 1; ++ addr = &hw->mc_addrs[num]; ++ ret = hw->ops.del_mc_mac_addr(hw, addr); ++ if (ret) { ++ rte_spinlock_unlock(&hw->lock); ++ return ret; ++ } ++ ++ hw->mc_addrs_num--; ++ } ++ ++ set_addr_num = (int)nb_mc_addr; ++ for (i = 0; i < set_addr_num; i++) { ++ addr = &mc_addr_set[i]; ++ ret = hw->ops.add_mc_mac_addr(hw, addr); ++ if (ret) { ++ rte_spinlock_unlock(&hw->lock); ++ return ret; ++ } ++ ++ rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]); ++ hw->mc_addrs_num++; ++ } ++ rte_spinlock_unlock(&hw->lock); ++ ++ return 0; ++} ++ ++int ++hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) ++{ ++ char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_hw *hw = &hns->hw; ++ struct rte_ether_addr *addr; ++ int ret = 0; ++ int i; ++ ++ for (i = 0; i < hw->mc_addrs_num; i++) { ++ addr = &hw->mc_addrs[i]; ++ if (!rte_is_multicast_ether_addr(addr)) ++ continue; ++ if (del) ++ ret = hw->ops.del_mc_mac_addr(hw, addr); ++ else ++ ret = hw->ops.add_mc_mac_addr(hw, addr); ++ if (ret) { ++ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, ++ addr); ++ hns3_dbg(hw, "failed to %s mc mac addr: %s ret = %d", ++ del ? 
"Remove" : "Restore", mac_str, ret); ++ } ++ } ++ return ret; ++} ++ ++int ++hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) ++{ ++ char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_hw *hw = &hns->hw; ++ struct hns3_hw_ops *ops = &hw->ops; ++ struct rte_ether_addr *addr; ++ uint16_t mac_addrs_capa; ++ int ret = 0; ++ int i; ++ ++ mac_addrs_capa = ++ hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : HNS3_UC_MACADDR_NUM; ++ for (i = 0; i < mac_addrs_capa; i++) { ++ addr = &hw->data->mac_addrs[i]; ++ if (rte_is_zero_ether_addr(addr)) ++ continue; ++ if (rte_is_multicast_ether_addr(addr)) ++ ret = del ? ops->del_mc_mac_addr(hw, addr) : ++ ops->add_mc_mac_addr(hw, addr); ++ else ++ ret = del ? ops->del_uc_mac_addr(hw, addr) : ++ ops->add_uc_mac_addr(hw, addr); ++ ++ if (ret) { ++ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, ++ addr); ++ hns3_err(hw, "failed to %s mac addr(%s) index:%d ret = %d.", ++ del ? "remove" : "restore", mac_str, i, ret); ++ } ++ } ++ ++ return ret; ++} ++ ++static bool ++hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr) ++{ ++ char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct rte_ether_addr *addr; ++ int i; ++ ++ for (i = 0; i < hw->mc_addrs_num; i++) { ++ addr = &hw->mc_addrs[i]; ++ /* Check if there are duplicate addresses in mc_addrs[] */ ++ if (rte_is_same_ether_addr(addr, mc_addr)) { ++ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, ++ addr); ++ hns3_err(hw, "failed to add mc mac addr, same addrs" ++ "(%s) is added by the set_mc_mac_addr_list " ++ "API", mac_str); ++ return true; ++ } ++ } ++ ++ return false; ++} ++ ++int ++hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, ++ __rte_unused uint32_t idx, __rte_unused uint32_t pool) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ++ /* ++ * In hns3 network engine adding UC and MC mac address with different ++ * commands with firmware. We need to determine whether the input ++ * address is a UC or a MC address to call different commands. ++ * By the way, it is recommended calling the API function named ++ * rte_eth_dev_set_mc_addr_list to set the MC mac address, because ++ * using the rte_eth_dev_mac_addr_add API function to set MC mac address ++ * may affect the specifications of UC mac addresses. 
++ */ ++ if (rte_is_multicast_ether_addr(mac_addr)) { ++ if (hns3_find_duplicate_mc_addr(hw, mac_addr)) { ++ rte_spinlock_unlock(&hw->lock); ++ return -EINVAL; ++ } ++ ret = hw->ops.add_mc_mac_addr(hw, mac_addr); ++ } else { ++ ret = hw->ops.add_uc_mac_addr(hw, mac_addr); ++ } ++ rte_spinlock_unlock(&hw->lock); ++ if (ret) { ++ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, ++ mac_addr); ++ hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, ++ ret); ++ } ++ ++ return ret; ++} ++ ++void ++hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ /* index will be checked by upper level rte interface */ ++ struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx]; ++ char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ++ if (rte_is_multicast_ether_addr(mac_addr)) ++ ret = hw->ops.del_mc_mac_addr(hw, mac_addr); ++ else ++ ret = hw->ops.del_uc_mac_addr(hw, mac_addr); ++ rte_spinlock_unlock(&hw->lock); ++ if (ret) { ++ hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, ++ mac_addr); ++ hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str, ++ ret); ++ } ++} +diff --git a/drivers/net/hns3/hns3_common.h b/drivers/net/hns3/hns3_common.h +new file mode 100644 +index 000000000..094a0bc5f +--- /dev/null ++++ b/drivers/net/hns3/hns3_common.h +@@ -0,0 +1,48 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(C) 2021 HiSilicon Limited ++ */ ++ ++#ifndef _HNS3_COMMON_H_ ++#define _HNS3_COMMON_H_ ++ ++#include ++ ++#include "hns3_ethdev.h" ++ ++enum { ++ HNS3_IO_FUNC_HINT_NONE = 0, ++ HNS3_IO_FUNC_HINT_VEC, ++ HNS3_IO_FUNC_HINT_SVE, ++ HNS3_IO_FUNC_HINT_SIMPLE, ++ HNS3_IO_FUNC_HINT_COMMON ++}; ++ ++#define HNS3_DEVARG_RX_FUNC_HINT "rx_func_hint" ++#define HNS3_DEVARG_TX_FUNC_HINT "tx_func_hint" ++ ++#define HNS3_DEVARG_DEV_CAPS_MASK "dev_caps_mask" ++ ++#define HNS3_DEVARG_MBX_TIME_LIMIT_MS "mbx_time_limit_ms" ++ ++#define MSEC_PER_SEC 1000L ++#define USEC_PER_MSEC 1000L ++ ++void hns3_clock_gettime(struct timeval *tv); ++uint64_t hns3_clock_calctime_ms(struct timeval *tv); ++uint64_t hns3_clock_gettime_ms(void); ++ ++void hns3_parse_devargs(struct rte_eth_dev *dev); ++ ++int hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del); ++int hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del); ++int hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, ++ __rte_unused uint32_t idx, __rte_unused uint32_t pool); ++ ++void hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx); ++int hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, ++ struct rte_ether_addr *mc_addr_set, ++ uint32_t nb_mc_addr); ++void hns3_ether_format_addr(char *buf, uint16_t size, ++ const struct rte_ether_addr *ether_addr); ++ ++#endif /* _HNS3_COMMON_H_ */ +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 03f6da5bc..818835391 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -6,9 +6,9 @@ + #include + #include + #include +-#include + + #include "hns3_ethdev.h" ++#include "hns3_common.h" + #include "hns3_logs.h" + #include "hns3_rxtx.h" + #include "hns3_intr.h" +@@ -105,14 +105,6 @@ static int hns3_do_stop(struct hns3_adapter *hns); + static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds); + static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable); + +-void hns3_ether_format_addr(char *buf, uint16_t size, +- const struct 
rte_ether_addr *ether_addr) +-{ +- snprintf(buf, size, "%02X:**:**:**:%02X:%02X", +- ether_addr->addr_bytes[0], +- ether_addr->addr_bytes[4], +- ether_addr->addr_bytes[5]); +-} + + static void + hns3_pf_disable_irq0(struct hns3_hw *hw) +@@ -1609,68 +1601,6 @@ hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + return ret; + } + +-static bool +-hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr) +-{ +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- struct rte_ether_addr *addr; +- int i; +- +- for (i = 0; i < hw->mc_addrs_num; i++) { +- addr = &hw->mc_addrs[i]; +- /* Check if there are duplicate addresses in mc_addrs[] */ +- if (rte_is_same_ether_addr(addr, mc_addr)) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, "failed to add mc mac addr, same addrs" +- "(%s) is added by the set_mc_mac_addr_list " +- "API", mac_str); +- return true; +- } +- } +- +- return false; +-} +- +-int +-hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, +- __rte_unused uint32_t idx, __rte_unused uint32_t pool) +-{ +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- int ret; +- +- rte_spinlock_lock(&hw->lock); +- +- /* +- * In hns3 network engine adding UC and MC mac address with different +- * commands with firmware. We need to determine whether the input +- * address is a UC or a MC address to call different commands. +- * By the way, it is recommended calling the API function named +- * rte_eth_dev_set_mc_addr_list to set the MC mac address, because +- * using the rte_eth_dev_mac_addr_add API function to set MC mac address +- * may affect the specifications of UC mac addresses. +- */ +- if (rte_is_multicast_ether_addr(mac_addr)) { +- if (hns3_find_duplicate_mc_addr(hw, mac_addr)) { +- rte_spinlock_unlock(&hw->lock); +- return -EINVAL; +- } +- ret = hw->ops.add_mc_mac_addr(hw, mac_addr); +- } else { +- ret = hw->ops.add_uc_mac_addr(hw, mac_addr); +- } +- rte_spinlock_unlock(&hw->lock); +- if (ret) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- mac_addr); +- hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, +- ret); +- } +- +- return ret; +-} +- + static int + hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { +@@ -1699,30 +1629,6 @@ hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + return ret; + } + +-void +-hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) +-{ +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- /* index will be checked by upper level rte interface */ +- struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx]; +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- int ret; +- +- rte_spinlock_lock(&hw->lock); +- +- if (rte_is_multicast_ether_addr(mac_addr)) +- ret = hw->ops.del_mc_mac_addr(hw, mac_addr); +- else +- ret = hw->ops.del_uc_mac_addr(hw, mac_addr); +- rte_spinlock_unlock(&hw->lock); +- if (ret) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- mac_addr); +- hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str, +- ret); +- } +-} +- + static int + hns3_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) +@@ -1787,41 +1693,6 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + return ret; + } + +-int +-hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) +-{ +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- struct hns3_hw *hw = &hns->hw; 
+- struct hns3_hw_ops *ops = &hw->ops; +- struct rte_ether_addr *addr; +- uint16_t mac_addrs_capa; +- int ret = 0; +- int i; +- +- mac_addrs_capa = +- hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : HNS3_UC_MACADDR_NUM; +- for (i = 0; i < mac_addrs_capa; i++) { +- addr = &hw->data->mac_addrs[i]; +- if (rte_is_zero_ether_addr(addr)) +- continue; +- if (rte_is_multicast_ether_addr(addr)) +- ret = del ? ops->del_mc_mac_addr(hw, addr) : +- ops->add_mc_mac_addr(hw, addr); +- else +- ret = del ? ops->del_uc_mac_addr(hw, addr) : +- ops->add_uc_mac_addr(hw, addr); +- +- if (ret) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, "failed to %s mac addr(%s) index:%d ret = %d.", +- del ? "remove" : "restore", mac_str, i, ret); +- } +- } +- +- return ret; +-} +- + static void + hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr) + { +@@ -1947,150 +1818,6 @@ hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + return ret; + } + +-static int +-hns3_set_mc_addr_chk_param(struct hns3_hw *hw, +- struct rte_ether_addr *mc_addr_set, +- uint32_t nb_mc_addr) +-{ +- struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- struct rte_ether_addr *addr; +- uint16_t mac_addrs_capa; +- uint32_t i; +- uint32_t j; +- +- if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { +- hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " +- "invalid. valid range: 0~%d", +- nb_mc_addr, HNS3_MC_MACADDR_NUM); +- return -EINVAL; +- } +- +- /* Check if input mac addresses are valid */ +- for (i = 0; i < nb_mc_addr; i++) { +- addr = &mc_addr_set[i]; +- if (!rte_is_multicast_ether_addr(addr)) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, +- "failed to set mc mac addr, addr(%s) invalid.", +- mac_str); +- return -EINVAL; +- } +- +- /* Check if there are duplicate addresses */ +- for (j = i + 1; j < nb_mc_addr; j++) { +- if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { +- hns3_ether_format_addr(mac_str, +- RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, "failed to set mc mac addr, " +- "addrs invalid. two same addrs(%s).", +- mac_str); +- return -EINVAL; +- } +- } +- +- /* +- * Check if there are duplicate addresses between mac_addrs +- * and mc_addr_set +- */ +- mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : +- HNS3_UC_MACADDR_NUM; +- for (j = 0; j < mac_addrs_capa; j++) { +- if (rte_is_same_ether_addr(addr, +- &hw->data->mac_addrs[j])) { +- hns3_ether_format_addr(mac_str, +- RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_err(hw, "failed to set mc mac addr, " +- "addrs invalid. 
addrs(%s) has already " +- "configured in mac_addr add API", +- mac_str); +- return -EINVAL; +- } +- } +- } +- +- return 0; +-} +- +-int +-hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, +- struct rte_ether_addr *mc_addr_set, +- uint32_t nb_mc_addr) +-{ +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- struct rte_ether_addr *addr; +- int cur_addr_num; +- int set_addr_num; +- int num; +- int ret; +- int i; +- +- /* Check if input parameters are valid */ +- ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); +- if (ret) +- return ret; +- +- rte_spinlock_lock(&hw->lock); +- cur_addr_num = hw->mc_addrs_num; +- for (i = 0; i < cur_addr_num; i++) { +- num = cur_addr_num - i - 1; +- addr = &hw->mc_addrs[num]; +- ret = hw->ops.del_mc_mac_addr(hw, addr); +- if (ret) { +- rte_spinlock_unlock(&hw->lock); +- return ret; +- } +- +- hw->mc_addrs_num--; +- } +- +- set_addr_num = (int)nb_mc_addr; +- for (i = 0; i < set_addr_num; i++) { +- addr = &mc_addr_set[i]; +- ret = hw->ops.add_mc_mac_addr(hw, addr); +- if (ret) { +- rte_spinlock_unlock(&hw->lock); +- return ret; +- } +- +- rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]); +- hw->mc_addrs_num++; +- } +- rte_spinlock_unlock(&hw->lock); +- +- return 0; +-} +- +-int +-hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) +-{ +- char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; +- struct hns3_hw *hw = &hns->hw; +- struct rte_ether_addr *addr; +- int ret = 0; +- int i; +- +- for (i = 0; i < hw->mc_addrs_num; i++) { +- addr = &hw->mc_addrs[i]; +- if (!rte_is_multicast_ether_addr(addr)) +- continue; +- if (del) +- ret = hw->ops.del_mc_mac_addr(hw, addr); +- else +- ret = hw->ops.add_mc_mac_addr(hw, addr); +- if (ret) { +- hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, +- addr); +- hns3_dbg(hw, "failed to %s mc mac addr: %s ret = %d", +- del ? 
"Remove" : "Restore", mac_str, ret); +- } +- } +- return ret; +-} +- + static int + hns3_check_mq_mode(struct rte_eth_dev *dev) + { +@@ -7155,161 +6882,6 @@ hns3_get_module_info(struct rte_eth_dev *dev, + return 0; + } + +-void +-hns3_clock_gettime(struct timeval *tv) +-{ +-#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ +-#define CLOCK_TYPE CLOCK_MONOTONIC_RAW +-#else +-#define CLOCK_TYPE CLOCK_MONOTONIC +-#endif +-#define NSEC_TO_USEC_DIV 1000 +- +- struct timespec spec; +- (void)clock_gettime(CLOCK_TYPE, &spec); +- +- tv->tv_sec = spec.tv_sec; +- tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV; +-} +- +-uint64_t +-hns3_clock_calctime_ms(struct timeval *tv) +-{ +- return (uint64_t)tv->tv_sec * MSEC_PER_SEC + +- tv->tv_usec / USEC_PER_MSEC; +-} +- +-uint64_t +-hns3_clock_gettime_ms(void) +-{ +- struct timeval tv; +- +- hns3_clock_gettime(&tv); +- return hns3_clock_calctime_ms(&tv); +-} +- +-static int +-hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) +-{ +- uint32_t hint = HNS3_IO_FUNC_HINT_NONE; +- +- RTE_SET_USED(key); +- +- if (strcmp(value, "vec") == 0) +- hint = HNS3_IO_FUNC_HINT_VEC; +- else if (strcmp(value, "sve") == 0) +- hint = HNS3_IO_FUNC_HINT_SVE; +- else if (strcmp(value, "simple") == 0) +- hint = HNS3_IO_FUNC_HINT_SIMPLE; +- else if (strcmp(value, "common") == 0) +- hint = HNS3_IO_FUNC_HINT_COMMON; +- +- /* If the hint is valid then update output parameters */ +- if (hint != HNS3_IO_FUNC_HINT_NONE) +- *(uint32_t *)extra_args = hint; +- +- return 0; +-} +- +-static const char * +-hns3_get_io_hint_func_name(uint32_t hint) +-{ +- switch (hint) { +- case HNS3_IO_FUNC_HINT_VEC: +- return "vec"; +- case HNS3_IO_FUNC_HINT_SVE: +- return "sve"; +- case HNS3_IO_FUNC_HINT_SIMPLE: +- return "simple"; +- case HNS3_IO_FUNC_HINT_COMMON: +- return "common"; +- default: +- return "none"; +- } +-} +- +-static int +-hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) +-{ +- uint64_t val; +- +- RTE_SET_USED(key); +- +- val = strtoull(value, NULL, 16); +- *(uint64_t *)extra_args = val; +- +- return 0; +-} +- +-static int +-hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) +-{ +- uint32_t val; +- +- RTE_SET_USED(key); +- +- val = strtoul(value, NULL, 10); +- +- /* +- * 500ms is empirical value in process of mailbox communication. If +- * the delay value is set to one lower thanthe empirical value, mailbox +- * communication may fail. 
+- */ +- if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX) +- *(uint16_t *)extra_args = val; +- +- return 0; +-} +- +-void +-hns3_parse_devargs(struct rte_eth_dev *dev) +-{ +- uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS; +- struct hns3_adapter *hns = dev->data->dev_private; +- uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE; +- uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE; +- struct hns3_hw *hw = &hns->hw; +- uint64_t dev_caps_mask = 0; +- struct rte_kvargs *kvlist; +- +- if (dev->device->devargs == NULL) +- return; +- +- kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL); +- if (!kvlist) +- return; +- +- (void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT, +- &hns3_parse_io_hint_func, &rx_func_hint); +- (void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT, +- &hns3_parse_io_hint_func, &tx_func_hint); +- (void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK, +- &hns3_parse_dev_caps_mask, &dev_caps_mask); +- (void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS, +- &hns3_parse_mbx_time_limit, &mbx_time_limit_ms); +- +- rte_kvargs_free(kvlist); +- +- if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE) +- hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT, +- hns3_get_io_hint_func_name(rx_func_hint)); +- hns->rx_func_hint = rx_func_hint; +- if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE) +- hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT, +- hns3_get_io_hint_func_name(tx_func_hint)); +- hns->tx_func_hint = tx_func_hint; +- +- if (dev_caps_mask != 0) +- hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".", +- HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask); +- hns->dev_caps_mask = dev_caps_mask; +- +- if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS) +- hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS, +- mbx_time_limit_ms); +- hns->mbx_time_limit_ms = mbx_time_limit_ms; +-} +- + static const struct eth_dev_ops hns3_eth_dev_ops = { + .dev_configure = hns3_dev_configure, + .dev_start = hns3_dev_start, +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 1f1364304..96671159b 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -6,7 +6,6 @@ + #define _HNS3_ETHDEV_H_ + + #include +-#include + #include + #include + #include +@@ -869,14 +868,6 @@ struct hns3_adapter { + struct hns3_ptype_table ptype_tbl __rte_cache_aligned; + }; + +-enum { +- HNS3_IO_FUNC_HINT_NONE = 0, +- HNS3_IO_FUNC_HINT_VEC, +- HNS3_IO_FUNC_HINT_SVE, +- HNS3_IO_FUNC_HINT_SIMPLE, +- HNS3_IO_FUNC_HINT_COMMON +-}; +- + #define HNS3_DEVARG_RX_FUNC_HINT "rx_func_hint" + #define HNS3_DEVARG_TX_FUNC_HINT "tx_func_hint" + +@@ -1011,13 +1002,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg) + } \ + } while (0) + +-#define MSEC_PER_SEC 1000L +-#define USEC_PER_MSEC 1000L +- +-void hns3_clock_gettime(struct timeval *tv); +-uint64_t hns3_clock_calctime_ms(struct timeval *tv); +-uint64_t hns3_clock_gettime_ms(void); +- + static inline uint64_t + hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr) + { +@@ -1054,22 +1038,12 @@ int hns3_dev_filter_ctrl(struct rte_eth_dev *dev, + bool hns3_is_reset_pending(struct hns3_adapter *hns); + bool hns3vf_is_reset_pending(struct hns3_adapter *hns); + void hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query); +-void hns3_ether_format_addr(char *buf, uint16_t size, +- const struct rte_ether_addr *ether_addr); + int hns3_dev_infos_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *info); + void hns3vf_update_link_status(struct 
hns3_hw *hw, uint8_t link_status, + uint32_t link_speed, uint8_t link_duplex); +-void hns3_parse_devargs(struct rte_eth_dev *dev); + void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); +-int hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del); +-int hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del); +-int hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, +- __rte_unused uint32_t idx, __rte_unused uint32_t pool); +-void hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx); +-int hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, +- struct rte_ether_addr *mc_addr_set, +- uint32_t nb_mc_addr); ++ + int hns3_restore_ptp(struct hns3_adapter *hns); + int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev, + struct rte_eth_conf *conf); +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index 1e0cb1b63..c234e74b8 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -10,6 +10,7 @@ + #include + + #include "hns3_ethdev.h" ++#include "hns3_common.h" + #include "hns3_logs.h" + #include "hns3_rxtx.h" + #include "hns3_regs.h" +diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c +index 3484c76d2..66dc50908 100644 +--- a/drivers/net/hns3/hns3_intr.c ++++ b/drivers/net/hns3/hns3_intr.c +@@ -8,7 +8,7 @@ + #include + #include + +-#include "hns3_ethdev.h" ++#include "hns3_common.h" + #include "hns3_logs.h" + #include "hns3_intr.h" + #include "hns3_regs.h" +diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c +index 3ad85e721..04ae9aced 100644 +--- a/drivers/net/hns3/hns3_mbx.c ++++ b/drivers/net/hns3/hns3_mbx.c +@@ -5,7 +5,7 @@ + #include + #include + +-#include "hns3_ethdev.h" ++#include "hns3_common.h" + #include "hns3_regs.h" + #include "hns3_logs.h" + #include "hns3_intr.h" +diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c +index d5aa72c8f..2e41bb1b5 100644 +--- a/drivers/net/hns3/hns3_rxtx.c ++++ b/drivers/net/hns3/hns3_rxtx.c +@@ -16,7 +16,7 @@ + #include + #endif + +-#include "hns3_ethdev.h" ++#include "hns3_common.h" + #include "hns3_rxtx.h" + #include "hns3_regs.h" + #include "hns3_logs.h" +diff --git a/drivers/net/hns3/meson.build b/drivers/net/hns3/meson.build +index 881aff6de..dd49e38c4 100644 +--- a/drivers/net/hns3/meson.build ++++ b/drivers/net/hns3/meson.build +@@ -27,7 +27,9 @@ sources = files('hns3_cmd.c', + 'hns3_stats.c', + 'hns3_mp.c', + 'hns3_tm.c', +- 'hns3_ptp.c') ++ 'hns3_ptp.c', ++ 'hns3_common.c', ++) + + deps += ['hash'] + +-- +2.33.0 + diff --git a/0262-net-hns3-move-declarations-in-flow-header-file.patch b/0262-net-hns3-move-declarations-in-flow-header-file.patch new file mode 100644 index 0000000000000000000000000000000000000000..01c286a8a2cf28cbf24b0f1119e25f855e3cb8c9 --- /dev/null +++ b/0262-net-hns3-move-declarations-in-flow-header-file.patch @@ -0,0 +1,191 @@ +From f50d0076e2e9dad6e94fcc64108fb52592bf5c00 Mon Sep 17 00:00:00 2001 +From: "Min Hu (Connor)" +Date: Sat, 6 Nov 2021 09:43:03 +0800 +Subject: [PATCH 29/33] net/hns3: move declarations in flow header file + +This patch adds a hns3_flow.h to make the code easier to maintain. 
+ +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_ethdev.c | 1 + + drivers/net/hns3/hns3_ethdev.h | 1 + + drivers/net/hns3/hns3_ethdev_vf.c | 1 + + drivers/net/hns3/hns3_fdir.h | 31 ---------------------- + drivers/net/hns3/hns3_flow.c | 1 + + drivers/net/hns3/hns3_flow.h | 44 +++++++++++++++++++++++++++++++ + 6 files changed, 48 insertions(+), 31 deletions(-) + create mode 100644 drivers/net/hns3/hns3_flow.h + +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 818835391..5a826c7aa 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -15,6 +15,7 @@ + #include "hns3_regs.h" + #include "hns3_dcb.h" + #include "hns3_mp.h" ++#include "hns3_flow.h" + + #define HNS3_SERVICE_INTERVAL 1000000 /* us */ + #define HNS3_SERVICE_QUICK_INTERVAL 10 +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 96671159b..960f781e1 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -17,6 +17,7 @@ + #include "hns3_fdir.h" + #include "hns3_stats.h" + #include "hns3_tm.h" ++#include "hns3_flow.h" + + /* Vendor ID */ + #define PCI_VENDOR_ID_HUAWEI 0x19e5 +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index c234e74b8..84ae26987 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -17,6 +17,7 @@ + #include "hns3_intr.h" + #include "hns3_dcb.h" + #include "hns3_mp.h" ++#include "hns3_flow.h" + + #define HNS3VF_KEEP_ALIVE_INTERVAL 2000000 /* us */ + #define HNS3VF_SERVICE_INTERVAL 1000000 /* us */ +diff --git a/drivers/net/hns3/hns3_fdir.h b/drivers/net/hns3/hns3_fdir.h +index 3f610f7b1..f9efff3b5 100644 +--- a/drivers/net/hns3/hns3_fdir.h ++++ b/drivers/net/hns3/hns3_fdir.h +@@ -5,8 +5,6 @@ + #ifndef _HNS3_FDIR_H_ + #define _HNS3_FDIR_H_ + +-#include +- + struct hns3_fd_key_cfg { + uint8_t key_sel; + uint8_t inner_sipv6_word_en; +@@ -124,14 +122,6 @@ struct hns3_fd_ad_data { + uint16_t rule_id; + }; + +-struct hns3_flow_counter { +- LIST_ENTRY(hns3_flow_counter) next; /* Pointer to the next counter. */ +- uint32_t shared:1; /* Share counter ID with other flow rules. */ +- uint32_t ref_cnt:31; /* Reference counter. */ +- uint16_t id; /* Counter ID. */ +- uint64_t hits; /* Number of packets matched by the rule. */ +-}; +- + #define HNS3_RULE_FLAG_FDID 0x1 + #define HNS3_RULE_FLAG_VF_ID 0x2 + #define HNS3_RULE_FLAG_COUNTER 0x4 +@@ -173,21 +163,7 @@ struct hns3_fdir_rule_ele { + struct hns3_fdir_rule fdir_conf; + }; + +-/* rss filter list structure */ +-struct hns3_rss_conf_ele { +- TAILQ_ENTRY(hns3_rss_conf_ele) entries; +- struct hns3_rss_conf filter_info; +-}; +- +-/* hns3_flow memory list structure */ +-struct hns3_flow_mem { +- TAILQ_ENTRY(hns3_flow_mem) entries; +- struct rte_flow *flow; +-}; +- + TAILQ_HEAD(hns3_fdir_rule_list, hns3_fdir_rule_ele); +-TAILQ_HEAD(hns3_rss_filter_list, hns3_rss_conf_ele); +-TAILQ_HEAD(hns3_flow_mem_list, hns3_flow_mem); + + /* + * A structure used to define fields of a FDIR related info. 
+@@ -199,11 +175,6 @@ struct hns3_fdir_info { + struct hns3_fd_cfg fd_cfg; + }; + +-struct rte_flow { +- enum rte_filter_type filter_type; +- void *rule; +- uint32_t counter_id; +-}; + struct hns3_adapter; + + int hns3_init_fd_config(struct hns3_adapter *hns); +@@ -213,8 +184,6 @@ int hns3_fdir_filter_program(struct hns3_adapter *hns, + struct hns3_fdir_rule *rule, bool del); + int hns3_clear_all_fdir_filter(struct hns3_adapter *hns); + int hns3_get_count(struct hns3_hw *hw, uint32_t id, uint64_t *value); +-void hns3_flow_init(struct rte_eth_dev *dev); +-void hns3_flow_uninit(struct rte_eth_dev *dev); + int hns3_restore_all_fdir_filter(struct hns3_adapter *hns); + + #endif /* _HNS3_FDIR_H_ */ +diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c +index b25fccbca..73ef91ce9 100644 +--- a/drivers/net/hns3/hns3_flow.c ++++ b/drivers/net/hns3/hns3_flow.c +@@ -8,6 +8,7 @@ + + #include "hns3_ethdev.h" + #include "hns3_logs.h" ++#include "hns3_flow.h" + + /* Default default keys */ + static uint8_t hns3_hash_key[] = { +diff --git a/drivers/net/hns3/hns3_flow.h b/drivers/net/hns3/hns3_flow.h +new file mode 100644 +index 000000000..2eb451b72 +--- /dev/null ++++ b/drivers/net/hns3/hns3_flow.h +@@ -0,0 +1,44 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(C) 2021 HiSilicon Limited ++ */ ++ ++#ifndef _HNS3_FLOW_H_ ++#define _HNS3_FLOW_H_ ++ ++#include ++ ++struct hns3_flow_counter { ++ LIST_ENTRY(hns3_flow_counter) next; /* Pointer to the next counter. */ ++ uint32_t shared:1; /* Share counter ID with other flow rules. */ ++ uint32_t ref_cnt:31; /* Reference counter. */ ++ uint16_t id; /* Counter ID. */ ++ uint64_t hits; /* Number of packets matched by the rule. */ ++}; ++ ++struct rte_flow { ++ enum rte_filter_type filter_type; ++ void *rule; ++ uint32_t counter_id; ++}; ++ ++/* rss filter list structure */ ++struct hns3_rss_conf_ele { ++ TAILQ_ENTRY(hns3_rss_conf_ele) entries; ++ struct hns3_rss_conf filter_info; ++}; ++ ++/* hns3_flow memory list structure */ ++struct hns3_flow_mem { ++ TAILQ_ENTRY(hns3_flow_mem) entries; ++ struct rte_flow *flow; ++}; ++ ++TAILQ_HEAD(hns3_rss_filter_list, hns3_rss_conf_ele); ++TAILQ_HEAD(hns3_flow_mem_list, hns3_flow_mem); ++ ++int hns3_dev_flow_ops_get(struct rte_eth_dev *dev, ++ const struct rte_flow_ops **ops); ++void hns3_flow_init(struct rte_eth_dev *dev); ++void hns3_flow_uninit(struct rte_eth_dev *dev); ++ ++#endif /* _HNS3_FLOW_H_ */ +-- +2.33.0 + diff --git a/0263-net-hns3-remove-magic-numbers.patch b/0263-net-hns3-remove-magic-numbers.patch new file mode 100644 index 0000000000000000000000000000000000000000..7e5bb3f8cd2bdc35491830d0c200e6c24156c13b --- /dev/null +++ b/0263-net-hns3-remove-magic-numbers.patch @@ -0,0 +1,53 @@ +From b96b2ca6d5b510d372137ef4b3ef66b762434c92 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Sat, 6 Nov 2021 09:43:04 +0800 +Subject: [PATCH 30/33] net/hns3: remove magic numbers + +Removing magic numbers with macros. 
+ +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_common.c | 4 ++-- + drivers/net/hns3/hns3_common.h | 3 +++ + 2 files changed, 5 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/hns3/hns3_common.c b/drivers/net/hns3/hns3_common.c +index 85316d342..c306e0b0e 100644 +--- a/drivers/net/hns3/hns3_common.c ++++ b/drivers/net/hns3/hns3_common.c +@@ -54,7 +54,7 @@ hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) + + RTE_SET_USED(key); + +- val = strtoull(value, NULL, 16); ++ val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL); + *(uint64_t *)extra_args = val; + + return 0; +@@ -67,7 +67,7 @@ hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) + + RTE_SET_USED(key); + +- val = strtoul(value, NULL, 10); ++ val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL); + if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX) + *(uint16_t *)extra_args = val; + +diff --git a/drivers/net/hns3/hns3_common.h b/drivers/net/hns3/hns3_common.h +index 094a0bc5f..68f9b1b96 100644 +--- a/drivers/net/hns3/hns3_common.h ++++ b/drivers/net/hns3/hns3_common.h +@@ -9,6 +9,9 @@ + + #include "hns3_ethdev.h" + ++#define HNS3_CONVERT_TO_DECIMAL 10 ++#define HNS3_CONVERT_TO_HEXADECIMAL 16 ++ + enum { + HNS3_IO_FUNC_HINT_NONE = 0, + HNS3_IO_FUNC_HINT_VEC, +-- +2.33.0 + diff --git a/0264-net-hns3-mark-unchecked-return-of-snprintf.patch b/0264-net-hns3-mark-unchecked-return-of-snprintf.patch new file mode 100644 index 0000000000000000000000000000000000000000..2734b5fb474a39610218becf85d072b20d6b2a0b --- /dev/null +++ b/0264-net-hns3-mark-unchecked-return-of-snprintf.patch @@ -0,0 +1,38 @@ +From dee0abb3ec0a868c1f213165bd88c7a26c4ee253 Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Sat, 6 Nov 2021 09:43:05 +0800 +Subject: [PATCH 31/33] net/hns3: mark unchecked return of snprintf + +Fixing the return value of the function to clear static warning. + +Fixes: 1181500b2fc5 ("net/hns3: adjust MAC address logging") +Cc: stable@dpdk.org + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_common.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/hns3/hns3_common.c b/drivers/net/hns3/hns3_common.c +index c306e0b0e..9a47fbfbd 100644 +--- a/drivers/net/hns3/hns3_common.c ++++ b/drivers/net/hns3/hns3_common.c +@@ -154,10 +154,10 @@ hns3_clock_gettime_ms(void) + void hns3_ether_format_addr(char *buf, uint16_t size, + const struct rte_ether_addr *ether_addr) + { +- snprintf(buf, size, "%02X:**:**:**:%02X:%02X", +- ether_addr->addr_bytes[0], +- ether_addr->addr_bytes[4], +- ether_addr->addr_bytes[5]); ++ (void)snprintf(buf, size, "%02X:**:**:**:%02X:%02X", ++ ether_addr->addr_bytes[0], ++ ether_addr->addr_bytes[4], ++ ether_addr->addr_bytes[5]); + } + + static int +-- +2.33.0 + diff --git a/0265-net-hns3-remove-PF-VF-duplicate-code.patch b/0265-net-hns3-remove-PF-VF-duplicate-code.patch new file mode 100644 index 0000000000000000000000000000000000000000..a350f7627774792cdcf453b7bee55bb6515d91ec --- /dev/null +++ b/0265-net-hns3-remove-PF-VF-duplicate-code.patch @@ -0,0 +1,1256 @@ +From aa098540a3a8b20e94c4c8b71256663d2e85dd27 Mon Sep 17 00:00:00 2001 +From: Chengwen Feng +Date: Sat, 6 Nov 2021 09:43:06 +0800 +Subject: [PATCH 32/33] net/hns3: remove PF/VF duplicate code + +This patch remove PF/VF duplicate code of: +1. get firmware version. +2. get device info. +3. rx interrupt related functions. 
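+
+The shared helpers reach PF- or VF-specific behaviour through the hw->ops
+callback table, roughly as in the following sketch (struct and signatures
+abbreviated for illustration):
+
+	/* Each driver installs its own callbacks at init time. */
+	struct hns3_hw_ops {
+		int (*bind_ring_with_vector)(struct hns3_hw *hw,
+					     uint16_t vector_id, bool en,
+					     enum hns3_ring_type queue_type,
+					     uint16_t queue_id);
+		/* ... MAC address add/del hooks, and so on ... */
+	};
+
+	/* The common code in hns3_common.c is then identical for PF and VF: */
+	ret = hw->ops.bind_ring_with_vector(hw, vec, false,
+					    HNS3_RING_TYPE_TX, i);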
+ +Signed-off-by: Chengwen Feng +Signed-off-by: Min Hu (Connor) +--- + drivers/net/hns3/hns3_common.c | 339 +++++++++++++++++++++++++++++- + drivers/net/hns3/hns3_common.h | 10 + + drivers/net/hns3/hns3_ethdev.c | 314 +-------------------------- + drivers/net/hns3/hns3_ethdev.h | 15 +- + drivers/net/hns3/hns3_ethdev_vf.c | 323 +--------------------------- + drivers/net/hns3/hns3_tm.c | 2 +- + 6 files changed, 364 insertions(+), 639 deletions(-) + +diff --git a/drivers/net/hns3/hns3_common.c b/drivers/net/hns3/hns3_common.c +index 9a47fbfbd..eac2aa104 100644 +--- a/drivers/net/hns3/hns3_common.c ++++ b/drivers/net/hns3/hns3_common.c +@@ -3,9 +3,154 @@ + */ + + #include ++#include ++#include ++#include + +-#include "hns3_logs.h" + #include "hns3_common.h" ++#include "hns3_logs.h" ++#include "hns3_regs.h" ++#include "hns3_rxtx.h" ++ ++int ++hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, ++ size_t fw_size) ++{ ++ struct hns3_adapter *hns = eth_dev->data->dev_private; ++ struct hns3_hw *hw = &hns->hw; ++ uint32_t version = hw->fw_version; ++ int ret; ++ ++ ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", ++ hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, ++ HNS3_FW_VERSION_BYTE3_S), ++ hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, ++ HNS3_FW_VERSION_BYTE2_S), ++ hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, ++ HNS3_FW_VERSION_BYTE1_S), ++ hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, ++ HNS3_FW_VERSION_BYTE0_S)); ++ if (ret < 0) ++ return -EINVAL; ++ ++ ret += 1; /* add the size of '\0' */ ++ if (fw_size < (size_t)ret) ++ return ret; ++ else ++ return 0; ++} ++ ++int ++hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) ++{ ++ struct hns3_adapter *hns = eth_dev->data->dev_private; ++ struct hns3_hw *hw = &hns->hw; ++ uint16_t queue_num = hw->tqps_num; ++ ++ /* ++ * In interrupt mode, 'max_rx_queues' is set based on the number of ++ * MSI-X interrupt resources of the hardware. 
++ */ ++ if (hw->data->dev_conf.intr_conf.rxq == 1) ++ queue_num = hw->intr_tqps_num; ++ ++ info->max_rx_queues = queue_num; ++ info->max_tx_queues = hw->tqps_num; ++ info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ ++ info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; ++ info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; ++ info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; ++ info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | ++ RTE_ETH_RX_OFFLOAD_TCP_CKSUM | ++ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | ++ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | ++ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | ++ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | ++ RTE_ETH_RX_OFFLOAD_SCATTER | ++ RTE_ETH_RX_OFFLOAD_VLAN_STRIP | ++ RTE_ETH_RX_OFFLOAD_VLAN_FILTER | ++ RTE_ETH_RX_OFFLOAD_RSS_HASH | ++ RTE_ETH_RX_OFFLOAD_TCP_LRO); ++ info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | ++ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | ++ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | ++ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | ++ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | ++ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | ++ RTE_ETH_TX_OFFLOAD_TCP_TSO | ++ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | ++ RTE_ETH_TX_OFFLOAD_VLAN_INSERT); ++ ++ if (!hw->port_base_vlan_cfg.state) ++ info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT; ++ ++ if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) ++ info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ ++ if (hns3_dev_get_support(hw, INDEP_TXRX)) ++ info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | ++ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; ++ info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; ++ ++ if (hns3_dev_get_support(hw, PTP)) ++ info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP; ++ ++ info->rx_desc_lim = (struct rte_eth_desc_lim) { ++ .nb_max = HNS3_MAX_RING_DESC, ++ .nb_min = HNS3_MIN_RING_DESC, ++ .nb_align = HNS3_ALIGN_RING_DESC, ++ }; ++ ++ info->tx_desc_lim = (struct rte_eth_desc_lim) { ++ .nb_max = HNS3_MAX_RING_DESC, ++ .nb_min = HNS3_MIN_RING_DESC, ++ .nb_align = HNS3_ALIGN_RING_DESC, ++ .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, ++ .nb_mtu_seg_max = hw->max_non_tso_bd_num, ++ }; ++ ++ info->default_rxconf = (struct rte_eth_rxconf) { ++ .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, ++ /* ++ * If there are no available Rx buffer descriptors, incoming ++ * packets are always dropped by hardware based on hns3 network ++ * engine. ++ */ ++ .rx_drop_en = 1, ++ .offloads = 0, ++ }; ++ info->default_txconf = (struct rte_eth_txconf) { ++ .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, ++ .offloads = 0, ++ }; ++ ++ info->reta_size = hw->rss_ind_tbl_size; ++ info->hash_key_size = HNS3_RSS_KEY_SIZE; ++ info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; ++ ++ info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; ++ info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; ++ info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; ++ info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; ++ info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; ++ info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; ++ ++ /* ++ * Next is the PF/VF difference section. 
++ */ ++ if (!hns->is_vf) { ++ info->max_mac_addrs = HNS3_UC_MACADDR_NUM; ++ info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; ++ info->speed_capa = hns3_get_speed_capa(hw); ++ } else { ++ info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM; ++ } ++ ++ return 0; ++} + + static int + hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) +@@ -68,6 +213,12 @@ hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) + RTE_SET_USED(key); + + val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL); ++ ++ /* ++ * 500ms is empirical value in process of mailbox communication. If ++ * the delay value is set to one lower thanthe empirical value, mailbox ++ * communication may fail. ++ */ + if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX) + *(uint16_t *)extra_args = val; + +@@ -116,6 +267,11 @@ hns3_parse_devargs(struct rte_eth_dev *dev) + hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".", + HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask); + hns->dev_caps_mask = dev_caps_mask; ++ ++ if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS) ++ hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS, ++ mbx_time_limit_ms); ++ hns->mbx_time_limit_ms = mbx_time_limit_ms; + } + + void +@@ -424,3 +580,184 @@ hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) + ret); + } + } ++ ++int ++hns3_init_ring_with_vector(struct hns3_hw *hw) ++{ ++ uint16_t vec; ++ int ret; ++ int i; ++ ++ /* ++ * In hns3 network engine, vector 0 is always the misc interrupt of this ++ * function, vector 1~N can be used respectively for the queues of the ++ * function. Tx and Rx queues with the same number share the interrupt ++ * vector. In the initialization clearing the all hardware mapping ++ * relationship configurations between queues and interrupt vectors is ++ * needed, so some error caused by the residual configurations, such as ++ * the unexpected Tx interrupt, can be avoid. ++ */ ++ vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ ++ if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE) ++ vec = vec - 1; /* the last interrupt is reserved */ ++ hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); ++ for (i = 0; i < hw->intr_tqps_num; i++) { ++ /* ++ * Set gap limiter/rate limiter/quanity limiter algorithm ++ * configuration for interrupt coalesce of queue's interrupt. ++ */ ++ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, ++ HNS3_TQP_INTR_GL_DEFAULT); ++ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, ++ HNS3_TQP_INTR_GL_DEFAULT); ++ hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); ++ /* ++ * QL(quantity limiter) is not used currently, just set 0 to ++ * close it. 
++ */ ++ hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); ++ ++ ret = hw->ops.bind_ring_with_vector(hw, vec, false, ++ HNS3_RING_TYPE_TX, i); ++ if (ret) { ++ PMD_INIT_LOG(ERR, "fail to unbind TX ring(%d) with " ++ "vector: %u, ret=%d", i, vec, ret); ++ return ret; ++ } ++ ++ ret = hw->ops.bind_ring_with_vector(hw, vec, false, ++ HNS3_RING_TYPE_RX, i); ++ if (ret) { ++ PMD_INIT_LOG(ERR, "fail to unbind RX ring(%d) with " ++ "vector: %u, ret=%d", i, vec, ret); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++int ++hns3_map_rx_interrupt(struct rte_eth_dev *dev) ++{ ++ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); ++ struct rte_intr_handle *intr_handle = pci_dev->intr_handle; ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ uint16_t base = RTE_INTR_VEC_ZERO_OFFSET; ++ uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET; ++ uint32_t intr_vector; ++ uint16_t q_id; ++ int ret; ++ ++ /* ++ * hns3 needs a separate interrupt to be used as event interrupt which ++ * could not be shared with task queue pair, so KERNEL drivers need ++ * support multiple interrupt vectors. ++ */ ++ if (dev->data->dev_conf.intr_conf.rxq == 0 || ++ !rte_intr_cap_multiple(intr_handle)) ++ return 0; ++ ++ rte_intr_disable(intr_handle); ++ intr_vector = hw->used_rx_queues; ++ /* creates event fd for each intr vector when MSIX is used */ ++ if (rte_intr_efd_enable(intr_handle, intr_vector)) ++ return -EINVAL; ++ ++ /* Allocate vector list */ ++ if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", ++ hw->used_rx_queues)) { ++ hns3_err(hw, "failed to allocate %u rx_queues intr_vec", ++ hw->used_rx_queues); ++ ret = -ENOMEM; ++ goto alloc_intr_vec_error; ++ } ++ ++ if (rte_intr_allow_others(intr_handle)) { ++ vec = RTE_INTR_VEC_RXTX_OFFSET; ++ base = RTE_INTR_VEC_RXTX_OFFSET; ++ } ++ ++ for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { ++ ret = hw->ops.bind_ring_with_vector(hw, vec, true, ++ HNS3_RING_TYPE_RX, q_id); ++ if (ret) ++ goto bind_vector_error; ++ ++ if (rte_intr_vec_list_index_set(intr_handle, q_id, vec)) ++ goto bind_vector_error; ++ /* ++ * If there are not enough efds (e.g. not enough interrupt), ++ * remaining queues will be bond to the last interrupt. 
++ */ ++ if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1) ++ vec++; ++ } ++ rte_intr_enable(intr_handle); ++ return 0; ++ ++bind_vector_error: ++ rte_intr_vec_list_free(intr_handle); ++alloc_intr_vec_error: ++ rte_intr_efd_disable(intr_handle); ++ return ret; ++} ++ ++void ++hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) ++{ ++ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); ++ struct rte_intr_handle *intr_handle = pci_dev->intr_handle; ++ struct hns3_adapter *hns = dev->data->dev_private; ++ struct hns3_hw *hw = &hns->hw; ++ uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; ++ uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; ++ uint16_t q_id; ++ ++ if (dev->data->dev_conf.intr_conf.rxq == 0) ++ return; ++ ++ /* unmap the ring with vector */ ++ if (rte_intr_allow_others(intr_handle)) { ++ vec = RTE_INTR_VEC_RXTX_OFFSET; ++ base = RTE_INTR_VEC_RXTX_OFFSET; ++ } ++ if (rte_intr_dp_is_en(intr_handle)) { ++ for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { ++ (void)hw->ops.bind_ring_with_vector(hw, vec, false, ++ HNS3_RING_TYPE_RX, ++ q_id); ++ if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1) ++ vec++; ++ } ++ } ++ /* Clean datapath event and queue/vec mapping */ ++ rte_intr_efd_disable(intr_handle); ++ rte_intr_vec_list_free(intr_handle); ++} ++ ++int ++hns3_restore_rx_interrupt(struct hns3_hw *hw) ++{ ++ struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; ++ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); ++ struct rte_intr_handle *intr_handle = pci_dev->intr_handle; ++ uint16_t q_id; ++ int ret; ++ ++ if (dev->data->dev_conf.intr_conf.rxq == 0) ++ return 0; ++ ++ if (rte_intr_dp_is_en(intr_handle)) { ++ for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { ++ ret = hw->ops.bind_ring_with_vector(hw, ++ rte_intr_vec_list_index_get(intr_handle, ++ q_id), ++ true, HNS3_RING_TYPE_RX, q_id); ++ if (ret) ++ return ret; ++ } ++ } ++ ++ return 0; ++} +diff --git a/drivers/net/hns3/hns3_common.h b/drivers/net/hns3/hns3_common.h +index 68f9b1b96..0dbb1c041 100644 +--- a/drivers/net/hns3/hns3_common.h ++++ b/drivers/net/hns3/hns3_common.h +@@ -30,6 +30,11 @@ enum { + #define MSEC_PER_SEC 1000L + #define USEC_PER_MSEC 1000L + ++int hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, ++ size_t fw_size); ++int hns3_dev_infos_get(struct rte_eth_dev *eth_dev, ++ struct rte_eth_dev_info *info); ++ + void hns3_clock_gettime(struct timeval *tv); + uint64_t hns3_clock_calctime_ms(struct timeval *tv); + uint64_t hns3_clock_gettime_ms(void); +@@ -48,4 +53,9 @@ int hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + void hns3_ether_format_addr(char *buf, uint16_t size, + const struct rte_ether_addr *ether_addr); + ++int hns3_init_ring_with_vector(struct hns3_hw *hw); ++int hns3_map_rx_interrupt(struct rte_eth_dev *dev); ++void hns3_unmap_rx_interrupt(struct rte_eth_dev *dev); ++int hns3_restore_rx_interrupt(struct hns3_hw *hw); ++ + #endif /* _HNS3_COMMON_H_ */ +diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c +index 5a826c7aa..77efb3146 100644 +--- a/drivers/net/hns3/hns3_ethdev.c ++++ b/drivers/net/hns3/hns3_ethdev.c +@@ -1929,62 +1929,6 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en, + return 0; + } + +-static int +-hns3_init_ring_with_vector(struct hns3_hw *hw) +-{ +- uint16_t vec; +- int ret; +- int i; +- +- /* +- * In hns3 network engine, vector 0 is always the misc interrupt of this +- * function, vector 1~N can be used respectively for the queues of the +- * function. 
Tx and Rx queues with the same number share the interrupt +- * vector. In the initialization clearing the all hardware mapping +- * relationship configurations between queues and interrupt vectors is +- * needed, so some error caused by the residual configurations, such as +- * the unexpected Tx interrupt, can be avoid. +- */ +- vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ +- if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE) +- vec = vec - 1; /* the last interrupt is reserved */ +- hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); +- for (i = 0; i < hw->intr_tqps_num; i++) { +- /* +- * Set gap limiter/rate limiter/quanity limiter algorithm +- * configuration for interrupt coalesce of queue's interrupt. +- */ +- hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, +- HNS3_TQP_INTR_GL_DEFAULT); +- hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, +- HNS3_TQP_INTR_GL_DEFAULT); +- hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); +- /* +- * QL(quantity limiter) is not used currently, just set 0 to +- * close it. +- */ +- hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); +- +- ret = hns3_bind_ring_with_vector(hw, vec, false, +- HNS3_RING_TYPE_TX, i); +- if (ret) { +- PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with " +- "vector: %u, ret=%d", i, vec, ret); +- return ret; +- } +- +- ret = hns3_bind_ring_with_vector(hw, vec, false, +- HNS3_RING_TYPE_RX, i); +- if (ret) { +- PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with " +- "vector: %u, ret=%d", i, vec, ret); +- return ret; +- } +- } +- +- return 0; +-} +- + static int + hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf) + { +@@ -2299,7 +2243,7 @@ hns3_get_firber_port_speed_capa(uint32_t supported_speed) + return speed_capa; + } + +-static uint32_t ++uint32_t + hns3_get_speed_capa(struct hns3_hw *hw) + { + struct hns3_mac *mac = &hw->mac; +@@ -2318,132 +2262,6 @@ hns3_get_speed_capa(struct hns3_hw *hw) + return speed_capa; + } + +-int +-hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) +-{ +- struct hns3_adapter *hns = eth_dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; +- uint16_t queue_num = hw->tqps_num; +- +- /* +- * In interrupt mode, 'max_rx_queues' is set based on the number of +- * MSI-X interrupt resources of the hardware. 
+- */ +- if (hw->data->dev_conf.intr_conf.rxq == 1) +- queue_num = hw->intr_tqps_num; +- +- info->max_rx_queues = queue_num; +- info->max_tx_queues = hw->tqps_num; +- info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ +- info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; +- info->max_mac_addrs = HNS3_UC_MACADDR_NUM; +- info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; +- info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; +- info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | +- DEV_RX_OFFLOAD_TCP_CKSUM | +- DEV_RX_OFFLOAD_UDP_CKSUM | +- DEV_RX_OFFLOAD_SCTP_CKSUM | +- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | +- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | +- DEV_RX_OFFLOAD_KEEP_CRC | +- DEV_RX_OFFLOAD_SCATTER | +- DEV_RX_OFFLOAD_VLAN_STRIP | +- DEV_RX_OFFLOAD_VLAN_FILTER | +- DEV_RX_OFFLOAD_JUMBO_FRAME | +- DEV_RX_OFFLOAD_RSS_HASH | +- DEV_RX_OFFLOAD_TCP_LRO); +- info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | +- DEV_TX_OFFLOAD_IPV4_CKSUM | +- DEV_TX_OFFLOAD_TCP_CKSUM | +- DEV_TX_OFFLOAD_UDP_CKSUM | +- DEV_TX_OFFLOAD_SCTP_CKSUM | +- DEV_TX_OFFLOAD_MULTI_SEGS | +- DEV_TX_OFFLOAD_TCP_TSO | +- DEV_TX_OFFLOAD_VXLAN_TNL_TSO | +- DEV_TX_OFFLOAD_GRE_TNL_TSO | +- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | +- DEV_TX_OFFLOAD_MBUF_FAST_FREE | +- hns3_txvlan_cap_get(hw)); +- +- if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) +- info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; +- +- if (hns3_dev_get_support(hw, INDEP_TXRX)) +- info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | +- RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; +- +- if (hns3_dev_get_support(hw, PTP)) +- info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; +- +- info->rx_desc_lim = (struct rte_eth_desc_lim) { +- .nb_max = HNS3_MAX_RING_DESC, +- .nb_min = HNS3_MIN_RING_DESC, +- .nb_align = HNS3_ALIGN_RING_DESC, +- }; +- +- info->tx_desc_lim = (struct rte_eth_desc_lim) { +- .nb_max = HNS3_MAX_RING_DESC, +- .nb_min = HNS3_MIN_RING_DESC, +- .nb_align = HNS3_ALIGN_RING_DESC, +- .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, +- .nb_mtu_seg_max = hw->max_non_tso_bd_num, +- }; +- +- info->speed_capa = hns3_get_speed_capa(hw); +- info->default_rxconf = (struct rte_eth_rxconf) { +- .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, +- /* +- * If there are no available Rx buffer descriptors, incoming +- * packets are always dropped by hardware based on hns3 network +- * engine. 
+- */ +- .rx_drop_en = 1, +- .offloads = 0, +- }; +- info->default_txconf = (struct rte_eth_txconf) { +- .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, +- .offloads = 0, +- }; +- +- info->reta_size = hw->rss_ind_tbl_size; +- info->hash_key_size = HNS3_RSS_KEY_SIZE; +- info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; +- +- info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; +- info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; +- info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; +- info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; +- info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; +- info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; +- +- return 0; +-} +- +-static int +-hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, +- size_t fw_size) +-{ +- struct hns3_adapter *hns = eth_dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; +- uint32_t version = hw->fw_version; +- int ret; +- +- ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", +- hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, +- HNS3_FW_VERSION_BYTE3_S), +- hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, +- HNS3_FW_VERSION_BYTE2_S), +- hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, +- HNS3_FW_VERSION_BYTE1_S), +- hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, +- HNS3_FW_VERSION_BYTE0_S)); +- ret += 1; /* add the size of '\0' */ +- if (fw_size < (uint32_t)ret) +- return ret; +- else +- return 0; +-} +- + static int + hns3_update_port_link_info(struct rte_eth_dev *eth_dev) + { +@@ -5322,99 +5140,6 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue) + return ret; + } + +-static int +-hns3_map_rx_interrupt(struct rte_eth_dev *dev) +-{ +- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); +- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- uint16_t base = RTE_INTR_VEC_ZERO_OFFSET; +- uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET; +- uint32_t intr_vector; +- uint16_t q_id; +- int ret; +- +- /* +- * hns3 needs a separate interrupt to be used as event interrupt which +- * could not be shared with task queue pair, so KERNEL drivers need +- * support multiple interrupt vectors. +- */ +- if (dev->data->dev_conf.intr_conf.rxq == 0 || +- !rte_intr_cap_multiple(intr_handle)) +- return 0; +- +- rte_intr_disable(intr_handle); +- intr_vector = hw->used_rx_queues; +- /* creates event fd for each intr vector when MSIX is used */ +- if (rte_intr_efd_enable(intr_handle, intr_vector)) +- return -EINVAL; +- +- if (intr_handle->intr_vec == NULL) { +- intr_handle->intr_vec = +- rte_zmalloc("intr_vec", +- hw->used_rx_queues * sizeof(int), 0); +- if (intr_handle->intr_vec == NULL) { +- hns3_err(hw, "failed to allocate %u rx_queues intr_vec", +- hw->used_rx_queues); +- ret = -ENOMEM; +- goto alloc_intr_vec_error; +- } +- } +- +- if (rte_intr_allow_others(intr_handle)) { +- vec = RTE_INTR_VEC_RXTX_OFFSET; +- base = RTE_INTR_VEC_RXTX_OFFSET; +- } +- +- for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { +- ret = hns3_bind_ring_with_vector(hw, vec, true, +- HNS3_RING_TYPE_RX, q_id); +- if (ret) +- goto bind_vector_error; +- intr_handle->intr_vec[q_id] = vec; +- /* +- * If there are not enough efds (e.g. not enough interrupt), +- * remaining queues will be bond to the last interrupt. 
+- */ +- if (vec < base + intr_handle->nb_efd - 1) +- vec++; +- } +- rte_intr_enable(intr_handle); +- return 0; +- +-bind_vector_error: +- rte_free(intr_handle->intr_vec); +- intr_handle->intr_vec = NULL; +-alloc_intr_vec_error: +- rte_intr_efd_disable(intr_handle); +- return ret; +-} +- +-static int +-hns3_restore_rx_interrupt(struct hns3_hw *hw) +-{ +- struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; +- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); +- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; +- uint16_t q_id; +- int ret; +- +- if (dev->data->dev_conf.intr_conf.rxq == 0) +- return 0; +- +- if (rte_intr_dp_is_en(intr_handle)) { +- for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { +- ret = hns3_bind_ring_with_vector(hw, +- intr_handle->intr_vec[q_id], true, +- HNS3_RING_TYPE_RX, q_id); +- if (ret) +- return ret; +- } +- } +- +- return 0; +-} +- + static void + hns3_restore_filter(struct rte_eth_dev *dev) + { +@@ -5545,42 +5270,6 @@ hns3_do_stop(struct hns3_adapter *hns) + return 0; + } + +-static void +-hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) +-{ +- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); +- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; +- struct hns3_adapter *hns = dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; +- uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; +- uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; +- uint16_t q_id; +- +- if (dev->data->dev_conf.intr_conf.rxq == 0) +- return; +- +- /* unmap the ring with vector */ +- if (rte_intr_allow_others(intr_handle)) { +- vec = RTE_INTR_VEC_RXTX_OFFSET; +- base = RTE_INTR_VEC_RXTX_OFFSET; +- } +- if (rte_intr_dp_is_en(intr_handle)) { +- for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { +- (void)hns3_bind_ring_with_vector(hw, vec, false, +- HNS3_RING_TYPE_RX, +- q_id); +- if (vec < base + intr_handle->nb_efd - 1) +- vec++; +- } +- } +- /* Clean datapath event and queue/vec mapping */ +- rte_intr_efd_disable(intr_handle); +- if (intr_handle->intr_vec) { +- rte_free(intr_handle->intr_vec); +- intr_handle->intr_vec = NULL; +- } +-} +- + static int + hns3_dev_stop(struct rte_eth_dev *dev) + { +@@ -6971,6 +6660,7 @@ hns3_init_hw_ops(struct hns3_hw *hw) + hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr; + hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr; + hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr; ++ hw->ops.bind_ring_with_vector = hns3_bind_ring_with_vector; + } + + static int +diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h +index 960f781e1..09287c3c1 100644 +--- a/drivers/net/hns3/hns3_ethdev.h ++++ b/drivers/net/hns3/hns3_ethdev.h +@@ -437,6 +437,9 @@ struct hns3_hw_ops { + struct rte_ether_addr *mac_addr); + int (*del_uc_mac_addr)(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr); ++ int (*bind_ring_with_vector)(struct hns3_hw *hw, uint16_t vector_id, ++ bool en, enum hns3_ring_type queue_type, ++ uint16_t queue_id); + }; + + #define HNS3_INTR_MAPPING_VEC_RSV_ONE 0 +@@ -1032,6 +1035,7 @@ hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr) + return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask; + } + ++uint32_t hns3_get_speed_capa(struct hns3_hw *hw); + int hns3_buffer_alloc(struct hns3_hw *hw); + int hns3_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, +@@ -1039,8 +1043,6 @@ int hns3_dev_filter_ctrl(struct rte_eth_dev *dev, + bool hns3_is_reset_pending(struct hns3_adapter *hns); + bool hns3vf_is_reset_pending(struct hns3_adapter *hns); + void 
hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query); +-int hns3_dev_infos_get(struct rte_eth_dev *eth_dev, +- struct rte_eth_dev_info *info); + void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + uint32_t link_speed, uint8_t link_duplex); + void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); +@@ -1072,13 +1074,4 @@ is_reset_pending(struct hns3_adapter *hns) + return ret; + } + +-static inline uint64_t +-hns3_txvlan_cap_get(struct hns3_hw *hw) +-{ +- if (hw->port_base_vlan_cfg.state) +- return DEV_TX_OFFLOAD_VLAN_INSERT; +- else +- return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT; +-} +- + #endif /* _HNS3_ETHDEV_H_ */ +diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c +index 84ae26987..4cec0949a 100644 +--- a/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/drivers/net/hns3/hns3_ethdev_vf.c +@@ -422,7 +422,7 @@ hns3vf_restore_promisc(struct hns3_adapter *hns) + } + + static int +-hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, ++hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, + bool mmap, enum hns3_ring_type queue_type, + uint16_t queue_id) + { +@@ -434,7 +434,7 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, + memset(&bind_msg, 0, sizeof(bind_msg)); + code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR : + HNS3_MBX_UNMAP_RING_TO_VECTOR; +- bind_msg.vector_id = vector_id; ++ bind_msg.vector_id = (uint8_t)vector_id; + + if (queue_type == HNS3_RING_TYPE_RX) + bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX; +@@ -454,62 +454,6 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, + return ret; + } + +-static int +-hns3vf_init_ring_with_vector(struct hns3_hw *hw) +-{ +- uint16_t vec; +- int ret; +- int i; +- +- /* +- * In hns3 network engine, vector 0 is always the misc interrupt of this +- * function, vector 1~N can be used respectively for the queues of the +- * function. Tx and Rx queues with the same number share the interrupt +- * vector. In the initialization clearing the all hardware mapping +- * relationship configurations between queues and interrupt vectors is +- * needed, so some error caused by the residual configurations, such as +- * the unexpected Tx interrupt, can be avoid. +- */ +- vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ +- if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE) +- vec = vec - 1; /* the last interrupt is reserved */ +- hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); +- for (i = 0; i < hw->intr_tqps_num; i++) { +- /* +- * Set gap limiter/rate limiter/quanity limiter algorithm +- * configuration for interrupt coalesce of queue's interrupt. +- */ +- hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, +- HNS3_TQP_INTR_GL_DEFAULT); +- hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, +- HNS3_TQP_INTR_GL_DEFAULT); +- hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); +- /* +- * QL(quantity limiter) is not used currently, just set 0 to +- * close it. 
+- */ +- hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); +- +- ret = hns3vf_bind_ring_with_vector(hw, vec, false, +- HNS3_RING_TYPE_TX, i); +- if (ret) { +- PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with " +- "vector: %u, ret=%d", i, vec, ret); +- return ret; +- } +- +- ret = hns3vf_bind_ring_with_vector(hw, vec, false, +- HNS3_RING_TYPE_RX, i); +- if (ret) { +- PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with " +- "vector: %u, ret=%d", i, vec, ret); +- return ret; +- } +- } +- +- return 0; +-} +- + static int + hns3vf_dev_configure(struct rte_eth_dev *dev) + { +@@ -677,103 +621,6 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + return 0; + } + +-static int +-hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) +-{ +- struct hns3_adapter *hns = eth_dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; +- uint16_t q_num = hw->tqps_num; +- +- /* +- * In interrupt mode, 'max_rx_queues' is set based on the number of +- * MSI-X interrupt resources of the hardware. +- */ +- if (hw->data->dev_conf.intr_conf.rxq == 1) +- q_num = hw->intr_tqps_num; +- +- info->max_rx_queues = q_num; +- info->max_tx_queues = hw->tqps_num; +- info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ +- info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; +- info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM; +- info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; +- info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; +- +- info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | +- DEV_RX_OFFLOAD_UDP_CKSUM | +- DEV_RX_OFFLOAD_TCP_CKSUM | +- DEV_RX_OFFLOAD_SCTP_CKSUM | +- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | +- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | +- DEV_RX_OFFLOAD_SCATTER | +- DEV_RX_OFFLOAD_VLAN_STRIP | +- DEV_RX_OFFLOAD_VLAN_FILTER | +- DEV_RX_OFFLOAD_JUMBO_FRAME | +- DEV_RX_OFFLOAD_RSS_HASH | +- DEV_RX_OFFLOAD_TCP_LRO); +- info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | +- DEV_TX_OFFLOAD_IPV4_CKSUM | +- DEV_TX_OFFLOAD_TCP_CKSUM | +- DEV_TX_OFFLOAD_UDP_CKSUM | +- DEV_TX_OFFLOAD_SCTP_CKSUM | +- DEV_TX_OFFLOAD_MULTI_SEGS | +- DEV_TX_OFFLOAD_TCP_TSO | +- DEV_TX_OFFLOAD_VXLAN_TNL_TSO | +- DEV_TX_OFFLOAD_GRE_TNL_TSO | +- DEV_TX_OFFLOAD_GENEVE_TNL_TSO | +- DEV_TX_OFFLOAD_MBUF_FAST_FREE | +- hns3_txvlan_cap_get(hw)); +- +- if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) +- info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; +- +- if (hns3_dev_get_support(hw, INDEP_TXRX)) +- info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | +- RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; +- +- info->rx_desc_lim = (struct rte_eth_desc_lim) { +- .nb_max = HNS3_MAX_RING_DESC, +- .nb_min = HNS3_MIN_RING_DESC, +- .nb_align = HNS3_ALIGN_RING_DESC, +- }; +- +- info->tx_desc_lim = (struct rte_eth_desc_lim) { +- .nb_max = HNS3_MAX_RING_DESC, +- .nb_min = HNS3_MIN_RING_DESC, +- .nb_align = HNS3_ALIGN_RING_DESC, +- .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, +- .nb_mtu_seg_max = hw->max_non_tso_bd_num, +- }; +- +- info->default_rxconf = (struct rte_eth_rxconf) { +- .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, +- /* +- * If there are no available Rx buffer descriptors, incoming +- * packets are always dropped by hardware based on hns3 network +- * engine. 
+- */ +- .rx_drop_en = 1, +- .offloads = 0, +- }; +- info->default_txconf = (struct rte_eth_txconf) { +- .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, +- .offloads = 0, +- }; +- +- info->reta_size = hw->rss_ind_tbl_size; +- info->hash_key_size = HNS3_RSS_KEY_SIZE; +- info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; +- +- info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; +- info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; +- info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; +- info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; +- info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; +- info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; +- +- return 0; +-} +- + static void + hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr) + { +@@ -1662,7 +1509,7 @@ hns3vf_init_hardware(struct hns3_adapter *hns) + * some error caused by the residual configurations, such as the + * unexpected interrupt, can be avoid. + */ +- ret = hns3vf_init_ring_with_vector(hw); ++ ret = hns3_init_ring_with_vector(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); + goto err_init_hardware; +@@ -1849,41 +1696,6 @@ hns3vf_do_stop(struct hns3_adapter *hns) + return 0; + } + +-static void +-hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev) +-{ +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); +- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; +- uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; +- uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; +- uint16_t q_id; +- +- if (dev->data->dev_conf.intr_conf.rxq == 0) +- return; +- +- /* unmap the ring with vector */ +- if (rte_intr_allow_others(intr_handle)) { +- vec = RTE_INTR_VEC_RXTX_OFFSET; +- base = RTE_INTR_VEC_RXTX_OFFSET; +- } +- if (rte_intr_dp_is_en(intr_handle)) { +- for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { +- (void)hns3vf_bind_ring_with_vector(hw, vec, false, +- HNS3_RING_TYPE_RX, +- q_id); +- if (vec < base + intr_handle->nb_efd - 1) +- vec++; +- } +- } +- /* Clean datapath event and queue/vec mapping */ +- rte_intr_efd_disable(intr_handle); +- if (intr_handle->intr_vec) { +- rte_free(intr_handle->intr_vec); +- intr_handle->intr_vec = NULL; +- } +-} +- + static int + hns3vf_dev_stop(struct rte_eth_dev *dev) + { +@@ -1905,7 +1717,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev) + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { + hns3_stop_tqps(hw); + hns3vf_do_stop(hns); +- hns3vf_unmap_rx_interrupt(dev); ++ hns3_unmap_rx_interrupt(dev); + hw->adapter_state = HNS3_NIC_CONFIGURED; + } + hns3_rx_scattered_reset(dev); +@@ -1946,31 +1758,6 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) + return ret; + } + +-static int +-hns3vf_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, +- size_t fw_size) +-{ +- struct hns3_adapter *hns = eth_dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; +- uint32_t version = hw->fw_version; +- int ret; +- +- ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", +- hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, +- HNS3_FW_VERSION_BYTE3_S), +- hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, +- HNS3_FW_VERSION_BYTE2_S), +- hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, +- HNS3_FW_VERSION_BYTE1_S), +- hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, +- HNS3_FW_VERSION_BYTE0_S)); +- ret += 1; /* add the size of '\0' */ +- if (fw_size < (uint32_t)ret) +- return ret; +- 
else +- return 0; +-} +- + static int + hns3vf_dev_link_update(struct rte_eth_dev *eth_dev, + __rte_unused int wait_to_complete) +@@ -2032,99 +1819,6 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) + return ret; + } + +-static int +-hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) +-{ +- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); +- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; +- uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; +- uint32_t intr_vector; +- uint16_t q_id; +- int ret; +- +- /* +- * hns3 needs a separate interrupt to be used as event interrupt which +- * could not be shared with task queue pair, so KERNEL drivers need +- * support multiple interrupt vectors. +- */ +- if (dev->data->dev_conf.intr_conf.rxq == 0 || +- !rte_intr_cap_multiple(intr_handle)) +- return 0; +- +- rte_intr_disable(intr_handle); +- intr_vector = hw->used_rx_queues; +- /* It creates event fd for each intr vector when MSIX is used */ +- if (rte_intr_efd_enable(intr_handle, intr_vector)) +- return -EINVAL; +- +- if (intr_handle->intr_vec == NULL) { +- intr_handle->intr_vec = +- rte_zmalloc("intr_vec", +- hw->used_rx_queues * sizeof(int), 0); +- if (intr_handle->intr_vec == NULL) { +- hns3_err(hw, "Failed to allocate %u rx_queues" +- " intr_vec", hw->used_rx_queues); +- ret = -ENOMEM; +- goto vf_alloc_intr_vec_error; +- } +- } +- +- if (rte_intr_allow_others(intr_handle)) { +- vec = RTE_INTR_VEC_RXTX_OFFSET; +- base = RTE_INTR_VEC_RXTX_OFFSET; +- } +- +- for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { +- ret = hns3vf_bind_ring_with_vector(hw, vec, true, +- HNS3_RING_TYPE_RX, q_id); +- if (ret) +- goto vf_bind_vector_error; +- intr_handle->intr_vec[q_id] = vec; +- /* +- * If there are not enough efds (e.g. not enough interrupt), +- * remaining queues will be bond to the last interrupt. 
+- */ +- if (vec < base + intr_handle->nb_efd - 1) +- vec++; +- } +- rte_intr_enable(intr_handle); +- return 0; +- +-vf_bind_vector_error: +- rte_free(intr_handle->intr_vec); +- intr_handle->intr_vec = NULL; +-vf_alloc_intr_vec_error: +- rte_intr_efd_disable(intr_handle); +- return ret; +-} +- +-static int +-hns3vf_restore_rx_interrupt(struct hns3_hw *hw) +-{ +- struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; +- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); +- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; +- uint16_t q_id; +- int ret; +- +- if (dev->data->dev_conf.intr_conf.rxq == 0) +- return 0; +- +- if (rte_intr_dp_is_en(intr_handle)) { +- for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { +- ret = hns3vf_bind_ring_with_vector(hw, +- intr_handle->intr_vec[q_id], true, +- HNS3_RING_TYPE_RX, q_id); +- if (ret) +- return ret; +- } +- } +- +- return 0; +-} +- + static void + hns3vf_restore_filter(struct rte_eth_dev *dev) + { +@@ -2150,7 +1844,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev) + rte_spinlock_unlock(&hw->lock); + return ret; + } +- ret = hns3vf_map_rx_interrupt(dev); ++ ret = hns3_map_rx_interrupt(dev); + if (ret) + goto map_rx_inter_err; + +@@ -2467,7 +2161,7 @@ hns3vf_restore_conf(struct hns3_adapter *hns) + if (ret) + goto err_vlan_table; + +- ret = hns3vf_restore_rx_interrupt(hw); ++ ret = hns3_restore_rx_interrupt(hw); + if (ret) + goto err_vlan_table; + +@@ -2641,8 +2335,8 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = { + .xstats_reset = hns3_dev_xstats_reset, + .xstats_get_by_id = hns3_dev_xstats_get_by_id, + .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, +- .dev_infos_get = hns3vf_dev_infos_get, +- .fw_version_get = hns3vf_fw_version_get, ++ .dev_infos_get = hns3_dev_infos_get, ++ .fw_version_get = hns3_fw_version_get, + .rx_queue_setup = hns3_rx_queue_setup, + .tx_queue_setup = hns3_tx_queue_setup, + .rx_queue_release = hns3_dev_rx_queue_release, +@@ -2691,6 +2385,7 @@ hns3vf_init_hw_ops(struct hns3_hw *hw) + hw->ops.del_mc_mac_addr = hns3vf_remove_mc_mac_addr; + hw->ops.add_uc_mac_addr = hns3vf_add_uc_mac_addr; + hw->ops.del_uc_mac_addr = hns3vf_remove_uc_mac_addr; ++ hw->ops.bind_ring_with_vector = hns3vf_bind_ring_with_vector; + } + + static int +diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c +index 44b607af7..e1089b6bd 100644 +--- a/drivers/net/hns3/hns3_tm.c ++++ b/drivers/net/hns3/hns3_tm.c +@@ -4,7 +4,7 @@ + + #include + +-#include "hns3_ethdev.h" ++#include "hns3_common.h" + #include "hns3_dcb.h" + #include "hns3_logs.h" + #include "hns3_tm.h" +-- +2.33.0 + diff --git a/0266-app-testpmd-remove-unused-header-file.patch b/0266-app-testpmd-remove-unused-header-file.patch new file mode 100644 index 0000000000000000000000000000000000000000..4b9b71bd2ae8f932cb8a62d729f58674aec111b1 --- /dev/null +++ b/0266-app-testpmd-remove-unused-header-file.patch @@ -0,0 +1,252 @@ +From 11bcfb49be7f092d8d20d88dfdc5358196d3ecca Mon Sep 17 00:00:00 2001 +From: Huisong Li +Date: Mon, 25 Oct 2021 14:39:22 +0800 +Subject: [PATCH 33/33] app/testpmd: remove unused header file + +This patch removes unused "rte_eth_bond.h" header file. 
+ +Fixes: 2950a769315e ("bond: testpmd support") +Cc: stable@dpdk.org + +Signed-off-by: Huisong Li +Signed-off-by: Min Hu (Connor) +Reviewed-by: Ferruh Yigit +--- + app/test-pmd/parameters.c | 3 - + drivers/net/hns3/hns3_common.c | 101 +++++++++++++++++---------------- + drivers/net/hns3/hns3_flow.h | 5 +- + 3 files changed, 55 insertions(+), 54 deletions(-) + +diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c +index c464c42f6..2a69df5b7 100644 +--- a/app/test-pmd/parameters.c ++++ b/app/test-pmd/parameters.c +@@ -39,9 +39,6 @@ + #include + #include + #include +-#ifdef RTE_NET_BOND +-#include +-#endif + #include + + #include "testpmd.h" +diff --git a/drivers/net/hns3/hns3_common.c b/drivers/net/hns3/hns3_common.c +index eac2aa104..0328f2beb 100644 +--- a/drivers/net/hns3/hns3_common.c ++++ b/drivers/net/hns3/hns3_common.c +@@ -4,7 +4,7 @@ + + #include + #include +-#include ++#include + #include + + #include "hns3_common.h" +@@ -60,43 +60,42 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE; + info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; + info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; +- info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | +- RTE_ETH_RX_OFFLOAD_TCP_CKSUM | +- RTE_ETH_RX_OFFLOAD_UDP_CKSUM | +- RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | +- RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | +- RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | +- RTE_ETH_RX_OFFLOAD_SCATTER | +- RTE_ETH_RX_OFFLOAD_VLAN_STRIP | +- RTE_ETH_RX_OFFLOAD_VLAN_FILTER | +- RTE_ETH_RX_OFFLOAD_RSS_HASH | +- RTE_ETH_RX_OFFLOAD_TCP_LRO); +- info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | +- RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | +- RTE_ETH_TX_OFFLOAD_TCP_CKSUM | +- RTE_ETH_TX_OFFLOAD_UDP_CKSUM | +- RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | +- RTE_ETH_TX_OFFLOAD_MULTI_SEGS | +- RTE_ETH_TX_OFFLOAD_TCP_TSO | +- RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | +- RTE_ETH_TX_OFFLOAD_VLAN_INSERT); ++ info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | ++ DEV_RX_OFFLOAD_TCP_CKSUM | ++ DEV_RX_OFFLOAD_UDP_CKSUM | ++ DEV_RX_OFFLOAD_SCTP_CKSUM | ++ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | ++ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | ++ DEV_RX_OFFLOAD_SCATTER | ++ DEV_RX_OFFLOAD_VLAN_STRIP | ++ DEV_RX_OFFLOAD_VLAN_FILTER | ++ DEV_RX_OFFLOAD_RSS_HASH | ++ DEV_RX_OFFLOAD_TCP_LRO); ++ info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | ++ DEV_TX_OFFLOAD_IPV4_CKSUM | ++ DEV_TX_OFFLOAD_TCP_CKSUM | ++ DEV_TX_OFFLOAD_UDP_CKSUM | ++ DEV_TX_OFFLOAD_SCTP_CKSUM | ++ DEV_TX_OFFLOAD_MULTI_SEGS | ++ DEV_TX_OFFLOAD_TCP_TSO | ++ DEV_TX_OFFLOAD_VXLAN_TNL_TSO | ++ DEV_TX_OFFLOAD_GRE_TNL_TSO | ++ DEV_TX_OFFLOAD_GENEVE_TNL_TSO | ++ DEV_TX_OFFLOAD_MBUF_FAST_FREE | ++ DEV_TX_OFFLOAD_VLAN_INSERT); + + if (!hw->port_base_vlan_cfg.state) +- info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT; ++ info->tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT; + + if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) +- info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; + + if (hns3_dev_get_support(hw, INDEP_TXRX)) + info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; +- info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; + + if (hns3_dev_get_support(hw, PTP)) +- info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP; ++ info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; + + 
info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = HNS3_MAX_RING_DESC, +@@ -143,7 +142,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + */ + if (!hns->is_vf) { + info->max_mac_addrs = HNS3_UC_MACADDR_NUM; +- info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; ++ info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC; + info->speed_capa = hns3_get_speed_capa(hw); + } else { + info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM; +@@ -641,7 +640,7 @@ int + hns3_map_rx_interrupt(struct rte_eth_dev *dev) + { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); +- struct rte_intr_handle *intr_handle = pci_dev->intr_handle; ++ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t base = RTE_INTR_VEC_ZERO_OFFSET; + uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET; +@@ -664,13 +663,16 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev) + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -EINVAL; + +- /* Allocate vector list */ +- if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", +- hw->used_rx_queues)) { +- hns3_err(hw, "failed to allocate %u rx_queues intr_vec", +- hw->used_rx_queues); +- ret = -ENOMEM; +- goto alloc_intr_vec_error; ++ if (intr_handle->intr_vec == NULL) { ++ intr_handle->intr_vec = ++ rte_zmalloc("intr_vec", ++ hw->used_rx_queues * sizeof(int), 0); ++ if (intr_handle->intr_vec == NULL) { ++ hns3_err(hw, "failed to allocate %u rx_queues intr_vec", ++ hw->used_rx_queues); ++ ret = -ENOMEM; ++ goto alloc_intr_vec_error; ++ } + } + + if (rte_intr_allow_others(intr_handle)) { +@@ -683,21 +685,20 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev) + HNS3_RING_TYPE_RX, q_id); + if (ret) + goto bind_vector_error; +- +- if (rte_intr_vec_list_index_set(intr_handle, q_id, vec)) +- goto bind_vector_error; ++ intr_handle->intr_vec[q_id] = vec; + /* + * If there are not enough efds (e.g. not enough interrupt), + * remaining queues will be bond to the last interrupt. 
+ */ +- if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1) ++ if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + rte_intr_enable(intr_handle); + return 0; + + bind_vector_error: +- rte_intr_vec_list_free(intr_handle); ++ rte_free(intr_handle->intr_vec); ++ intr_handle->intr_vec = NULL; + alloc_intr_vec_error: + rte_intr_efd_disable(intr_handle); + return ret; +@@ -707,7 +708,7 @@ void + hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) + { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); +- struct rte_intr_handle *intr_handle = pci_dev->intr_handle; ++ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; +@@ -727,13 +728,16 @@ hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) + (void)hw->ops.bind_ring_with_vector(hw, vec, false, + HNS3_RING_TYPE_RX, + q_id); +- if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1) ++ if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + } + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); +- rte_intr_vec_list_free(intr_handle); ++ if (intr_handle->intr_vec) { ++ rte_free(intr_handle->intr_vec); ++ intr_handle->intr_vec = NULL; ++ } + } + + int +@@ -741,7 +745,7 @@ hns3_restore_rx_interrupt(struct hns3_hw *hw) + { + struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); +- struct rte_intr_handle *intr_handle = pci_dev->intr_handle; ++ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint16_t q_id; + int ret; + +@@ -751,9 +755,8 @@ hns3_restore_rx_interrupt(struct hns3_hw *hw) + if (rte_intr_dp_is_en(intr_handle)) { + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + ret = hw->ops.bind_ring_with_vector(hw, +- rte_intr_vec_list_index_get(intr_handle, +- q_id), +- true, HNS3_RING_TYPE_RX, q_id); ++ intr_handle->intr_vec[q_id], true, ++ HNS3_RING_TYPE_RX, q_id); + if (ret) + return ret; + } +diff --git a/drivers/net/hns3/hns3_flow.h b/drivers/net/hns3/hns3_flow.h +index 2eb451b72..d679e5928 100644 +--- a/drivers/net/hns3/hns3_flow.h ++++ b/drivers/net/hns3/hns3_flow.h +@@ -36,8 +36,9 @@ struct hns3_flow_mem { + TAILQ_HEAD(hns3_rss_filter_list, hns3_rss_conf_ele); + TAILQ_HEAD(hns3_flow_mem_list, hns3_flow_mem); + +-int hns3_dev_flow_ops_get(struct rte_eth_dev *dev, +- const struct rte_flow_ops **ops); ++int hns3_dev_filter_ctrl(struct rte_eth_dev *dev, ++ enum rte_filter_type filter_type, ++ enum rte_filter_op filter_op, void *arg); + void hns3_flow_init(struct rte_eth_dev *dev); + void hns3_flow_uninit(struct rte_eth_dev *dev); + +-- +2.33.0 + diff --git a/0267-usertools-add-Intel-DLB-device-binding.patch b/0267-usertools-add-Intel-DLB-device-binding.patch new file mode 100644 index 0000000000000000000000000000000000000000..72112bbfd3d21a8f42686906e52cab03229a6f75 --- /dev/null +++ b/0267-usertools-add-Intel-DLB-device-binding.patch @@ -0,0 +1,31 @@ +From dfbf3715354e41c8751972d2bcb04a8f5a6961dd Mon Sep 17 00:00:00 2001 +From: speech_white +Date: Fri, 10 Dec 2021 09:20:28 +0800 +Subject: [PATCH] usertools: add Intel DLB device binding + +Fix execution failure to add DLB to usertools/dpdk-devbind.py + +Signed-off-by: speech_white +--- + usertools/dpdk-devbind.py | 2 ++ + 1 file changed, 2 insertions(+) + mode change 100644 => 100755 usertools/dpdk-devbind.py + +diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py +old mode 100644 +new mode 100755 +index 
8af3089ae..9b063dae9 +--- a/usertools/dpdk-devbind.py ++++ b/usertools/dpdk-devbind.py +@@ -47,6 +47,8 @@ + + hisilicon_dma = {'Class': '08', 'Vendor': '19e5', 'Device': 'a122', + 'SVendor': None, 'SDevice': None} ++intel_dlb = {'Class': '0b', 'Vendor': '8086', 'Device': '270b,2710,2714', ++ 'SVendor': None, 'SDevice': None} + intel_ioat_bdw = {'Class': '08', 'Vendor': '8086', + 'Device': '6f20,6f21,6f22,6f23,6f24,6f25,6f26,6f27,6f2e,6f2f', + 'SVendor': None, 'SDevice': None} +-- +2.23.0 + diff --git a/dpdk.spec b/dpdk.spec index 7c79f4d5287ba134c68cf915abaf3b7e799be8ee..6fd7bc4387f7e35afb550e311f09a86c8aaa23dd 100644 --- a/dpdk.spec +++ b/dpdk.spec @@ -1,6 +1,6 @@ Name: dpdk Version: 20.11 -Release: 9 +Release: 10 Packager: packaging@6wind.com URL: http://dpdk.org %global source_version 20.11 @@ -221,6 +221,59 @@ Patch211: 0211-net-hns3-disable-PFC-if-not-configured.patch Patch212: 0212-net-hns3-use-the-correct-HiSilicon-copyright.patch Patch213: 0213-app-testpmd-change-port-link-speed-without-stopping-.patch Patch214: 0214-ethdev-add-dev-configured-flag.patch +Patch215: 0215-net-hns3-add-start-stop-Tx-datapath-request-for-MP.patch +Patch216: 0216-net-hns3-support-set-link-up-down-for-PF.patch +Patch217: 0217-net-hns3-fix-queue-flow-action-validation.patch +Patch218: 0218-net-hns3-fix-taskqueue-pair-reset-command.patch +Patch219: 0219-net-hns3-fix-Tx-push-capability.patch +Patch220: 0220-examples-kni-close-port-before-exit.patch +Patch221: 0221-net-hns3-fix-residual-MAC-after-setting-default-MAC.patch +Patch222: 0222-net-hns3-fix-input-parameters-of-MAC-functions.patch +Patch223: 0223-net-bonding-fix-dedicated-queue-mode-in-vector-burst.patch +Patch224: 0224-net-bonding-fix-RSS-key-length.patch +Patch225: 0225-app-testpmd-add-command-to-show-LACP-bonding-info.patch +Patch226: 0226-app-testpmd-retain-all-original-dev-conf-when-config.patch +Patch227: 0227-net-hns3-remove-similar-macro-function-definitions.patch +Patch228: 0228-net-hns3-fix-interrupt-vector-freeing.patch +Patch229: 0229-net-hns3-add-runtime-config-for-mailbox-limit-time.patch +Patch230: 0230-net-hns3-fix-mailbox-communication-with-HW.patch +Patch231: 0231-app-testpmd-support-multi-process.patch +Patch232: 0232-app-testpmd-fix-key-for-RSS-flow-rule.patch +Patch233: 0233-app-testpmd-release-flows-left-before-port-stop.patch +Patch234: 0234-app-testpmd-delete-unused-function.patch +Patch235: 0235-dmadev-introduce-DMA-device-support.patch +Patch236: 0236-net-hns3-rename-multicast-address-function.patch +Patch237: 0237-net-hns3-rename-unicast-address-function.patch +Patch238: 0238-net-hns3-rename-multicast-address-removal-function.patch +Patch239: 0239-net-hns3-extract-common-interface-to-check-duplicate.patch +Patch240: 0240-net-hns3-remove-redundant-multicast-MAC-interface.patch +Patch241: 0241-net-hns3-rename-unicast-address-removal-function.patch +Patch242: 0242-net-hns3-remove-redundant-multicast-removal-interfac.patch +Patch243: 0243-net-hns3-add-HW-ops-structure-to-operate-hardware.patch +Patch244: 0244-net-hns3-use-HW-ops-to-config-MAC-features.patch +Patch245: 0245-net-hns3-unify-MAC-and-multicast-address-configurati.patch +Patch246: 0246-net-hns3-unify-MAC-address-add-and-remove.patch +Patch247: 0247-net-hns3-unify-multicast-address-check.patch +Patch248: 0248-net-hns3-refactor-multicast-MAC-address-set-for-PF.patch +Patch249: 0249-net-hns3-unify-multicast-MAC-address-set-list.patch +Patch250: 0250-bonding-show-Tx-policy-for-802.3AD-mode.patch +Patch251: 0251-net-hns3-fix-secondary-process-reference-count.patch 
+Patch252: 0252-net-hns3-fix-multi-process-action-register-and-unreg.patch +Patch253: 0253-net-hns3-unregister-MP-action-on-close-for-secondary.patch +Patch254: 0254-net-hns3-refactor-multi-process-initialization.patch +Patch255: 0255-usertools-devbind-add-Kunpeng-DMA.patch +Patch256: 0256-kni-check-error-code-of-allmulticast-mode-switch.patch +Patch257: 0257-net-hns3-simplify-queue-DMA-address-arithmetic.patch +Patch258: 0258-net-hns3-remove-redundant-function-declaration.patch +Patch259: 0259-net-hns3-modify-an-indent-alignment.patch +Patch260: 0260-net-hns3-use-unsigned-integer-for-bitwise-operations.patch +Patch261: 0261-net-hns3-extract-common-code-to-its-own-file.patch +Patch262: 0262-net-hns3-move-declarations-in-flow-header-file.patch +Patch263: 0263-net-hns3-remove-magic-numbers.patch +Patch264: 0264-net-hns3-mark-unchecked-return-of-snprintf.patch +Patch265: 0265-net-hns3-remove-PF-VF-duplicate-code.patch +Patch266: 0266-app-testpmd-remove-unused-header-file.patch +Patch267: 0267-usertools-add-Intel-DLB-device-binding.patch Summary: Data Plane Development Kit core Group: System Environment/Libraries @@ -239,7 +292,8 @@ BuildRequires: meson ninja-build gcc BuildRequires: kernel-devel numactl-devel BuildRequires: libpcap libpcap-devel BuildRequires: uname-build-checks -BuildRequires: doxygen python3-sphinx chrpath +BuildRequires: chrpath +BuildRequires: groff-base %define kern_devel_ver %(uname -r) @@ -275,7 +329,7 @@ This package contains the pdump tool for capture the dpdk network packets. %build export CFLAGS="%{optflags}" -meson %{target} -Ddisable_drivers=*/octeontx2 -Ddisable_drivers=*/fpga* -Ddisable_drivers=*/ifpga* -Denable_kmods=true -Denable_docs=true +meson %{target} -Ddisable_drivers=*/octeontx2 -Ddisable_drivers=*/fpga* -Ddisable_drivers=*/ifpga* -Denable_kmods=true ninja -C %{target} %install @@ -345,7 +399,6 @@ strip -g $RPM_BUILD_ROOT/lib/modules/${namer}/extra/dpdk/rte_kni.ko /usr/share/dpdk/%{target}/lib/* %files doc -/usr/local/share/doc/* %files tools /usr/bin/dpdk-pdump @@ -359,6 +412,8 @@ strip -g $RPM_BUILD_ROOT/lib/modules/${namer}/extra/dpdk/rte_kni.ko /usr/sbin/depmod %changelog +* Sat Dec 17 2021 Min Hu - 20.11-10 +- sync patches ranges from versoin 9 t0 17 from master branch * Mon Sep 13 2021 chenchen - 20.11-9 - del rpath from some binaries and bin - add debug package to strip