From 09be8198b942238bc0dacf467676b014e5bd44db Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E5=AF=8C=E8=89=B3?=
Date: Mon, 24 Mar 2025 11:42:22 +0800
Subject: [PATCH] Fix some libzrdma bugs and add some optimizations
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 李富艳
---
...-libzrdma-Fix-wqe-polarity-set-error.patch | 245 +++++++
...ma-Add-interface-aligned-with-kernel.patch | 304 ++++++++
...-Add-poll-cqe-error-to-Failed-status.patch | 241 +++++++
...sq-rq-flush-cqe-and-log-optimization.patch | 625 ++++++++++++++++
...libzrdma-Fix-capability-related-bugs.patch | 676 ++++++++++++++++++
rdma-core.spec | 13 +-
6 files changed, 2103 insertions(+), 1 deletion(-)
create mode 100644 0047-libzrdma-Fix-wqe-polarity-set-error.patch
create mode 100644 0048-libzrdma-Add-interface-aligned-with-kernel.patch
create mode 100644 0049-libzrdma-Add-poll-cqe-error-to-Failed-status.patch
create mode 100644 0050-libzrdma-Add-sq-rq-flush-cqe-and-log-optimization.patch
create mode 100644 0051-libzrdma-Fix-capability-related-bugs.patch
diff --git a/0047-libzrdma-Fix-wqe-polarity-set-error.patch b/0047-libzrdma-Fix-wqe-polarity-set-error.patch
new file mode 100644
index 0000000..19439ff
--- /dev/null
+++ b/0047-libzrdma-Fix-wqe-polarity-set-error.patch
@@ -0,0 +1,245 @@
+From 232ae986da9c9995be0ae39bdf5b2145250c22c9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E6=9D=8E=E5=AF=8C=E8=89=B3?=
+Date: Fri, 28 Mar 2025 11:04:33 +0800
+Subject: [PATCH] libzrdma: Fix wqe polarity set error
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
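+This patch removes zxdh_clr_wqes() (and with it the 256-entry SQ
+reserve) and rewrites the PSN advance in qp_tx_psn_add() as a plain
+ceiling division. A minimal standalone sketch of the arithmetic, with
+an illustrative name (psn_advance is not a driver symbol):
+
+    /* Both the old form ((y % mtu) ? y / mtu + 1 : y / mtu) and the
+     * new form ((y + mtu - 1) / mtu) compute ceil(len / mtu). A
+     * zero-length message still consumes one PSN; PSNs are 24-bit
+     * and wrap. */
+    static unsigned int psn_advance(unsigned int psn, unsigned int len,
+                                    unsigned int mtu)
+    {
+            if (len == 0)
+                    return (psn + 1) & 0xffffff;
+            return (psn + (len + mtu - 1) / mtu) & 0xffffff;
+    }
+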
+Signed-off-by: 李富艳
+---
+ providers/zrdma/zxdh_defs.h | 3 --
+ providers/zrdma/zxdh_hw.c | 62 +++++++-----------------------------
+ providers/zrdma/zxdh_verbs.h | 1 -
+ 3 files changed, 11 insertions(+), 55 deletions(-)
+
+diff --git a/providers/zrdma/zxdh_defs.h b/providers/zrdma/zxdh_defs.h
+index eaf73ca..3863fb9 100644
+--- a/providers/zrdma/zxdh_defs.h
++++ b/providers/zrdma/zxdh_defs.h
+@@ -313,9 +313,6 @@
+ #define ZXDH_RING_FREE_QUANTA(_ring) \
+ (((_ring).size - ZXDH_RING_USED_QUANTA(_ring) - 1))
+
+-#define ZXDH_SQ_RING_FREE_QUANTA(_ring) \
+- (((_ring).size - ZXDH_RING_USED_QUANTA(_ring) - 257))
+-
+ #define ZXDH_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
+ { \
+ index = ZXDH_RING_CURRENT_HEAD(_ring); \
+diff --git a/providers/zrdma/zxdh_hw.c b/providers/zrdma/zxdh_hw.c
+index ed577a9..073b198 100644
+--- a/providers/zrdma/zxdh_hw.c
++++ b/providers/zrdma/zxdh_hw.c
+@@ -20,13 +20,14 @@
+ #include
+ #define ERROR_CODE_VALUE 65
+
+-static void qp_tx_psn_add(__u32 *x, __u32 y, __u16 mtu)
++static inline void qp_tx_psn_add(__u32 *x, __u32 y, __u16 mtu)
+ {
+ if (y == 0) {
+ *x = (*x + 1) & 0xffffff;
+ return;
+ }
+- *x = (*x + ((y % mtu) ? (y / mtu + 1) : y / mtu)) & 0xffffff;
++ __u32 chunks = (y + mtu - 1) / mtu;
++ *x = (*x + chunks) & 0xffffff;
+ }
+
+ int zxdh_get_write_imm_split_switch(void)
+@@ -95,26 +96,6 @@ static enum zxdh_status_code zxdh_nop_1(struct zxdh_qp *qp)
+ return 0;
+ }
+
+-/**
+- * zxdh_clr_wqes - clear next 128 sq entries
+- * @qp: hw qp ptr
+- * @qp_wqe_idx: wqe_idx
+- */
+-void zxdh_clr_wqes(struct zxdh_qp *qp, __u32 qp_wqe_idx)
+-{
+- __le64 *wqe;
+- __u32 wqe_idx;
+-
+- if (!(qp_wqe_idx & 0x7F)) {
+- wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
+- wqe = qp->sq_base[wqe_idx].elem;
+- if (wqe_idx)
+- memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
+- else
+- memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
+- }
+-}
+-
+ /**
+ * zxdh_qp_post_wr - ring doorbell
+ * @qp: hw qp ptr
+@@ -197,14 +178,13 @@ __le64 *zxdh_qp_get_next_send_wqe(struct zxdh_qp *qp, __u32 *wqe_idx,
+ avail_quanta = ZXDH_MAX_SQ_WQES_PER_PAGE -
+ (ZXDH_RING_CURRENT_HEAD(qp->sq_ring) %
+ ZXDH_MAX_SQ_WQES_PER_PAGE);
+- if (quanta <= avail_quanta) {
++ if (likely(quanta <= avail_quanta)) {
+ /* WR fits in current chunk */
+- if (quanta > ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring))
++ if (unlikely(quanta > ZXDH_RING_FREE_QUANTA(qp->sq_ring)))
+ return NULL;
+ } else {
+ /* Need to pad with NOP */
+- if (quanta + avail_quanta >
+- ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring))
++ if (quanta + avail_quanta > ZXDH_RING_FREE_QUANTA(qp->sq_ring))
+ return NULL;
+
+ for (i = 0; i < avail_quanta; i++) {
+@@ -287,8 +267,6 @@ zxdh_post_rdma_write(struct zxdh_qp *qp, struct zxdh_post_sq_info *info,
+ if (!wqe)
+ return ZXDH_ERR_QP_TOOMANY_WRS_POSTED;
+
+- zxdh_clr_wqes(qp, wqe_idx);
+-
+ if (op_info->num_lo_sges) {
+ set_64bit_val(
+ wqe, 16,
+@@ -635,8 +613,6 @@ static enum zxdh_status_code zxdh_post_rdma_read(struct zxdh_qp *qp,
+ if (!wqe)
+ return ZXDH_ERR_QP_TOOMANY_WRS_POSTED;
+
+- zxdh_clr_wqes(qp, wqe_idx);
+-
+ addl_frag_cnt =
+ op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0;
+ local_fence |= info->local_fence;
+@@ -817,8 +793,6 @@ enum zxdh_status_code zxdh_rc_send(struct zxdh_qp *qp,
+ if (!wqe)
+ return ZXDH_ERR_QP_TOOMANY_WRS_POSTED;
+
+- zxdh_clr_wqes(qp, wqe_idx);
+-
+ read_fence |= info->read_fence;
+ addl_frag_cnt = op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0;
+ if (op_info->num_sges) {
+@@ -975,7 +949,7 @@ enum zxdh_status_code zxdh_ud_send(struct zxdh_qp *qp,
+ if (ret_code)
+ return ret_code;
+
+- if (quanta > ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring))
++ if (quanta > ZXDH_RING_FREE_QUANTA(qp->sq_ring))
+ return ZXDH_ERR_QP_TOOMANY_WRS_POSTED;
+
+ wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring);
+@@ -989,8 +963,6 @@ enum zxdh_status_code zxdh_ud_send(struct zxdh_qp *qp,
+ qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
+ qp->sq_wrtrk_array[wqe_idx].quanta = quanta;
+
+- zxdh_clr_wqes(qp, wqe_idx);
+-
+ read_fence |= info->read_fence;
+ addl_frag_cnt = op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0;
+ hdr = FIELD_PREP(ZXDHQPSQ_VALID, qp->swqe_polarity) |
+@@ -1281,8 +1253,6 @@ enum zxdh_status_code zxdh_inline_rdma_write(struct zxdh_qp *qp,
+ if (!wqe)
+ return ZXDH_ERR_QP_TOOMANY_WRS_POSTED;
+
+- zxdh_clr_wqes(qp, wqe_idx);
+-
+ read_fence |= info->read_fence;
+ hdr = FIELD_PREP(ZXDHQPSQ_VALID, qp->swqe_polarity) |
+ FIELD_PREP(ZXDHQPSQ_OPCODE, info->op_type) |
+@@ -1293,7 +1263,7 @@ enum zxdh_status_code zxdh_inline_rdma_write(struct zxdh_qp *qp,
+ FIELD_PREP(ZXDHQPSQ_IMMDATAFLAG, imm_data_flag) |
+ FIELD_PREP(ZXDHQPSQ_WRITE_INLINEDATAFLAG, 1) |
+ FIELD_PREP(ZXDHQPSQ_WRITE_INLINEDATALEN, op_info->len) |
+- FIELD_PREP(ZXDHQPSQ_ADDFRAGCNT, quanta - 1) |
++ FIELD_PREP(ZXDHQPSQ_ADDFRAGCNT, (__u16)(quanta - 1)) |
+ FIELD_PREP(ZXDHQPSQ_REMSTAG, op_info->rem_addr.stag);
+ set_64bit_val(wqe, 24,
+ FIELD_PREP(ZXDHQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+@@ -1355,8 +1325,6 @@ enum zxdh_status_code zxdh_rc_inline_send(struct zxdh_qp *qp,
+ if (!wqe)
+ return ZXDH_ERR_QP_TOOMANY_WRS_POSTED;
+
+- zxdh_clr_wqes(qp, wqe_idx);
+-
+ read_fence |= info->read_fence;
+ hdr = FIELD_PREP(ZXDHQPSQ_VALID, qp->swqe_polarity) |
+ FIELD_PREP(ZXDHQPSQ_OPCODE, info->op_type) |
+@@ -1364,7 +1332,7 @@ enum zxdh_status_code zxdh_rc_inline_send(struct zxdh_qp *qp,
+ FIELD_PREP(ZXDHQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(ZXDHQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(ZXDHQPSQ_SOLICITED, info->solicited) |
+- FIELD_PREP(ZXDHQPSQ_ADDFRAGCNT, quanta - 1) |
++ FIELD_PREP(ZXDHQPSQ_ADDFRAGCNT, (__u16)(quanta - 1)) |
+ FIELD_PREP(ZXDHQPSQ_IMMDATAFLAG, imm_data_flag) |
+ FIELD_PREP(ZXDHQPSQ_REMSTAG, info->stag_to_inv);
+ set_64bit_val(wqe, 24,
+@@ -1430,7 +1398,7 @@ enum zxdh_status_code zxdh_ud_inline_send(struct zxdh_qp *qp,
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len,
+ imm_data_flag);
+- if (quanta > ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring))
++ if (quanta > ZXDH_RING_FREE_QUANTA(qp->sq_ring))
+ return ZXDH_ERR_QP_TOOMANY_WRS_POSTED;
+
+ wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring);
+@@ -1444,8 +1412,6 @@ enum zxdh_status_code zxdh_ud_inline_send(struct zxdh_qp *qp,
+ qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len;
+ qp->sq_wrtrk_array[wqe_idx].quanta = quanta;
+
+- zxdh_clr_wqes(qp, wqe_idx);
+-
+ read_fence |= info->read_fence;
+ hdr = FIELD_PREP(ZXDHQPSQ_VALID, qp->swqe_polarity) |
+ FIELD_PREP(ZXDHQPSQ_OPCODE, info->op_type) |
+@@ -1454,7 +1420,7 @@ enum zxdh_status_code zxdh_ud_inline_send(struct zxdh_qp *qp,
+ FIELD_PREP(ZXDHQPSQ_IMMDATAFLAG, imm_data_flag) |
+ FIELD_PREP(ZXDHQPSQ_UD_INLINEDATAFLAG, 1) |
+ FIELD_PREP(ZXDHQPSQ_UD_INLINEDATALEN, op_info->len) |
+- FIELD_PREP(ZXDHQPSQ_UD_ADDFRAGCNT, quanta - 1) |
++ FIELD_PREP(ZXDHQPSQ_UD_ADDFRAGCNT, (__u16)(quanta - 1)) |
+ FIELD_PREP(ZXDHQPSQ_AHID, op_info->ah_id);
+ set_64bit_val(wqe_base, 24,
+ FIELD_PREP(ZXDHQPSQ_DESTQPN, op_info->dest_qp) |
+@@ -1572,8 +1538,6 @@ enum zxdh_status_code zxdh_stag_local_invalidate(struct zxdh_qp *qp,
+ if (!wqe)
+ return ZXDH_ERR_QP_TOOMANY_WRS_POSTED;
+
+- zxdh_clr_wqes(qp, wqe_idx);
+-
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(ZXDHQPSQ_VALID, qp->swqe_polarity) |
+@@ -1618,8 +1582,6 @@ enum zxdh_status_code zxdh_mw_bind(struct zxdh_qp *qp,
+ if (!wqe)
+ return ZXDH_ERR_QP_TOOMANY_WRS_POSTED;
+
+- zxdh_clr_wqes(qp, wqe_idx);
+-
+ if (op_info->ena_writes) {
+ access = (op_info->ena_reads << 2) |
+ (op_info->ena_writes << 3) | (1 << 1) | access;
+@@ -2391,8 +2353,6 @@ enum zxdh_status_code zxdh_nop(struct zxdh_qp *qp, __u64 wr_id, bool signaled,
+ if (!wqe)
+ return ZXDH_ERR_QP_TOOMANY_WRS_POSTED;
+
+- zxdh_clr_wqes(qp, wqe_idx);
+-
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+diff --git a/providers/zrdma/zxdh_verbs.h b/providers/zrdma/zxdh_verbs.h
+index 69a98cc..40aa7bb 100644
+--- a/providers/zrdma/zxdh_verbs.h
++++ b/providers/zrdma/zxdh_verbs.h
+@@ -596,7 +596,6 @@ int zxdh_qp_round_up(__u32 wqdepth);
+ int zxdh_cq_round_up(__u32 wqdepth);
+ void zxdh_qp_push_wqe(struct zxdh_qp *qp, __le64 *wqe, __u16 quanta,
+ __u32 wqe_idx, bool post_sq);
+-void zxdh_clr_wqes(struct zxdh_qp *qp, __u32 qp_wqe_idx);
+
+ void zxdh_get_srq_wqe_shift(struct zxdh_dev_attrs *dev_attrs, __u32 sge,
+ __u8 *shift);
+--
+2.27.0
+
diff --git a/0048-libzrdma-Add-interface-aligned-with-kernel.patch b/0048-libzrdma-Add-interface-aligned-with-kernel.patch
new file mode 100644
index 0000000..ffe84a9
--- /dev/null
+++ b/0048-libzrdma-Add-interface-aligned-with-kernel.patch
@@ -0,0 +1,304 @@
+From da8370c2360deb73af7a211bec2be76b025cb5d3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E6=9D=8E=E5=AF=8C=E8=89=B3?=
+Date: Fri, 28 Mar 2025 11:36:50 +0800
+Subject: [PATCH] libzrdma: Add interface aligned with kernel
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
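+Besides the ABI struct and device-ID updates, read_wqe_need_split()
+now takes the chip revision, so RDMA reads are never split on
+chip_rev 2 parts (the assumption here is that rev-2 hardware handles
+long reads itself; the patch does not state why). A hedged C sketch of
+the check, mirroring the macro in zxdh_defs.h:
+
+    /* Split when the precomputed end PSN wraps behind next_psn,
+     * except on chip_rev 2 (assumption: handled in hardware). */
+    static int need_split(unsigned int pre_cal_psn,
+                          unsigned int next_psn, unsigned char chip_rev)
+    {
+            if (chip_rev == 2)
+                    return 0;
+            return (pre_cal_psn < next_psn && pre_cal_psn != 0) ||
+                   (next_psn <= 0x7FFFFF && pre_cal_psn > 0x800000);
+    }
+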
+Signed-off-by: 李富艳
+---
+ kernel-headers/rdma/zxdh-abi.h | 23 ++++++++-------
+ providers/zrdma/main.c | 34 +++++++++++++----------
+ providers/zrdma/zxdh_defs.h | 8 +++---
+ providers/zrdma/zxdh_devids.h | 9 ++++++
+ providers/zrdma/zxdh_hw.c | 9 ++++--
+ providers/zrdma/zxdh_verbs.h | 51 ++++++----------------------------
+ 6 files changed, 60 insertions(+), 74 deletions(-)
+
+diff --git a/kernel-headers/rdma/zxdh-abi.h b/kernel-headers/rdma/zxdh-abi.h
+index 665f874..59c0160 100644
+--- a/kernel-headers/rdma/zxdh-abi.h
++++ b/kernel-headers/rdma/zxdh-abi.h
+@@ -6,10 +6,9 @@
+
+ #include <linux/types.h>
+
+-/* zxdh must support legacy GEN_1 i40iw kernel
+- * and user-space whose last ABI ver is 5
+- */
++/* last user-space ABI version is 5 */
+ #define ZXDH_ABI_VER 5
++#define ZXDH_CONTEXT_VER_V1 5
+
+ enum zxdh_memreg_type {
+ ZXDH_MEMREG_TYPE_MEM = 0,
+@@ -35,7 +34,7 @@ struct zxdh_alloc_ucontext_resp {
+ __u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */
+ __u8 kernel_ver;
+ __u8 db_addr_type;
+- __u8 rsvd[2];
++ __u16 rdma_tool_flags;
+ __aligned_u64 feature_flags;
+ __aligned_u64 sq_db_mmap_key;
+ __aligned_u64 cq_db_mmap_key;
+@@ -51,8 +50,8 @@ struct zxdh_alloc_ucontext_resp {
+ __u32 min_hw_cq_size;
+ __u32 max_hw_cq_size;
+ __u16 max_hw_sq_chunk;
+- __u8 hw_rev;
+- __u8 rsvd2;
++ __u8 rsvd;
++ __u8 chip_rev;
+ };
+
+ struct zxdh_alloc_pd_resp {
+@@ -82,13 +81,13 @@ struct zxdh_create_srq_req {
+ };
+
+ struct zxdh_mem_reg_req {
+- __u16 reg_type; /* enum zxdh_memreg_type */
+- __u16 cq_pages;
+- __u16 rq_pages;
+- __u16 sq_pages;
+- __u16 srq_pages;
++ __u32 reg_type; /* enum zxdh_memreg_type */
++ __u32 cq_pages;
++ __u32 rq_pages;
++ __u32 sq_pages;
++ __u32 srq_pages;
+ __u16 srq_list_pages;
+- __u8 rsvd[4];
++ __u8 rsvd[2];
+ };
+
+ struct zxdh_reg_mr_resp {
+diff --git a/providers/zrdma/main.c b/providers/zrdma/main.c
+index e25a1a2..4626a21 100644
+--- a/providers/zrdma/main.c
++++ b/providers/zrdma/main.c
+@@ -22,6 +22,12 @@ static const struct verbs_match_ent hca_table[] = {
+ ZXDH_HCA(PCI_VENDOR_ID_ZXDH_EVB, ZXDH_DEV_ID_ADAPTIVE_EVB_VF),
+ ZXDH_HCA(PCI_VENDOR_ID_ZXDH_E312, ZXDH_DEV_ID_ADAPTIVE_E312_PF),
+ ZXDH_HCA(PCI_VENDOR_ID_ZXDH_E312, ZXDH_DEV_ID_ADAPTIVE_E312_VF),
++ ZXDH_HCA(PCI_VENDOR_ID_ZXDH_E310, ZXDH_DEV_ID_ADAPTIVE_E310_PF),
++ ZXDH_HCA(PCI_VENDOR_ID_ZXDH_E310, ZXDH_DEV_ID_ADAPTIVE_E310_VF),
++ ZXDH_HCA(PCI_VENDOR_ID_ZXDH_E310_RDMA, ZXDH_DEV_ID_ADAPTIVE_E310_RDMA_PF),
++ ZXDH_HCA(PCI_VENDOR_ID_ZXDH_E310_RDMA, ZXDH_DEV_ID_ADAPTIVE_E310_RDMA_VF),
++ ZXDH_HCA(PCI_VENDOR_ID_ZXDH_E316, ZXDH_DEV_ID_ADAPTIVE_E316_PF),
++ ZXDH_HCA(PCI_VENDOR_ID_ZXDH_E316, ZXDH_DEV_ID_ADAPTIVE_E316_VF),
+ ZXDH_HCA(PCI_VENDOR_ID_ZXDH_X512, ZXDH_DEV_ID_ADAPTIVE_X512_PF),
+ ZXDH_HCA(PCI_VENDOR_ID_ZXDH_X512, ZXDH_DEV_ID_ADAPTIVE_X512_VF),
+ {}
+@@ -100,7 +106,6 @@ static struct verbs_context *zxdh_ualloc_context(struct ibv_device *ibdev,
+ struct zxdh_get_context cmd;
+ struct zxdh_get_context_resp resp = {};
+ __u64 sq_db_mmap_key, cq_db_mmap_key;
+- __u8 user_ver = ZXDH_ABI_VER;
+
+ iwvctx = verbs_init_and_alloc_context(ibdev, cmd_fd, iwvctx, ibv_ctx,
+ RDMA_DRIVER_ZXDH);
+@@ -109,22 +114,16 @@ static struct verbs_context *zxdh_ualloc_context(struct ibv_device *ibdev,
+
+ zxdh_set_debug_mask();
+ iwvctx->zxdh_write_imm_split_switch = zxdh_get_write_imm_split_switch();
+- cmd.userspace_ver = user_ver;
++
++ cmd.userspace_ver = ZXDH_CONTEXT_VER_V1;
+ if (ibv_cmd_get_context(&iwvctx->ibv_ctx,
+ (struct ibv_get_context *)&cmd, sizeof(cmd),
+- &resp.ibv_resp, sizeof(resp))) {
+- cmd.userspace_ver = 4;
+- if (ibv_cmd_get_context(
+- &iwvctx->ibv_ctx, (struct ibv_get_context *)&cmd,
+- sizeof(cmd), &resp.ibv_resp, sizeof(resp)))
+- goto err_free;
+- user_ver = cmd.userspace_ver;
+- }
++ &resp.ibv_resp, sizeof(resp)))
++ goto err_free;
+
+ verbs_set_ops(&iwvctx->ibv_ctx, &zxdh_uctx_ops);
+
+ iwvctx->dev_attrs.feature_flags = resp.feature_flags;
+- iwvctx->dev_attrs.hw_rev = resp.hw_rev;
+ iwvctx->dev_attrs.max_hw_wq_frags = resp.max_hw_wq_frags;
+ iwvctx->dev_attrs.max_hw_read_sges = resp.max_hw_read_sges;
+ iwvctx->dev_attrs.max_hw_inline = resp.max_hw_inline;
+@@ -135,11 +134,20 @@ static struct verbs_context *zxdh_ualloc_context(struct ibv_device *ibdev,
+ iwvctx->dev_attrs.max_hw_sq_chunk = resp.max_hw_sq_chunk;
+ iwvctx->dev_attrs.max_hw_cq_size = resp.max_hw_cq_size;
+ iwvctx->dev_attrs.min_hw_cq_size = resp.min_hw_cq_size;
+- iwvctx->abi_ver = user_ver;
++ iwvctx->abi_ver = ZXDH_ABI_VER;
++ iwvctx->dev_attrs.chip_rev = resp.chip_rev;
++ iwvctx->dev_attrs.rdma_tool_flags = resp.rdma_tool_flags;
+
+ sq_db_mmap_key = resp.sq_db_mmap_key;
+ cq_db_mmap_key = resp.cq_db_mmap_key;
+
++ iwvctx->dev_attrs.db_addr_type = resp.db_addr_type;
++ iwvctx->dev_attrs.sq_db_pa = resp.sq_db_pa;
++ iwvctx->dev_attrs.cq_db_pa = resp.cq_db_pa;
++
++ if (iwvctx->dev_attrs.db_addr_type != ZXDH_DB_ADDR_BAR)
++ goto err_free;
++
+ iwvctx->sq_db = zxdh_mmap(cmd_fd, sq_db_mmap_key);
+ if (iwvctx->sq_db == MAP_FAILED)
+ goto err_free;
+@@ -160,10 +168,8 @@ static struct verbs_context *zxdh_ualloc_context(struct ibv_device *ibdev,
+ iwvctx->iwupd = container_of(ibv_pd, struct zxdh_upd, ibv_pd);
+ add_private_ops(iwvctx);
+ return &iwvctx->ibv_ctx;
+-
+ err_free:
+ free(iwvctx);
+-
+ return NULL;
+ }
+
+diff --git a/providers/zrdma/zxdh_defs.h b/providers/zrdma/zxdh_defs.h
+index 3863fb9..8772e7b 100644
+--- a/providers/zrdma/zxdh_defs.h
++++ b/providers/zrdma/zxdh_defs.h
+@@ -389,8 +389,8 @@ static inline void db_wr32(__u32 val, __u32 *wqe_word)
+ *wqe_word = val;
+ }
+
+-#define read_wqe_need_split(pre_cal_psn, next_psn) \
+- (((pre_cal_psn < next_psn) && (pre_cal_psn != 0)) || \
+- ((next_psn <= 0x7FFFFF) && (pre_cal_psn > 0x800000)))
+-
++#define read_wqe_need_split(pre_cal_psn, next_psn, chip_rev) \
++ (!(chip_rev == 2) && \
++ (((pre_cal_psn < next_psn) && (pre_cal_psn != 0)) || \
++ ((next_psn <= 0x7FFFFF) && (pre_cal_psn > 0x800000))))
+ #endif /* ZXDH_DEFS_H */
+diff --git a/providers/zrdma/zxdh_devids.h b/providers/zrdma/zxdh_devids.h
+index ac23124..3430f5f 100644
+--- a/providers/zrdma/zxdh_devids.h
++++ b/providers/zrdma/zxdh_devids.h
+@@ -6,12 +6,21 @@
+ /* ZXDH VENDOR ID */
+ #define PCI_VENDOR_ID_ZXDH_EVB 0x16c3
+ #define PCI_VENDOR_ID_ZXDH_E312 0x1cf2
++#define PCI_VENDOR_ID_ZXDH_E310 0x1cf2
++#define PCI_VENDOR_ID_ZXDH_E310_RDMA 0x1cf2
++#define PCI_VENDOR_ID_ZXDH_E316 0x1cf2
+ #define PCI_VENDOR_ID_ZXDH_X512 0x1cf2
+ /* ZXDH Devices ID */
+ #define ZXDH_DEV_ID_ADAPTIVE_EVB_PF 0x8040 /* ZXDH EVB PF DEVICE ID*/
+ #define ZXDH_DEV_ID_ADAPTIVE_EVB_VF 0x8041 /* ZXDH EVB VF DEVICE ID*/
+ #define ZXDH_DEV_ID_ADAPTIVE_E312_PF 0x8049 /* ZXDH E312 PF DEVICE ID*/
+ #define ZXDH_DEV_ID_ADAPTIVE_E312_VF 0x8060 /* ZXDH E312 VF DEVICE ID*/
++#define ZXDH_DEV_ID_ADAPTIVE_E310_PF 0x8061 /* ZXDH E310 PF DEVICE ID*/
++#define ZXDH_DEV_ID_ADAPTIVE_E310_VF 0x8062 /* ZXDH E310 VF DEVICE ID*/
++#define ZXDH_DEV_ID_ADAPTIVE_E310_RDMA_PF 0x8084 /* ZXDH E310_RDMA PF DEVICE ID*/
++#define ZXDH_DEV_ID_ADAPTIVE_E310_RDMA_VF 0x8085 /* ZXDH E310_RDMA VF DEVICE ID*/
++#define ZXDH_DEV_ID_ADAPTIVE_E316_PF 0x807e /* ZXDH E316 PF DEVICE ID*/
++#define ZXDH_DEV_ID_ADAPTIVE_E316_VF 0x807f /* ZXDH E316 VF DEVICE ID*/
+ #define ZXDH_DEV_ID_ADAPTIVE_X512_PF 0x806B /* ZXDH X512 PF DEVICE ID*/
+ #define ZXDH_DEV_ID_ADAPTIVE_X512_VF 0x806C /* ZXDH X512 VF DEVICE ID*/
+ #endif /* ZXDH_DEVIDS_H */
+diff --git a/providers/zrdma/zxdh_hw.c b/providers/zrdma/zxdh_hw.c
+index 073b198..99489dc 100644
+--- a/providers/zrdma/zxdh_hw.c
++++ b/providers/zrdma/zxdh_hw.c
+@@ -703,8 +703,12 @@ enum zxdh_status_code zxdh_rdma_read(struct zxdh_qp *qp,
+ struct zxdh_post_sq_info split_part2_info = { 0 };
+ struct zxdh_rdma_read *op_info;
+ enum zxdh_status_code ret_code;
++ struct zxdh_uqp *iwuqp;
++ struct zxdh_uvcontext *iwvctx;
+ __u32 i, total_size = 0, pre_cal_psn = 0;
+-
++ iwuqp = container_of(qp, struct zxdh_uqp, qp);
++ iwvctx = container_of(iwuqp->vqp.qp.context, struct zxdh_uvcontext,
++ ibv_ctx.context);
+ op_info = &info->op.rdma_read;
+ if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
+ return ZXDH_ERR_INVALID_FRAG_COUNT;
+@@ -720,7 +724,8 @@ enum zxdh_status_code zxdh_rdma_read(struct zxdh_qp *qp,
+ op_info->rem_addr.len = total_size;
+ pre_cal_psn = qp->next_psn;
+ qp_tx_psn_add(&pre_cal_psn, total_size, qp->mtu);
+- if (read_wqe_need_split(pre_cal_psn, qp->next_psn)) {
++ if (read_wqe_need_split(pre_cal_psn, qp->next_psn,
++ iwvctx->dev_attrs.chip_rev)) {
+ split_two_part_info(qp, info, qp->next_psn, pre_cal_psn,
+ &split_part1_info, &split_part2_info);
+ ret_code = zxdh_post_rdma_read(qp, &split_part1_info, post_sq,
+diff --git a/providers/zrdma/zxdh_verbs.h b/providers/zrdma/zxdh_verbs.h
+index 40aa7bb..1a26cf4 100644
+--- a/providers/zrdma/zxdh_verbs.h
++++ b/providers/zrdma/zxdh_verbs.h
+@@ -71,47 +71,13 @@
+
+ #define ZXDH_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20)
+
+-enum zxdh_device_caps_const {
+- ZXDH_WQE_SIZE = 4,
+- ZXDH_SRQE_SIZE = 2,
+- ZXDH_CQP_WQE_SIZE = 8,
+- ZXDH_CQE_SIZE = 8,
+- ZXDH_EXTENDED_CQE_SIZE = 8,
+- ZXDH_AEQE_SIZE = 2,
+- ZXDH_CEQE_SIZE = 1,
+- ZXDH_CQP_CTX_SIZE = 8,
+- ZXDH_SHADOW_AREA_SIZE = 8,
+- ZXDH_GATHER_STATS_BUF_SIZE = 1024,
+- ZXDH_MIN_IW_QP_ID = 0,
+- ZXDH_QUERY_FPM_BUF_SIZE = 176,
+- ZXDH_COMMIT_FPM_BUF_SIZE = 176,
+- ZXDH_MAX_IW_QP_ID = 262143,
+- ZXDH_MIN_CEQID = 0,
+- ZXDH_MAX_CEQID = 1023,
+- ZXDH_CEQ_MAX_COUNT = ZXDH_MAX_CEQID + 1,
+- ZXDH_MIN_CQID = 0,
+- ZXDH_MAX_CQID = 524287,
+- ZXDH_MIN_AEQ_ENTRIES = 1,
+- ZXDH_MAX_AEQ_ENTRIES = 524287,
+- ZXDH_MIN_CEQ_ENTRIES = 1,
+- ZXDH_MAX_CEQ_ENTRIES = 262143,
+- ZXDH_MIN_CQ_SIZE = 1,
+- ZXDH_MAX_CQ_SIZE = 1048575,
+- ZXDH_DB_ID_ZERO = 0,
+- ZXDH_MAX_WQ_FRAGMENT_COUNT = 13,
+- ZXDH_MAX_SGE_RD = 13,
+- ZXDH_MAX_OUTBOUND_MSG_SIZE = 2147483647,
+- ZXDH_MAX_INBOUND_MSG_SIZE = 2147483647,
+- ZXDH_MAX_PUSH_PAGE_COUNT = 1024,
+- ZXDH_MAX_PE_ENA_VF_COUNT = 32,
+- ZXDH_MAX_VF_FPM_ID = 47,
+- ZXDH_MAX_SQ_PAYLOAD_SIZE = 2147483648,
+- ZXDH_MAX_INLINE_DATA_SIZE = 217,
+- ZXDH_MAX_WQ_ENTRIES = 32768,
+- ZXDH_Q2_BUF_SIZE = 256,
+- ZXDH_QP_CTX_SIZE = 256,
+- ZXDH_MAX_PDS = 262144,
+-};
++#define ZXDH_SRQE_SIZE 2
++#define ZXDH_CQE_SIZE 8
++#define ZXDH_EXTENDED_CQE_SIZE 8
++#define ZXDH_MAX_INLINE_DATA_SIZE 217
++#define ZXDH_MAX_SQ_PAYLOAD_SIZE 2147483648
++#define ZXDH_MIN_CQ_SIZE 1
++#define ZXDH_MAX_CQ_SIZE 2097152
+
+ enum zxdh_addressing_type {
+ ZXDH_ADDR_TYPE_ZERO_BASED = 0,
+@@ -394,8 +360,9 @@ struct zxdh_dev_attrs {
+ __u32 max_hw_cq_size;
+ __u16 max_hw_sq_chunk;
+ __u32 max_hw_srq_wr;
+- __u8 hw_rev;
+ __u8 db_addr_type;
++ __u8 chip_rev;
++ __u16 rdma_tool_flags;
+ };
+
+ struct zxdh_hw_attrs {
+--
+2.27.0
+
diff --git a/0049-libzrdma-Add-poll-cqe-error-to-Failed-status.patch b/0049-libzrdma-Add-poll-cqe-error-to-Failed-status.patch
new file mode 100644
index 0000000..1d60e34
--- /dev/null
+++ b/0049-libzrdma-Add-poll-cqe-error-to-Failed-status.patch
@@ -0,0 +1,241 @@
+From 3a0b295e98d9557c65274424dc23b74de4aef8d2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E6=9D=8E=E5=AF=8C=E8=89=B3?=
+Date: Fri, 28 Mar 2025 15:13:32 +0800
+Subject: [PATCH] libzrdma: Add poll cqe error to Failed status
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
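+Completion status is now derived from the raw hardware error code
+rather than only the flush opcode. The lookup key packs the major
+error in the high 16 bits and the minor error in the low 16, which is
+the same value reported in ibv_wc.vendor_err. A small illustrative
+helper (not part of the patch) showing how to decompose it:
+
+    /* vendor_err = major_err << 16 | minor_err, as set in
+     * zxdh_process_cqe(); e.g. 0x800f1 is major 0x8, minor 0xf1
+     * (ZXDH_TX_ACK_SYS_TOP_NAK_RETRY_LIMIT -> IBV_WC_RETRY_EXC_ERR). */
+    static void vendor_err_split(unsigned int vendor_err,
+                                 unsigned int *major, unsigned int *minor)
+    {
+            *major = vendor_err >> 16;
+            *minor = vendor_err & 0xffff;
+    }
+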
+Signed-off-by: 李富艳
+---
+ providers/zrdma/zxdh_verbs.c | 68 +++++++++++++--------
+ providers/zrdma/zxdh_verbs.h | 113 +++++++++++++++++++++++++++++++++++
+ 2 files changed, 156 insertions(+), 25 deletions(-)
+
+diff --git a/providers/zrdma/zxdh_verbs.c b/providers/zrdma/zxdh_verbs.c
+index 93cf705..f67f8c7 100644
+--- a/providers/zrdma/zxdh_verbs.c
++++ b/providers/zrdma/zxdh_verbs.c
+@@ -616,30 +616,50 @@ int zxdh_umodify_cq(struct ibv_cq *cq, struct ibv_modify_cq_attr *attr)
+ }
+
+ static enum ibv_wc_status
+-zxdh_flush_err_to_ib_wc_status(enum zxdh_flush_opcode opcode)
++zxdh_err_to_ib_wc_status(__u32 opcode)
+ {
+ switch (opcode) {
+- case FLUSH_PROT_ERR:
++ case ZXDH_RX_WQE_LEN_ERR:
++ return IBV_WC_LOC_LEN_ERR;
++ case ZXDH_TX_ACK_SYS_TOP_VADDR_LEN_CHECK_ERR:
++ case ZXDH_TX_ACK_SYS_TOP_LKEY_CHECK_ERR:
++ case ZXDH_TX_ACK_SYS_TOP_ACCESS_RIGHT_CHECK_ERR:
++ case ZXDH_RX_MR_MW_STATE_FREE_ERR:
++ case ZXDH_RX_MR_MW_STATE_INVALID_ERR:
++ case ZXDH_RX_MR_MW_PD_CHECK_ERR:
++ case ZXDH_RX_MR_MW_KEY_CHECK_ERR:
++ case ZXDH_RX_MR_MW_STAG_INDEX_CHECK_ERR:
++ case ZXDH_RX_MR_MW_BOUNDARY_CHECK_ERR:
++ case ZXDH_RX_MR_MW_0STAG_INDEX_CHECK_ERR:
++ case ZXDH_RX_MW_STATE_INVALID_ERR:
++ case ZXDH_RX_MW_PD_CHECK_ERR:
++ case ZXDH_RX_MW_STAG_INDEX_CHECK_ERR:
++ case ZXDH_RX_MW_SHARE_MR_CHECK_ERR:
++ case ZXDH_RX_MR_PD_CHECK_ERR:
++ case ZXDH_RX_MR_SHARE_MR_CHECK_ERR:
++ case ZXDH_RX_MR_MW_ACCESS_CHECK_ERR:
+ return IBV_WC_LOC_PROT_ERR;
+- case FLUSH_REM_ACCESS_ERR:
++ case ZXDH_TX_PARSE_TOP_WQE_FLUSH:
++ return IBV_WC_WR_FLUSH_ERR;
++ case ZXDH_TX_ACK_SYS_TOP_NAK_INVALID_REQ:
++ return IBV_WC_REM_INV_REQ_ERR;
++ case ZXDH_TX_ACK_SYS_TOP_NAK_REMOTE_ACCESS_ERR:
++ case ZXDH_RX_MW_RKEY_CHECK_ERR:
++ case ZXDH_RX_MR_RKEY_CHECK_ERR:
+ return IBV_WC_REM_ACCESS_ERR;
+- case FLUSH_LOC_QP_OP_ERR:
+- return IBV_WC_LOC_QP_OP_ERR;
+- case FLUSH_REM_OP_ERR:
++ case ZXDH_TX_ACK_SYS_TOP_NAK_REMOTE_OPERATIONAL_ERR:
+ return IBV_WC_REM_OP_ERR;
+- case FLUSH_LOC_LEN_ERR:
+- return IBV_WC_LOC_LEN_ERR;
+- case FLUSH_GENERAL_ERR:
+- return IBV_WC_WR_FLUSH_ERR;
+- case FLUSH_RETRY_EXC_ERR:
++ case ZXDH_TX_ACK_SYS_TOP_NAK_RETRY_LIMIT:
++ case ZXDH_TX_ACK_SYS_TOP_READ_RETRY_LIMIT:
++ case ZXDH_TX_ACK_SYS_TOP_TIMEOUT_RETRY_LIMIT:
+ return IBV_WC_RETRY_EXC_ERR;
+- case FLUSH_MW_BIND_ERR:
+- return IBV_WC_MW_BIND_ERR;
+- case FLUSH_REM_INV_REQ_ERR:
+- return IBV_WC_REM_INV_REQ_ERR;
+- case FLUSH_FATAL_ERR:
+- default:
++ case ZXDH_TX_ACK_SYS_TOP_RNR_RETRY_LIMIT:
++ return IBV_WC_RNR_RETRY_EXC_ERR;
++ case ZXDH_TX_PARSE_TOP_AXI_ERR:
++ case ZXDH_RX_AXI_RESP_ERR:
+ return IBV_WC_FATAL_ERR;
++ default:
++ return IBV_WC_GENERAL_ERR;
+ }
+ }
+
+@@ -656,10 +676,9 @@ static inline void zxdh_process_cqe_ext(struct zxdh_cq_poll_info *cur_cqe)
+ ibvcq_ex->wr_id = cur_cqe->wr_id;
+ if (cur_cqe->error)
+ ibvcq_ex->status =
+- (cur_cqe->comp_status == ZXDH_COMPL_STATUS_FLUSHED) ?
+- zxdh_flush_err_to_ib_wc_status(
+- cur_cqe->minor_err) :
+- IBV_WC_GENERAL_ERR;
++ zxdh_err_to_ib_wc_status(
++ cur_cqe->major_err << 16 |
++ cur_cqe->minor_err);
+ else
+ ibvcq_ex->status = IBV_WC_SUCCESS;
+ }
+@@ -683,10 +702,9 @@ static inline void zxdh_process_cqe(struct ibv_wc *entry,
+
+ if (cur_cqe->error) {
+ entry->status =
+- (cur_cqe->comp_status == ZXDH_COMPL_STATUS_FLUSHED) ?
+- zxdh_flush_err_to_ib_wc_status(
+- cur_cqe->minor_err) :
+- IBV_WC_GENERAL_ERR;
++ zxdh_err_to_ib_wc_status(
++ cur_cqe->major_err << 16 |
++ cur_cqe->minor_err);
+ entry->vendor_err =
+ cur_cqe->major_err << 16 | cur_cqe->minor_err;
+ } else {
+diff --git a/providers/zrdma/zxdh_verbs.h b/providers/zrdma/zxdh_verbs.h
+index 1a26cf4..e3974c1 100644
+--- a/providers/zrdma/zxdh_verbs.h
++++ b/providers/zrdma/zxdh_verbs.h
+@@ -146,6 +146,119 @@ enum zxdh_page_size {
+ ZXDH_PAGE_SIZE_1G = 18,
+ };
+
++enum zxdh_rdmatx_parse_top_err {
++ ZXDH_TX_PARSE_TOP_AXI_ERR = 0x1,
++ ZXDH_TX_PARSE_TOP_WQE_FLUSH = 0x10001,
++ ZXDH_TX_PARSE_TOP_ORD_ERR = 0x20020,
++ ZXDH_TX_PARSE_TOP_OPCODE_ERR_FLAG = 0x20021,
++ ZXDH_TX_PARSE_TOP_CQP_STATE_AXI_ERR = 0x20022,
++ ZXDH_TX_PARSE_TOP_WQE_LEN_ERR = 0x20023,
++ ZXDH_TX_PARSE_TOP_DATA_LEN_ERR = 0x20024,
++ ZXDH_TX_PARSE_TOP_AH_VALID0_ERR = 0x20025,
++ ZXDH_TX_PARSE_TOP_UD_PDINDEX_ERR = 0x20026,
++ ZXDH_TX_PARSE_TOP_QP_STATE_ERR = 0x20027,
++ ZXDH_TX_PARSE_TOP_SERVICE_TYPE_ERR = 0x20028,
++ ZXDH_TX_PARSE_TOP_UD_PAYLOAD_ERR = 0x20029,
++ ZXDH_TX_PARSE_TOP_WQE_LEN0_ERR = 0x2002a,
++ ZXDH_TX_PARSE_TOP_WQE_DEFICIENT_CLR_ERR = 0x2002b,
++ ZXDH_TX_PARSE_TOP_IMMDT_ERR = 0x2002c,
++ ZXDH_TX_PARSE_TOP_FRAGMENT_LENGTH_ERR = 0x2009f,
++ ZXDH_TX_PARSE_TOP_MRTE_STATE_ERR = 0x90091,
++ ZXDH_TX_PARSE_TOP_QP_CHECK_ERR = 0x90092,
++ ZXDH_TX_PARSE_TOP_PD_CHECK_ERR = 0x90093,
++ ZXDH_TX_PARSE_TOP_LKEY_CHECK_ERR = 0x90094,
++ ZXDH_TX_PARSE_TOP_STAG_INDEX_CHECK_ERR = 0x90095,
++ ZXDH_TX_PARSE_TOP_VADDR_LEN_CHECK_ERR = 0x90096,
++ ZXDH_TX_PARSE_TOP_ACCESS_RIGHT_CHECK_ERR = 0x90097,
++ ZXDH_TX_PARSE_TOP_STAG_INDEX_CHECK_ZERO_ERR = 0x90098,
++};
++
++enum zxdh_rdmatx_ack_sys_top_err {
++ ZXDH_TX_ACK_SYS_TOP_NVME_INDEX_ERR = 0x30030,
++ ZXDH_TX_ACK_SYS_TOP_NVME_NOF_QID_ERR = 0x30031,
++ ZXDH_TX_ACK_SYS_TOP_NVME_NOF_PD_INDEX_ERR = 0x30032,
++ ZXDH_TX_ACK_SYS_TOP_NVME_LENGTH_ERR = 0x30033,
++ ZXDH_TX_ACK_SYS_TOP_NVME_KEY_ERR = 0x30034,
++ ZXDH_TX_ACK_SYS_TOP_NVME_ACCESS_ERR = 0x30035,
++ ZXDH_TX_ACK_SYS_TOP_MRTE_STATE_ERR = 0x50091,
++ ZXDH_TX_ACK_SYS_TOP_QP_CHECK_ERR = 0x50092,
++ ZXDH_TX_ACK_SYS_TOP_PD_CHECK_ERR = 0x50093,
++ ZXDH_TX_ACK_SYS_TOP_LKEY_CHECK_ERR = 0x50094,
++ ZXDH_TX_ACK_SYS_TOP_STAG_INDEX_CHECK_ERR = 0x50095,
++ ZXDH_TX_ACK_SYS_TOP_VADDR_LEN_CHECK_ERR = 0x50096,
++ ZXDH_TX_ACK_SYS_TOP_ACCESS_RIGHT_CHECK_ERR = 0x50097,
++ ZXDH_TX_ACK_SYS_TOP_STAG_INDEX_CHECK_ZERO_ERR = 0x50098,
++ ZXDH_TX_ACK_SYS_TOP_LOC_LEN_ERR = 0x600c0,
++ ZXDH_TX_ACK_SYS_TOP_NAK_INVALID_REQ = 0x700d0,
++ ZXDH_TX_ACK_SYS_TOP_NAK_REMOTE_ACCESS_ERR = 0x700d1,
++ ZXDH_TX_ACK_SYS_TOP_NAK_REMOTE_OPERATIONAL_ERR = 0x700d2,
++ ZXDH_TX_ACK_SYS_TOP_NAK_RETRY_LIMIT = 0x800f1,
++ ZXDH_TX_ACK_SYS_TOP_READ_RETRY_LIMIT = 0x800f2,
++ ZXDH_TX_ACK_SYS_TOP_TIMEOUT_RETRY_LIMIT = 0x800f3,
++ ZXDH_TX_ACK_SYS_TOP_RNR_RETRY_LIMIT = 0x800f4,
++};
++
++enum zxdh_rdmatx_window_top_err {
++ ZXDH_TX_WINDOW_TOP_WINDOW_NO_ENTRY = 0x800f5,
++ ZXDH_TX_WINDOW_TOP_WINDOW_BACK_MSN = 0x800f6,
++ ZXDH_TX_WINDOW_TOP_WINDOW_SMALL_MSN = 0x800f7,
++};
++
++enum zxdh_rdmatx_doorbell_mgr_err {
++ ZXDH_TX_DOORBELL_MGR_INDEX_CHECK_ERROR = 0x30036,
++ ZXDH_TX_DOORBELL_MGR_QID_CHECK_ERROR = 0x30037,
++ ZXDH_TX_DOORBELL_MGR_PD_INDEX_CHECK_ERROR = 0x30038,
++ ZXDH_TX_DOORBELL_MGR_LENGTH_CHECK_ERROR = 0x30039,
++ ZXDH_TX_DOORBELL_MGR_KEY_CHECK_ERROR = 0x3003a,
++ ZXDH_TX_DOORBELL_MGR_ACCESS_CHECK_ERROR = 0x3003b,
++};
++
++enum zxdh_rdmarx_err {
++ ZXDH_RX_CQP_FLUSH = 0x12,
++ ZXDH_RX_FIRST_PACKET_ERR = 0x4f,
++ ZXDH_RX_INVALID_OPCODE = 0x50,
++ ZXDH_RX_ORDER_ERR = 0x51,
++ ZXDH_RX_LEN_ERR = 0x52,
++ ZXDH_RX_SQR_STATE_ERR = 0x53,
++ ZXDH_RX_WQE_SIGN_ERR = 0x54,
++ ZXDH_RX_WQE_LEN_ERR = 0x55,
++ ZXDH_RX_SQR_WATER_LEVEL_ERR = 0x80,
++ ZXDH_RX_SRQ_AXI_RESP_ERR = 0xb1,
++ ZXDH_RX_CQ_OVERFLOW_ERR = 0x76,
++ ZXDH_RX_QP_CQ_OVERFLOW_ERR = 0x78,
++ ZXDH_RX_CQ_STATE_ERR = 0x7a,
++ ZXDH_RX_CQ_AXI_ERR = 0x7b,
++ ZXDH_RX_QP_CQ_AXI_ERR = 0x7c,
++ ZXDH_RX_NOF_IOQ_ERR = 0x70,
++ ZXDH_RX_NOF_PDNUM_ERR = 0x71,
++ ZXDH_RX_NOF_LEN_ERR = 0x72,
++ ZXDH_RX_NOF_RKEY_ERR = 0x73,
++ ZXDH_RX_NOF_ACC_ERR = 0x74,
++ ZXDH_RX_IRD_OVF = 0x77,
++ ZXDH_RX_MR_MW_STATE_FREE_ERR = 0x90,
++ ZXDH_RX_MR_MW_STATE_INVALID_ERR = 0x91,
++ ZXDH_RX_TYPE2B_MW_QPN_CHECK_ERR = 0x92,
++ ZXDH_RX_MR_MW_PD_CHECK_ERR = 0x93,
++ ZXDH_RX_MR_MW_KEY_CHECK_ERR = 0x94,
++ ZXDH_RX_MR_MW_STAG_INDEX_CHECK_ERR = 0x95,
++ ZXDH_RX_MR_MW_BOUNDARY_CHECK_ERR = 0x96,
++ ZXDH_RX_MR_MW_ACCESS_CHECK_ERR = 0x97,
++ ZXDH_RX_MR_MW_0STAG_INDEX_CHECK_ERR = 0x98,
++ ZXDH_RX_MW_STATE_INVALID_ERR = 0x99,
++ ZXDH_RX_MW_PD_CHECK_ERR = 0x9a,
++ ZXDH_RX_MW_RKEY_CHECK_ERR = 0x9b,
++ ZXDH_RX_TYPE2BMW_QPN_CHECK_ERR = 0x9c,
++ ZXDH_RX_MW_STAG_INDEX_CHECK_ERR = 0x9d,
++ ZXDH_RX_MW_SHARE_MR_CHECK_ERR = 0x9e,
++ ZXDH_RX_MW_TYPE1_CHECK_ERR = 0x9f,
++ ZXDH_RX_MR_PD_CHECK_ERR = 0xa0,
++ ZXDH_RX_MR_RKEY_CHECK_ERR = 0xa1,
++ ZXDH_RX_MR_SHARE_MR_CHECK_ERR = 0xa4,
++ ZXDH_RX_MR_BOND_MW_NUM_CHECK_ERR = 0xa5,
++ ZXDH_RX_MR_CANBE_R_INVALID_CHECK_ERR = 0xa6,
++ ZXDH_RX_AXI_RESP_ERR = 0xb0,
++};
++
+ struct zxdh_qp;
+ struct zxdh_cq;
+ struct zxdh_qp_init_info;
+--
+2.27.0
+
diff --git a/0050-libzrdma-Add-sq-rq-flush-cqe-and-log-optimization.patch b/0050-libzrdma-Add-sq-rq-flush-cqe-and-log-optimization.patch
new file mode 100644
index 0000000..b65ed3f
--- /dev/null
+++ b/0050-libzrdma-Add-sq-rq-flush-cqe-and-log-optimization.patch
@@ -0,0 +1,625 @@
+From 9f440ac471dce7fa44f0ecb6df4ca2ac0509d105 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E6=9D=8E=E5=AF=8C=E8=89=B3?=
+Date: Fri, 28 Mar 2025 15:30:41 +0800
+Subject: [PATCH] libzrdma: Add sq/rq flush cqe and log optimization
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
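+The zxdh_dbg() macro no longer needs a verbs context: it prints to
+stdout, is gated by bits in zxdh_debug_mask, and preserves errno
+across the fprintf. Typical call sites after this change look like
+the sketch below (values are illustrative):
+
+    /* Enabled at runtime by setting the matching bit in
+     * zxdh_debug_mask (ZXDH_DBG_QP, ZXDH_DBG_CQ, ZXDH_DBG_SRQ). */
+    zxdh_dbg(ZXDH_DBG_SRQ, "%s srq_id:%d\n", __func__, iwusrq->srq_id);
+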
+Signed-off-by: 李富艳
+---
+ providers/zrdma/main.h | 10 +-
+ providers/zrdma/zxdh_hw.c | 253 ++++++++++++++++++++++-------------
+ providers/zrdma/zxdh_verbs.c | 72 +++++-----
+ 3 files changed, 206 insertions(+), 129 deletions(-)
+
+diff --git a/providers/zrdma/main.h b/providers/zrdma/main.h
+index e28c77b..7c78fed 100644
+--- a/providers/zrdma/main.h
++++ b/providers/zrdma/main.h
+@@ -38,15 +38,17 @@ enum {
+ ZXDH_DBG_SRQ = 1 << 2,
+ };
+ extern uint32_t zxdh_debug_mask;
+-#define zxdh_dbg(ctx, mask, format, arg...) \
++#define zxdh_dbg(mask, format, arg...) \
+ do { \
+ if (mask & zxdh_debug_mask) { \
+- int zxdh_dbg_tmp = errno; \
+- verbs_debug(ctx, format, ##arg); \
+- errno = zxdh_dbg_tmp; \
++ int tmp = errno; \
++ fprintf(stdout, "%s:%d: " format, __func__, __LINE__, \
++ ##arg); \
++ errno = tmp; \
+ } \
+ } while (0)
+
++
+ struct zxdh_udevice {
+ struct verbs_device ibv_dev;
+ };
+diff --git a/providers/zrdma/zxdh_hw.c b/providers/zrdma/zxdh_hw.c
+index 99489dc..fb8f016 100644
+--- a/providers/zrdma/zxdh_hw.c
++++ b/providers/zrdma/zxdh_hw.c
+@@ -1785,19 +1785,26 @@ static inline void build_comp_status(__u32 cq_type,
+ }
+ return;
+ }
+- if (info->major_err == ZXDH_RETRY_ACK_MAJOR_ERR &&
+- info->minor_err == ZXDH_RETRY_ACK_MINOR_ERR) {
+- info->comp_status = ZXDH_COMPL_STATUS_RETRY_ACK_ERR;
++
++ switch (info->major_err) {
++ case ZXDH_RETRY_ACK_MAJOR_ERR:
++ if (info->minor_err == ZXDH_RETRY_ACK_MINOR_ERR) {
++ info->comp_status = ZXDH_COMPL_STATUS_RETRY_ACK_ERR;
++ return;
++ }
++ if (info->minor_err == ZXDH_TX_WINDOW_QUERY_ITEM_MINOR_ERR) {
++ info->comp_status =
++ ZXDH_COMPL_STATUS_TX_WINDOW_QUERY_ITEM_ERR;
++ return;
++ }
++ break;
++ case ZXDH_FLUSH_MAJOR_ERR:
++ info->comp_status = ZXDH_COMPL_STATUS_FLUSHED;
+ return;
+- }
+- if (info->major_err == ZXDH_RETRY_ACK_MAJOR_ERR &&
+- info->minor_err == ZXDH_TX_WINDOW_QUERY_ITEM_MINOR_ERR) {
+- info->comp_status = ZXDH_COMPL_STATUS_TX_WINDOW_QUERY_ITEM_ERR;
++ default:
++ info->comp_status = ZXDH_COMPL_STATUS_UNKNOWN;
+ return;
+ }
+- info->comp_status = (info->major_err == ZXDH_FLUSH_MAJOR_ERR) ?
+- ZXDH_COMPL_STATUS_FLUSHED :
+- ZXDH_COMPL_STATUS_UNKNOWN;
+ }
+
+ __le64 *get_current_cqe(struct zxdh_cq *cq)
+@@ -1837,9 +1844,9 @@ static inline void zxdh_get_cq_poll_info(struct zxdh_qp *qp,
+ }
+ }
+
+-static void update_cq_poll_info(struct zxdh_qp *qp,
+- struct zxdh_cq_poll_info *info, __u32 wqe_idx,
+- __u64 qword0)
++static enum zxdh_status_code update_cq_poll_info(struct zxdh_qp *qp,
++ struct zxdh_cq_poll_info *info,
++ __u32 wqe_idx, __u64 qword0)
+ {
+ info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+ if (!info->comp_status)
+@@ -1847,6 +1854,7 @@ static void update_cq_poll_info(struct zxdh_qp *qp,
+ info->op_type = (__u8)FIELD_GET(ZXDHCQ_OP, qword0);
+ ZXDH_RING_SET_TAIL(qp->sq_ring,
+ wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
++ return ZXDH_SUCCESS;
+ }
+
+ static enum zxdh_status_code
+@@ -1862,9 +1870,9 @@ process_tx_window_query_item_err(struct zxdh_qp *qp,
+ ib_qp = &iwuqp->vqp.qp;
+ ret = zxdh_query_qpc(ib_qp, &qpc);
+ if (ret) {
+- verbs_err(verbs_get_ctx(ib_qp->context),
+- "process tx window query item query qpc failed:%d\n",
+- ret);
++ zxdh_dbg(ZXDH_DBG_QP,
++ "process tx window query item query qpc failed:%d\n",
++ ret);
+ return ZXDH_ERR_RETRY_ACK_ERR;
+ }
+ if (qpc.tx_last_ack_psn != qp->qp_last_ack_qsn)
+@@ -1876,9 +1884,9 @@ process_tx_window_query_item_err(struct zxdh_qp *qp,
+
+ ret = zxdh_reset_qp(ib_qp, ZXDH_RESET_RETRY_TX_ITEM_FLAG);
+ if (ret) {
+- verbs_err(verbs_get_ctx(ib_qp->context),
+- "process tx window query item reset qp failed:%d\n",
+- ret);
++ zxdh_dbg(ZXDH_DBG_QP,
++ "process tx window query item reset qp failed:%d\n",
++ ret);
+ return ZXDH_ERR_RETRY_ACK_ERR;
+ }
+ qp->qp_reset_cnt++;
+@@ -1899,8 +1907,8 @@ process_retry_ack_err(struct zxdh_qp *qp, struct zxdh_cq_poll_info *info)
+ ib_qp = &iwuqp->vqp.qp;
+ ret = zxdh_query_qpc(ib_qp, &qpc);
+ if (ret) {
+- verbs_err(verbs_get_ctx(ib_qp->context),
+- "process retry ack query qpc failed:%d\n", ret);
++ zxdh_dbg(ZXDH_DBG_QP, "process retry ack query qpc failed:%d\n",
++ ret);
+ return ZXDH_ERR_RETRY_ACK_ERR;
+ }
+ if (!(qpc.retry_cqe_sq_opcode >= ZXDH_RETRY_CQE_SQ_OPCODE_ERR &&
+@@ -1926,14 +1934,122 @@ process_retry_ack_err(struct zxdh_qp *qp, struct zxdh_cq_poll_info *info)
+ ZXDH_RETRY_CQE_SQ_OPCODE |
+ ZXDH_TX_READ_RETRY_FLAG_SET);
+ if (ret) {
+- verbs_err(verbs_get_ctx(ib_qp->context),
+- "process retry ack modify qpc failed:%d\n", ret);
++ zxdh_dbg(ZXDH_DBG_QP,
++ "process retry ack modify qpc failed:%d\n", ret);
+ return ZXDH_ERR_RETRY_ACK_ERR;
+ }
+ qp->cqe_retry_cnt++;
+ return ZXDH_ERR_RETRY_ACK_NOT_EXCEED_ERR;
+ }
+
++static enum zxdh_status_code
++zxdh_flush_sq_comp_info(struct zxdh_qp *qp, struct zxdh_cq_poll_info *info,
++ bool *move_cq_head)
++{
++ if (!ZXDH_RING_MORE_WORK(qp->sq_ring)) {
++ ZXDH_RING_INIT(qp->sq_ring, qp->sq_ring.size);
++ return ZXDH_ERR_Q_EMPTY;
++ }
++ do {
++ __le64 *sw_wqe;
++ __u64 wqe_qword;
++ __u64 wqe_idx;
++ wqe_idx = qp->sq_ring.tail;
++ sw_wqe = qp->sq_base[wqe_idx].elem;
++ get_64bit_val(sw_wqe, 0, &wqe_qword);
++ info->op_type = (__u8)FIELD_GET(ZXDHQPSQ_OPCODE, wqe_qword);
++ ZXDH_RING_SET_TAIL(qp->sq_ring,
++ wqe_idx +
++ qp->sq_wrtrk_array[wqe_idx].quanta);
++
++ if (info->op_type != ZXDH_OP_TYPE_NOP) {
++ info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
++ break;
++ }
++ } while (1);
++ qp->sq_flush_seen = true;
++ if (!ZXDH_RING_MORE_WORK(qp->sq_ring)) {
++ qp->sq_flush_complete = true;
++ ZXDH_RING_INIT(qp->sq_ring, qp->sq_ring.size);
++ } else
++ *move_cq_head = false;
++ return ZXDH_SUCCESS;
++}
++
++static enum zxdh_status_code zxdh_sq_comp_info(struct zxdh_qp *qp,
++ struct zxdh_cq_poll_info *info,
++ __u32 wqe_idx, __u64 qword0,
++ bool *move_cq_head)
++{
++ enum zxdh_status_code status_code;
++ switch (info->comp_status) {
++ case ZXDH_COMPL_STATUS_SUCCESS:
++ case ZXDH_COMPL_STATUS_UNKNOWN:
++ break;
++ case ZXDH_COMPL_STATUS_RETRY_ACK_ERR:
++ if (qp->qp_type == ZXDH_QP_TYPE_ROCE_RC) {
++ status_code = process_retry_ack_err(qp, info);
++ return (status_code == ZXDH_ERR_RETRY_ACK_ERR) ?
++ update_cq_poll_info(qp, info, wqe_idx,
++ qword0) :
++ status_code;
++ }
++ break;
++ case ZXDH_COMPL_STATUS_TX_WINDOW_QUERY_ITEM_ERR:
++ if (qp->qp_type == ZXDH_QP_TYPE_ROCE_RC) {
++ status_code =
++ process_tx_window_query_item_err(qp, info);
++ return (status_code == ZXDH_ERR_RETRY_ACK_ERR) ?
++ update_cq_poll_info(qp, info, wqe_idx,
++ qword0) :
++ status_code;
++ }
++ break;
++ case ZXDH_COMPL_STATUS_FLUSHED:
++ return zxdh_flush_sq_comp_info(qp, info, move_cq_head);
++ default:
++ break;
++ }
++ return update_cq_poll_info(qp, info, wqe_idx, qword0);
++}
++
++static enum zxdh_status_code zxdh_rq_comp_info(struct zxdh_qp *qp,
++ struct zxdh_cq_poll_info *info,
++ __u32 wqe_idx, __u64 qword2,
++ __u64 qword3, bool *move_cq_head)
++{
++ struct zxdh_uqp *iwuqp = NULL;
++ struct zxdh_usrq *iwusrq = NULL;
++ struct zxdh_srq *srq = NULL;
++ if (qp->is_srq) {
++ iwuqp = container_of(qp, struct zxdh_uqp, qp);
++ iwusrq = iwuqp->srq;
++ srq = &iwusrq->srq;
++ zxdh_free_srq_wqe(srq, wqe_idx);
++ info->wr_id = srq->srq_wrid_array[wqe_idx];
++ zxdh_get_cq_poll_info(qp, info, qword2, qword3);
++ } else {
++ if (unlikely(info->comp_status == ZXDH_COMPL_STATUS_FLUSHED ||
++ info->comp_status == ZXDH_COMPL_STATUS_UNKNOWN)) {
++ if (!ZXDH_RING_MORE_WORK(qp->rq_ring)) {
++ return ZXDH_ERR_Q_EMPTY;
++ }
++ wqe_idx = qp->rq_ring.tail;
++ }
++ info->wr_id = qp->rq_wrid_array[wqe_idx];
++ zxdh_get_cq_poll_info(qp, info, qword2, qword3);
++ ZXDH_RING_SET_TAIL(qp->rq_ring, wqe_idx + 1);
++ if (info->comp_status == ZXDH_COMPL_STATUS_FLUSHED) {
++ qp->rq_flush_seen = true;
++ if (!ZXDH_RING_MORE_WORK(qp->rq_ring))
++ qp->rq_flush_complete = true;
++ else
++ *move_cq_head = false;
++ }
++ }
++ return ZXDH_SUCCESS;
++}
++
+ /**
+ * zxdh_cq_poll_cmpl - get cq completion info
+ * @cq: hw cq
+@@ -1942,7 +2058,6 @@ process_retry_ack_err(struct zxdh_qp *qp, struct zxdh_cq_poll_info *info)
+ enum zxdh_status_code zxdh_cq_poll_cmpl(struct zxdh_cq *cq,
+ struct zxdh_cq_poll_info *info)
+ {
+- enum zxdh_status_code status_code;
+ __u64 comp_ctx, qword0, qword2, qword3;
+ __le64 *cqe;
+ struct zxdh_qp *qp;
+@@ -1951,9 +2066,6 @@ enum zxdh_status_code zxdh_cq_poll_cmpl(struct zxdh_cq *cq,
+ int ret_code;
+ bool move_cq_head = true;
+ __u8 polarity;
+- struct zxdh_usrq *iwusrq = NULL;
+- struct zxdh_srq *srq = NULL;
+- struct zxdh_uqp *iwuqp;
+
+ cqe = get_current_cqe(cq);
+
+@@ -1973,7 +2085,7 @@ enum zxdh_status_code zxdh_cq_poll_cmpl(struct zxdh_cq *cq,
+ ret_code = ZXDH_ERR_Q_DESTROYED;
+ goto exit;
+ }
+- iwuqp = container_of(qp, struct zxdh_uqp, qp);
++
+ info->qp_handle = (zxdh_qp_handle)(unsigned long)qp;
+ q_type = (__u8)FIELD_GET(ZXDH_CQ_SQ, qword0);
+ info->solicited_event = (bool)FIELD_GET(ZXDHCQ_SOEVENT, qword0);
+@@ -1993,74 +2105,19 @@ enum zxdh_status_code zxdh_cq_poll_cmpl(struct zxdh_cq *cq,
+
+ info->qp_id = (__u32)FIELD_GET(ZXDHCQ_QPID, qword2);
+ info->imm_valid = false;
+-
+- info->qp_handle = (zxdh_qp_handle)(unsigned long)qp;
+ switch (q_type) {
+- case ZXDH_CQE_QTYPE_RQ:
+- if (qp->is_srq) {
+- iwusrq = iwuqp->srq;
+- srq = &iwusrq->srq;
+- zxdh_free_srq_wqe(srq, wqe_idx);
+- info->wr_id = srq->srq_wrid_array[wqe_idx];
+- zxdh_get_cq_poll_info(qp, info, qword2, qword3);
+- } else {
+- if (unlikely(info->comp_status ==
+- ZXDH_COMPL_STATUS_FLUSHED ||
+- info->comp_status ==
+- ZXDH_COMPL_STATUS_UNKNOWN)) {
+- if (!ZXDH_RING_MORE_WORK(qp->rq_ring)) {
+- ret_code = ZXDH_ERR_Q_EMPTY;
+- goto exit;
+- }
+- wqe_idx = qp->rq_ring.tail;
+- }
+- info->wr_id = qp->rq_wrid_array[wqe_idx];
+- zxdh_get_cq_poll_info(qp, info, qword2, qword3);
+- ZXDH_RING_SET_TAIL(qp->rq_ring, wqe_idx + 1);
+- if (info->comp_status == ZXDH_COMPL_STATUS_FLUSHED) {
+- qp->rq_flush_seen = true;
+- if (!ZXDH_RING_MORE_WORK(qp->rq_ring))
+- qp->rq_flush_complete = true;
+- else
+- move_cq_head = false;
+- }
+- pring = &qp->rq_ring;
+- }
+- ret_code = ZXDH_SUCCESS;
+- break;
+ case ZXDH_CQE_QTYPE_SQ:
+- if (info->comp_status == ZXDH_COMPL_STATUS_RETRY_ACK_ERR &&
+- qp->qp_type == ZXDH_QP_TYPE_ROCE_RC) {
+- status_code = process_retry_ack_err(qp, info);
+- if (status_code == ZXDH_ERR_RETRY_ACK_ERR) {
+- update_cq_poll_info(qp, info, wqe_idx, qword0);
+- ret_code = ZXDH_SUCCESS;
+- } else {
+- ret_code = status_code;
+- }
+- } else if (info->comp_status ==
+- ZXDH_COMPL_STATUS_TX_WINDOW_QUERY_ITEM_ERR &&
+- qp->qp_type == ZXDH_QP_TYPE_ROCE_RC) {
+- status_code =
+- process_tx_window_query_item_err(qp, info);
+- if (status_code == ZXDH_ERR_RETRY_ACK_ERR) {
+- update_cq_poll_info(qp, info, wqe_idx, qword0);
+- ret_code = ZXDH_SUCCESS;
+- } else {
+- ret_code = status_code;
+- }
+- } else if (info->comp_status == ZXDH_COMPL_STATUS_FLUSHED) {
+- info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+- ZXDH_RING_INIT(qp->sq_ring, qp->sq_ring.size);
+- ret_code = ZXDH_SUCCESS;
+- } else {
+- update_cq_poll_info(qp, info, wqe_idx, qword0);
+- ret_code = ZXDH_SUCCESS;
+- }
++ ret_code = zxdh_sq_comp_info(qp, info, wqe_idx, qword0,
++ &move_cq_head);
++ pring = &qp->sq_ring;
++ break;
++ case ZXDH_CQE_QTYPE_RQ:
++ ret_code = zxdh_rq_comp_info(qp, info, wqe_idx, qword2, qword3,
++ &move_cq_head);
++ pring = &qp->rq_ring;
+ break;
+ default:
+- zxdh_dbg(verbs_get_ctx(iwuqp->vqp.qp.context), ZXDH_DBG_CQ,
+- "zxdh get cqe type unknow!\n");
++ zxdh_dbg(ZXDH_DBG_CQ, "zxdh get cqe type unknown!\n");
+ ret_code = ZXDH_ERR_Q_DESTROYED;
+ break;
+ }
+@@ -2538,6 +2595,16 @@ enum zxdh_status_code zxdh_srq_init(struct zxdh_srq *srq,
+ ZXDH_RING_INIT(srq->srq_list_ring, srq->srq_list_size);
+ srq->srq_ring.tail = srq->srq_size - 1;
+ srq->srq_list_polarity = 1;
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s srq_wqe_size_multiplier:%d srqshift:%d\n",
++ __func__, srq->srq_wqe_size_multiplier, srqshift);
++ zxdh_dbg(
++ ZXDH_DBG_SRQ,
++ "%s srq->srq_id:%d srq_base:0x%p srq_list_base:0x%p srq_db_base:0x%p\n",
++ __func__, srq->srq_id, srq->srq_base, srq->srq_list_base,
++ srq->srq_db_base);
++ zxdh_dbg(ZXDH_DBG_SRQ,
++ "%s srq->srq_id:%d srq_ring_size:%d srq->srq_list_size:%d\n",
++ __func__, srq->srq_id, srq_ring_size, srq->srq_list_size);
+ return 0;
+ }
+
+@@ -2558,4 +2625,6 @@ void zxdh_free_srq_wqe(struct zxdh_srq *srq, int wqe_index)
+ set_64bit_val(wqe, 0, hdr);
+
+ pthread_spin_unlock(&iwusrq->lock);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s srq->srq_id:%d wqe_index:%d\n", __func__,
++ srq->srq_id, wqe_index);
+ }
+diff --git a/providers/zrdma/zxdh_verbs.c b/providers/zrdma/zxdh_verbs.c
+index f67f8c7..9cf1240 100644
+--- a/providers/zrdma/zxdh_verbs.c
++++ b/providers/zrdma/zxdh_verbs.c
+@@ -1485,13 +1485,12 @@ static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx,
+ }
+
+ if (attr->cap.max_inline_data > dev_attrs->max_hw_inline) {
+- zxdh_dbg(&iwvctx->ibv_ctx, ZXDH_DBG_QP,
+- "max_inline_data over max_hw_inline\n");
++ zxdh_dbg(ZXDH_DBG_QP, "max_inline_data over max_hw_inline\n");
+ attr->cap.max_inline_data = dev_attrs->max_hw_inline;
+ }
+
+- zxdh_get_sq_wqe_shift(attr->cap.max_send_sge, attr->cap.max_inline_data,
+- &sqshift);
++ zxdh_get_sq_wqe_shift(attr->cap.max_send_sge,
++ attr->cap.max_inline_data, &sqshift);
+ status = zxdh_get_sqdepth(dev_attrs, attr->cap.max_send_wr, sqshift,
+ &sqdepth);
+ if (status) {
+@@ -2661,9 +2660,8 @@ static void zxdh_srq_wqe_init(struct zxdh_usrq *iwusrq)
+ __u64 hdr;
+
+ srq = &iwusrq->srq;
+- zxdh_dbg(verbs_get_ctx(iwusrq->ibv_srq.context), ZXDH_DBG_SRQ,
+- "%s head:%d tail:%d\n", __func__, srq->srq_ring.head,
+- srq->srq_ring.tail);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s head:%d tail:%d\n", __func__,
++ srq->srq_ring.head, srq->srq_ring.tail);
+ for (i = srq->srq_ring.head; i < srq->srq_ring.tail; i++) {
+ wqe = zxdh_get_srq_wqe(srq, i);
+
+@@ -2707,7 +2705,7 @@ static size_t zxdh_get_total_srq_size(struct zxdh_usrq *iwusrq, int srqdepth,
+ total_srq_queue_size + total_srq_list_size + total_srq_db_size;
+ iwusrq->total_buf_size = total_srq_size;
+ zxdh_dbg(
+- verbs_get_ctx(iwusrq->ibv_srq.context), ZXDH_DBG_SRQ,
++ ZXDH_DBG_SRQ,
+ "%s total_srq_queue_size:%ld total_srq_list_size:%ld total_srq_db_size:%ld srqdepth:%d\n",
+ __func__, total_srq_queue_size, total_srq_list_size,
+ total_srq_db_size, srqdepth);
+@@ -2730,7 +2728,7 @@ static int zxdh_alloc_srq_buf(struct zxdh_usrq *iwusrq,
+ (__le64 *)&info->srq_list_base[iwusrq->list_buf_size /
+ (sizeof(__u16))];
+ *(__le64 *)info->srq_db_base = ZXDH_SRQ_DB_INIT_VALUE;
+- zxdh_dbg(verbs_get_ctx(iwusrq->ibv_srq.context), ZXDH_DBG_SRQ,
++ zxdh_dbg(ZXDH_DBG_SRQ,
+ "%s srq_base:0x%p srq_list_base:0x%p srq_db_base:0x%p\n",
+ __func__, info->srq_base, info->srq_list_base,
+ info->srq_db_base);
+@@ -2782,7 +2780,7 @@ static int create_srq(struct ibv_pd *pd, struct zxdh_usrq *iwusrq,
+ info->srq_size = resp.actual_srq_size;
+ info->srq_list_size = resp.actual_srq_list_size;
+ zxdh_dbg(
+- verbs_get_ctx(iwusrq->ibv_srq.context), ZXDH_DBG_SRQ,
++ ZXDH_DBG_SRQ,
+ "%s info->srq_id:%d info->srq_size:%d info->srq_list_size:%d\n",
+ __func__, info->srq_id, info->srq_size, info->srq_list_size);
+
+@@ -2814,19 +2812,21 @@ static int zxdh_vmapped_srq(struct zxdh_usrq *iwusrq, struct ibv_pd *pd,
+ ret = zxdh_alloc_srq_buf(iwusrq, info, total_srq_size);
+ if (ret)
+ return -ENOMEM;
+- zxdh_dbg(verbs_get_ctx(iwusrq->ibv_srq.context), ZXDH_DBG_SRQ,
+- "%s srq_pages:%ld srq_list_pages:%ld\n", __func__, srq_pages,
+- srq_list_pages);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s srq_pages:%ld srq_list_pages:%ld\n",
++ __func__, srq_pages, srq_list_pages);
+
+ ret = zxdh_reg_srq_mr(pd, info, total_srq_size, srq_pages,
+ srq_list_pages, iwusrq);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s %d ret:%d\n", __func__, __LINE__, ret);
+ if (ret) {
+ errno = ret;
+ goto err_dereg_srq_mr;
+ }
+ ret = create_srq(pd, iwusrq, attr, info);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s %d ret:%d\n", __func__, __LINE__, ret);
+ if (ret)
+ goto err_srq;
++
+ return 0;
+ err_srq:
+ ibv_cmd_dereg_mr(&iwusrq->vmr);
+@@ -2907,8 +2907,7 @@ struct ibv_srq *zxdh_ucreate_srq(struct ibv_pd *pd,
+ dev_attrs = &iwvctx->dev_attrs;
+
+ if ((zxdh_check_srq_init_attr(srq_init_attr, dev_attrs)) != 0) {
+- verbs_err(&iwvctx->ibv_ctx,
+- "zxdh_check_srq_init_attr failed\n");
++ zxdh_dbg(ZXDH_DBG_SRQ, "zxdh_check_srq_init_attr failed\n");
+ errno = EINVAL;
+ return NULL;
+ }
+@@ -2922,12 +2921,12 @@ struct ibv_srq *zxdh_ucreate_srq(struct ibv_pd *pd,
+ srq_init_attr->attr.max_wr, srqshift,
+ &srqdepth);
+ zxdh_dbg(
+- &iwvctx->ibv_ctx, ZXDH_DBG_SRQ,
++ ZXDH_DBG_SRQ,
+ "%s %d status:%d srqshift:%d srqdepth:%d dev_attrs->max_hw_srq_quanta:%d srq_init_attr->attr.max_wr:%d\n",
+ __func__, __LINE__, status, srqshift, srqdepth,
+ dev_attrs->max_hw_srq_quanta, srq_init_attr->attr.max_wr);
+ if (status != 0) {
+- verbs_err(&iwvctx->ibv_ctx, "zxdh_get_srqdepth failed\n");
++ zxdh_dbg(ZXDH_DBG_SRQ, "zxdh_get_srqdepth failed\n");
+ errno = EINVAL;
+ return NULL;
+ }
+@@ -2940,19 +2939,21 @@ struct ibv_srq *zxdh_ucreate_srq(struct ibv_pd *pd,
+
+ if (zxdh_init_iwusrq(iwusrq, srq_init_attr, srqdepth, srqshift, &info,
+ dev_attrs)) {
+- verbs_err(&iwvctx->ibv_ctx, "calloc srq_wrid_array failed\n");
++ zxdh_dbg(ZXDH_DBG_SRQ, "calloc srq_wrid_array failed\n");
+ goto err_srq_wrid_array;
+ }
+ status = zxdh_vmapped_srq(iwusrq, pd, srq_init_attr, srqdepth, &info);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s %d status:%d\n", __func__, __LINE__, status);
+ if (status) {
+- verbs_err(&iwvctx->ibv_ctx, "zxdh_vmapped_srq failed\n");
++ zxdh_dbg(ZXDH_DBG_SRQ, "zxdh_vmapped_srq failed\n");
+ errno = status;
+ goto err_vmapped_srq;
+ }
+
+ status = zxdh_srq_init(&iwusrq->srq, &info);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s %d status:%d\n", __func__, __LINE__, status);
+ if (status) {
+- verbs_err(&iwvctx->ibv_ctx, "zxdh_srq_init failed\n");
++ zxdh_dbg(ZXDH_DBG_SRQ, "zxdh_srq_init failed\n");
+ errno = EINVAL;
+ goto err_free_srq_init;
+ }
+@@ -2960,9 +2961,8 @@ struct ibv_srq *zxdh_ucreate_srq(struct ibv_pd *pd,
+
+ srq_init_attr->attr.max_wr = (srqdepth - ZXDH_SRQ_RSVD) >> srqshift;
+
+- zxdh_dbg(&iwvctx->ibv_ctx, ZXDH_DBG_SRQ,
+- "iwusrq->srq_id:%d info.srq_size:%d\n", iwusrq->srq_id,
+- info.srq_size);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s iwusrq->srq_id:%d info.srq_size:%d\n",
++ __func__, iwusrq->srq_id, info.srq_size);
+ return &iwusrq->ibv_srq;
+
+ err_free_srq_init:
+@@ -2976,6 +2976,7 @@ err_srq_wrid_array:
+ errno = EINVAL;
+ err_free_srq:
+ free(iwusrq);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s %d\n", __func__, __LINE__);
+ return NULL;
+ }
+
+@@ -2996,8 +2997,8 @@ int zxdh_udestroy_srq(struct ibv_srq *srq)
+ ret = zxdh_destroy_vmapped_srq(iwusrq);
+ if (ret)
+ goto err;
+- zxdh_dbg(verbs_get_ctx(iwusrq->ibv_srq.context), ZXDH_DBG_SRQ,
+- "iwusrq->srq_id:%d\n", iwusrq->srq_id);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s iwusrq->srq_id:%d\n", __func__,
++ iwusrq->srq_id);
+ zxdh_free_hw_buf(iwusrq->srq.srq_base, iwusrq->total_buf_size);
+ free(iwusrq->srq.srq_wrid_array);
+ free(iwusrq);
+@@ -3024,9 +3025,8 @@ int zxdh_umodify_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr,
+ sizeof(cmd));
+ if (ret == 0)
+ iwusrq->srq_limit = srq_attr->srq_limit;
+- zxdh_dbg(verbs_get_ctx(iwusrq->ibv_srq.context), ZXDH_DBG_SRQ,
+- "iwusrq->srq_id:%d srq_attr->srq_limit:%d\n", iwusrq->srq_id,
+- srq_attr->srq_limit);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s iwusrq->srq_id:%d srq_attr->srq_limit:%d\n",
++ __func__, iwusrq->srq_id, srq_attr->srq_limit);
+ return ret;
+ }
+
+@@ -3090,6 +3090,13 @@ static void zxdh_fill_srq_wqe(struct zxdh_usrq *iwusrq, struct zxdh_srq *srq,
+
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+ set_64bit_val(wqe_64, 0, hdr);
++
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s wqe_64[0]:0x%llx\n", __func__, wqe_64[0]);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s wqe_64[1]:0x%llx\n", __func__, wqe_64[1]);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s wqe_64[2]:0x%llx\n", __func__, wqe_64[2]);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s wqe_64[3]:0x%llx\n", __func__, wqe_64[3]);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s wqe_64[4]:0x%llx\n", __func__, wqe_64[4]);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s wqe_64[5]:0x%llx\n", __func__, wqe_64[5]);
+ }
+
+ static void zxdh_get_wqe_index(struct zxdh_srq *srq, __le16 *wqe_16, __u16 *buf,
+@@ -3112,6 +3119,7 @@ static void zxdh_update_srq_db_base(struct zxdh_usrq *iwusrq, __u16 idx)
+
+ udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
+ set_64bit_val(iwusrq->srq.srq_db_base, 0, hdr);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s srq_db_base(hdr):0x%llx\n", __func__, hdr);
+ }
+
+ /**
+@@ -3140,8 +3148,7 @@ int zxdh_upost_srq_recv(struct ibv_srq *srq, struct ibv_recv_wr *recv_wr,
+ buf_size = iwusrq->max_wr * sizeof(__u16);
+ buf = malloc(buf_size);
+ if (buf == NULL) {
+- verbs_err(verbs_get_ctx(iwusrq->ibv_srq.context),
+- "malloc buf_size failed\n");
++ zxdh_dbg(ZXDH_DBG_SRQ, "malloc buf_size failed\n");
+ err = -ENOMEM;
+ goto out;
+ }
+@@ -3161,9 +3168,8 @@ int zxdh_upost_srq_recv(struct ibv_srq *srq, struct ibv_recv_wr *recv_wr,
+ zxdh_fill_srq_wqe(iwusrq, hw_srq, wqe_64, recv_wr);
+ }
+
+- zxdh_dbg(verbs_get_ctx(iwusrq->ibv_srq.context), ZXDH_DBG_SRQ,
+- "nreq:%d err:%d iwusrq->srq_id:%d\n", nreq, err,
+- iwusrq->srq_id);
++ zxdh_dbg(ZXDH_DBG_SRQ, "%s nreq:%d err:%d iwusrq->srq_id:%d\n",
++ __func__, nreq, err, iwusrq->srq_id);
+
+ if (err == 0) {
+ zxdh_get_wqe_index(hw_srq, wqe_16, buf, nreq, &idx);
+--
+2.27.0
+
diff --git a/0051-libzrdma-Fix-capability-related-bugs.patch b/0051-libzrdma-Fix-capability-related-bugs.patch
new file mode 100644
index 0000000..643e186
--- /dev/null
+++ b/0051-libzrdma-Fix-capability-related-bugs.patch
@@ -0,0 +1,676 @@
+From 2db3c164aea36d297eb3db7c54804037c2754c80 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E6=9D=8E=E5=AF=8C=E8=89=B3?=
+Date: Fri, 28 Mar 2025 15:56:16 +0800
+Subject: [PATCH] libzrdma: Fix capability-related bugs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
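+Among the capability fixes, zxdh_qp_round_up() and zxdh_cq_round_up()
+now reject a zero depth, and zxdh_fragcnt_to_quanta_sq() becomes an
+inline helper using a shift. A standalone sketch of the rounding,
+assuming the same bit-smearing approach as the driver:
+
+    /* Round up to the next power of two. Without the guard,
+     * d-- underflows 0 to 0xffffffff and the result wraps back to 0
+     * only by accident; the guard makes the zero case explicit. */
+    static int round_up_pow2(unsigned int d)
+    {
+            int s;
+            if (d == 0)
+                    return 0;
+            for (d--, s = 1; s <= 16; s *= 2)
+                    d |= d >> s;
+            return ++d;
+    }
+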
+Signed-off-by: 李富艳
+---
+ providers/zrdma/zxdh_defs.h | 33 +-----
+ providers/zrdma/zxdh_hw.c | 47 ++++-----
+ providers/zrdma/zxdh_verbs.c | 196 +++++++++++++++++++++--------------
+ providers/zrdma/zxdh_verbs.h | 2 +-
+ 4 files changed, 141 insertions(+), 137 deletions(-)
+
+diff --git a/providers/zrdma/zxdh_defs.h b/providers/zrdma/zxdh_defs.h
+index 8772e7b..ec0bebe 100644
+--- a/providers/zrdma/zxdh_defs.h
++++ b/providers/zrdma/zxdh_defs.h
+@@ -41,7 +41,7 @@
+ #define ZXDH_SQ_WQE_BYTESIZE 32
+ #define ZXDH_SRQ_WQE_MIN_SIZE 16
+
+-#define ZXDH_SQ_RSVD 258
++#define ZXDH_SQ_RSVD 1
+ #define ZXDH_RQ_RSVD 1
+ #define ZXDH_SRQ_RSVD 1
+
+@@ -252,29 +252,7 @@
+ (_retcode) = ZXDH_ERR_RING_FULL; \
+ } \
+ }
+-#define ZXDH_SQ_RING_MOVE_HEAD(_ring, _retcode) \
+- { \
+- register __u32 size; \
+- size = (_ring).size; \
+- if (!ZXDH_SQ_RING_FULL_ERR(_ring)) { \
+- (_ring).head = ((_ring).head + 1) % size; \
+- (_retcode) = 0; \
+- } else { \
+- (_retcode) = ZXDH_ERR_RING_FULL; \
+- } \
+- }
+-#define ZXDH_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+- { \
+- register __u32 size; \
+- size = (_ring).size; \
+- if ((ZXDH_RING_USED_QUANTA(_ring) + (_count)) < \
+- (size - 256)) { \
+- (_ring).head = ((_ring).head + (_count)) % size; \
+- (_retcode) = 0; \
+- } else { \
+- (_retcode) = ZXDH_ERR_RING_FULL; \
+- } \
+- }
++
+ #define ZXDH_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
+ (_ring).head = ((_ring).head + (_count)) % (_ring).size
+
+@@ -298,13 +276,6 @@
+ #define ZXDH_ERR_RING_FULL3(_ring) \
+ ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 3)))
+
+-#define ZXDH_SQ_RING_FULL_ERR(_ring) \
+- ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 257)))
+-
+-#define ZXDH_ERR_SQ_RING_FULL2(_ring) \
+- ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 258)))
+-#define ZXDH_ERR_SQ_RING_FULL3(_ring) \
+- ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 259)))
+ #define ZXDH_RING_MORE_WORK(_ring) ((ZXDH_RING_USED_QUANTA(_ring) != 0))
+
+ #define ZXDH_RING_USED_QUANTA(_ring) \
+diff --git a/providers/zrdma/zxdh_hw.c b/providers/zrdma/zxdh_hw.c
+index fb8f016..0ea5a85 100644
+--- a/providers/zrdma/zxdh_hw.c
++++ b/providers/zrdma/zxdh_hw.c
+@@ -10,14 +10,6 @@
+ #include
+ #include
+ #include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+ #define ERROR_CODE_VALUE 65
+
+ static inline void qp_tx_psn_add(__u32 *x, __u32 y, __u16 mtu)
+@@ -30,11 +22,17 @@ static inline void qp_tx_psn_add(__u32 *x, __u32 y, __u16 mtu)
+ *x = (*x + chunks) & 0xffffff;
+ }
+
+-int zxdh_get_write_imm_split_switch(void)
++/**
++ * zxdh_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
++ * @frag_cnt: number of fragments
++ * @quanta: quanta for frag_cnt
++ */
++static inline enum zxdh_status_code zxdh_fragcnt_to_quanta_sq(__u32 frag_cnt, __u16 *quanta)
+ {
+- char *env;
+- env = getenv("ZXDH_WRITE_IMM_SPILT_ENABLE");
+- return (env != NULL) ? atoi(env) : 0;
++ if (unlikely(frag_cnt > ZXDH_MAX_SQ_FRAG))
++ return ZXDH_ERR_INVALID_FRAG_COUNT;
++ *quanta = (frag_cnt >> 1) + 1;
++ return 0;
+ }
+
+ /**
+@@ -2153,6 +2151,9 @@ int zxdh_qp_round_up(__u32 wqdepth)
+ {
+ int scount = 1;
+
++ if (wqdepth == 0)
++ return 0;
++
+ for (wqdepth--; scount <= 16; scount *= 2)
+ wqdepth |= wqdepth >> scount;
+
+@@ -2167,6 +2168,9 @@ int zxdh_cq_round_up(__u32 wqdepth)
+ {
+ int scount = 1;
+
++ if (wqdepth == 0)
++ return 0;
++
+ for (wqdepth--; scount <= 16; scount *= 2)
+ wqdepth |= wqdepth >> scount;
+
+@@ -2364,7 +2368,7 @@ enum zxdh_status_code zxdh_cq_init(struct zxdh_cq *cq,
+ void zxdh_clean_cq(void *q, struct zxdh_cq *cq)
+ {
+ __le64 *cqe;
+- __u64 qword3, comp_ctx;
++ __u64 qword0, comp_ctx;
+ __u32 cq_head;
+ __u8 polarity, temp;
+
+@@ -2377,8 +2381,8 @@ void zxdh_clean_cq(void *q, struct zxdh_cq *cq)
+ .buf;
+ else
+ cqe = cq->cq_base[cq_head].buf;
+- get_64bit_val(cqe, 24, &qword3);
+- polarity = (__u8)FIELD_GET(ZXDH_CQ_VALID, qword3);
++ get_64bit_val(cqe, 0, &qword0);
++ polarity = (__u8)FIELD_GET(ZXDH_CQ_VALID, qword0);
+
+ if (polarity != temp)
+ break;
+@@ -2432,19 +2436,6 @@ enum zxdh_status_code zxdh_nop(struct zxdh_qp *qp, __u64 wr_id, bool signaled,
+ return 0;
+ }
+
+-/**
+- * zxdh_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
+- * @frag_cnt: number of fragments
+- * @quanta: quanta for frag_cnt
+- */
+-enum zxdh_status_code zxdh_fragcnt_to_quanta_sq(__u32 frag_cnt, __u16 *quanta)
+-{
+- if (frag_cnt > ZXDH_MAX_SQ_FRAG)
+- return ZXDH_ERR_INVALID_FRAG_COUNT;
+- *quanta = frag_cnt / 2 + 1;
+- return 0;
+-}
+-
+ /**
+ * zxdh_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
+ * @frag_cnt: number of fragments
+diff --git a/providers/zrdma/zxdh_verbs.c b/providers/zrdma/zxdh_verbs.c
+index 9cf1240..39ff401 100644
+--- a/providers/zrdma/zxdh_verbs.c
++++ b/providers/zrdma/zxdh_verbs.c
+@@ -59,6 +59,7 @@ static int zxdh_get_inline_data(uint8_t *inline_data, struct ibv_send_wr *ib_wr,
+ while (num < ib_wr->num_sge) {
+ *len += ib_wr->sg_list[num].length;
+ if (*len > ZXDH_MAX_INLINE_DATA_SIZE) {
++ printf("err:inline bytes over max inline length\n");
+ return -EINVAL;
+ }
+ memcpy(inline_data + offset,
+@@ -343,12 +344,8 @@ static void zxdh_free_hw_buf(void *buf, size_t size)
+ */
+ static inline int get_cq_size(int ncqe)
+ {
+- ncqe++;
+-
+- /* Completions with immediate require 1 extra entry */
+ if (ncqe < ZXDH_U_MINCQ_SIZE)
+ ncqe = ZXDH_U_MINCQ_SIZE;
+-
+ return ncqe;
+ }
+
+@@ -380,6 +377,7 @@ static struct ibv_cq_ex *ucreate_cq(struct ibv_context *context,
+ size_t total_size;
+ __u32 cq_pages;
+ int ret, ncqe;
++ __u64 resize_supported;
+
+ iwvctx = container_of(context, struct zxdh_uvcontext, ibv_ctx.context);
+ dev_attrs = &iwvctx->dev_attrs;
+@@ -390,6 +388,13 @@ static struct ibv_cq_ex *ucreate_cq(struct ibv_context *context,
+ return NULL;
+ }
+
++ info.cq_size = get_cq_size(attr_ex->cqe);
++ info.cq_size = zxdh_cq_round_up(info.cq_size);
++ if (info.cq_size > dev_attrs->max_hw_cq_size) {
++ errno = EINVAL;
++ return NULL;
++ }
++
+ /* save the cqe requested by application */
+ ncqe = attr_ex->cqe;
+ iwucq = calloc(1, sizeof(*iwucq));
+@@ -404,14 +409,13 @@ static struct ibv_cq_ex *ucreate_cq(struct ibv_context *context,
+ }
+
+ iwucq->resize_enable = false;
+- info.cq_size = get_cq_size(attr_ex->cqe);
+- info.cq_size = zxdh_cq_round_up(info.cq_size);
+ iwucq->comp_vector = attr_ex->comp_vector;
+ list_head_init(&iwucq->resize_list);
+ total_size = get_cq_total_bytes(info.cq_size);
+ cq_pages = total_size >> ZXDH_HW_PAGE_SHIFT;
++ resize_supported = dev_attrs->feature_flags & ZXDH_FEATURE_CQ_RESIZE;
+
+- if (!(dev_attrs->feature_flags & ZXDH_FEATURE_CQ_RESIZE))
++ if (!resize_supported)
+ total_size = (cq_pages << ZXDH_HW_PAGE_SHIFT) +
+ ZXDH_DB_SHADOW_AREA_SIZE;
+
+@@ -436,7 +440,7 @@ static struct ibv_cq_ex *ucreate_cq(struct ibv_context *context,
+
+ iwucq->vmr.ibv_mr.pd = &iwvctx->iwupd->ibv_pd;
+
+- if (dev_attrs->feature_flags & ZXDH_FEATURE_CQ_RESIZE) {
++ if (resize_supported) {
+ info.shadow_area = zxdh_alloc_hw_buf(ZXDH_DB_SHADOW_AREA_SIZE);
+ if (!info.shadow_area)
+ goto err_dereg_mr;
+@@ -457,7 +461,6 @@ static struct ibv_cq_ex *ucreate_cq(struct ibv_context *context,
+ }
+
+ iwucq->vmr_shadow_area.ibv_mr.pd = &iwvctx->iwupd->ibv_pd;
+-
+ } else {
+ info.shadow_area = (__le64 *)((__u8 *)info.cq_base +
+ (cq_pages << ZXDH_HW_PAGE_SHIFT));
+@@ -491,7 +494,9 @@ err_dereg_shadow:
+ ibv_cmd_dereg_mr(&iwucq->vmr);
+ if (iwucq->vmr_shadow_area.ibv_mr.handle) {
+ ibv_cmd_dereg_mr(&iwucq->vmr_shadow_area);
+- zxdh_free_hw_buf(info.shadow_area, ZXDH_DB_SHADOW_AREA_SIZE);
++ if (resize_supported)
++ zxdh_free_hw_buf(info.shadow_area,
++ ZXDH_DB_SHADOW_AREA_SIZE);
+ }
+ err_dereg_mr:
+ zxdh_free_hw_buf(info.cq_base, total_size);
+@@ -553,7 +558,7 @@ static int zxdh_process_resize_list(struct zxdh_ucq *iwucq,
+ struct zxdh_cq_buf *cq_buf, *next;
+ int cq_cnt = 0;
+
+- list_for_each_safe (&iwucq->resize_list, cq_buf, next, list) {
++ list_for_each_safe(&iwucq->resize_list, cq_buf, next, list) {
+ if (cq_buf == lcqe_buf)
+ return cq_cnt;
+
+@@ -774,7 +779,8 @@ static inline void zxdh_process_cqe(struct ibv_wc *entry,
+ *
+ * Returns the internal zxdh device error code or 0 on success
+ */
+-static int zxdh_poll_one(struct zxdh_cq *cq, struct zxdh_cq_poll_info *cur_cqe,
++static int zxdh_poll_one(struct zxdh_cq *cq,
++ struct zxdh_cq_poll_info *cur_cqe,
+ struct ibv_wc *entry)
+ {
+ int ret = zxdh_cq_poll_cmpl(cq, cur_cqe);
+@@ -811,7 +817,7 @@ static int __zxdh_upoll_resize_cq(struct zxdh_ucq *iwucq, int num_entries,
+ int ret;
+
+ /* go through the list of previously resized CQ buffers */
+- list_for_each_safe (&iwucq->resize_list, cq_buf, next, list) {
++ list_for_each_safe(&iwucq->resize_list, cq_buf, next, list) {
+ while (npolled < num_entries) {
+ ret = zxdh_poll_one(&cq_buf->cq, cur_cqe,
+ entry ? entry + npolled : NULL);
+@@ -829,6 +835,7 @@ static int __zxdh_upoll_resize_cq(struct zxdh_ucq *iwucq, int num_entries,
+ cq_new_cqe = true;
+ continue;
+ }
++ printf("__zrdma_upoll_cq resize goto error failed\n");
+ goto error;
+ }
+
+@@ -856,6 +863,7 @@ static int __zxdh_upoll_resize_cq(struct zxdh_ucq *iwucq, int num_entries,
+ cq_new_cqe = true;
+ continue;
+ }
++ printf("__zrdma_upoll_cq goto error failed\n");
+ goto error;
+ }
+ if (cq_new_cqe)
+@@ -1038,7 +1046,7 @@ static uint64_t zxdh_wc_read_completion_wallclock_ns(struct ibv_cq_ex *ibvcq_ex)
+ container_of(ibvcq_ex, struct zxdh_ucq, verbs_cq.cq_ex);
+
+ /* RTT is in usec */
+- return iwucq->cur_cqe.tcp_seq_num_rtt * 1000;
++ return (uint64_t)iwucq->cur_cqe.tcp_seq_num_rtt * 1000;
+ }
+
+ static enum ibv_wc_opcode zxdh_wc_read_opcode(struct ibv_cq_ex *ibvcq_ex)
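The added cast widens before the multiply: tcp_seq_num_rtt is a 32-bit field, so the old expression overflowed once the product passed 2^32 (any value above about 4.29e6). A small demonstration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t rtt = 5000000;                  /* usec-scale CQE value */
            uint64_t late = (uint64_t)(rtt * 1000);  /* 32-bit multiply, truncates */
            uint64_t early = (uint64_t)rtt * 1000;   /* widened first: correct */

            /* prints late=705032704 early=5000000000 */
            printf("late=%" PRIu64 " early=%" PRIu64 "\n", late, early);
            return 0;
    }
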
+@@ -1682,6 +1690,37 @@ int zxdh_uquery_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask,
+ sizeof(cmd));
+ }
+
++/**
++ * zxdh_clean_cqes - clean cq entries for qp
++ * @qp: qp for which completions are cleaned
++ * @iwucq: cq to be cleaned
++ */
++static void zxdh_clean_cqes(struct zxdh_qp *qp, struct zxdh_ucq *iwucq)
++{
++ struct zxdh_cq *ukcq = &iwucq->cq;
++ int ret;
++
++ ret = pthread_spin_lock(&iwucq->lock);
++ if (ret)
++ return;
++
++ zxdh_clean_cq(qp, ukcq);
++ pthread_spin_unlock(&iwucq->lock);
++}
++
++static void zxdh_init_qp_indices(struct zxdh_qp *qp)
++{
++ __u32 sq_ring_size;
++ sq_ring_size = ZXDH_RING_SIZE(qp->sq_ring);
++ ZXDH_RING_INIT(qp->sq_ring, sq_ring_size);
++ ZXDH_RING_INIT(qp->initial_ring, sq_ring_size);
++ qp->swqe_polarity = 0;
++ qp->swqe_polarity_deferred = 1;
++ qp->rwqe_polarity = 0;
++ qp->rwqe_signature = 0;
++ ZXDH_RING_INIT(qp->rq_ring, qp->rq_size);
++}
++
+ /**
+ * zxdh_umodify_qp - send qp modify to driver
+ * @qp: qp to modify
+@@ -1705,6 +1744,18 @@ int zxdh_umodify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
+ } else {
+ ret = ibv_cmd_modify_qp(qp, attr, attr_mask, &cmd, sizeof(cmd));
+ }
++
++ if (!ret &&
++ (attr_mask & IBV_QP_STATE) &&
++ attr->qp_state == IBV_QPS_RESET) {
++ if (iwuqp->send_cq)
++ zxdh_clean_cqes(&iwuqp->qp, iwuqp->send_cq);
++
++ if (iwuqp->recv_cq && iwuqp->recv_cq != iwuqp->send_cq)
++ zxdh_clean_cqes(&iwuqp->qp, iwuqp->recv_cq);
++ zxdh_init_qp_indices(&iwuqp->qp);
++ }
++
+ if (!ret && (attr_mask & IBV_QP_PATH_MTU) &&
+ qp->qp_type == IBV_QPT_RC) {
+ mtu = mtu_enum_to_int(attr->path_mtu);
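The new branch is reached through the standard verbs call; a minimal caller-side sketch (qp is assumed to be an existing QP, error handling kept to a perror):

    #include <infiniband/verbs.h>
    #include <stdio.h>

    /* qp: an existing QP owned by the caller */
    static void reset_qp(struct ibv_qp *qp)
    {
            struct ibv_qp_attr attr = { .qp_state = IBV_QPS_RESET };

            /* completions still queued for this QP are stale after RESET; the
             * driver now drains both CQs and rewinds its SQ/RQ ring indices */
            if (ibv_modify_qp(qp, &attr, IBV_QP_STATE))
                    perror("ibv_modify_qp(RESET)");
    }
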
+@@ -1736,24 +1787,6 @@ static void zxdh_issue_flush(struct ibv_qp *qp, bool sq_flush, bool rq_flush)
+ sizeof(cmd_ex), &resp, sizeof(resp));
+ }
+
+-/**
+- * zxdh_clean_cqes - clean cq entries for qp
+- * @qp: qp for which completions are cleaned
+- * @iwcq: cq to be cleaned
+- */
+-static void zxdh_clean_cqes(struct zxdh_qp *qp, struct zxdh_ucq *iwucq)
+-{
+- struct zxdh_cq *cq = &iwucq->cq;
+- int ret;
+-
+- ret = pthread_spin_lock(&iwucq->lock);
+- if (ret)
+- return;
+-
+- zxdh_clean_cq(qp, cq);
+- pthread_spin_unlock(&iwucq->lock);
+-}
+-
+ /**
+ * zxdh_udestroy_qp - destroy qp
+ * @qp: qp to destroy
+@@ -1851,16 +1884,10 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+ struct zxdh_umr *umr = NULL;
+ __u64 mr_va = 0, mw_va = 0, value_dffer = 0, mw_pa_pble_index = 0;
+ __u16 mr_offset = 0;
+- iwvctx = container_of(ib_qp->context, struct zxdh_uvcontext,
+- ibv_ctx.context);
+- if (ib_qp->state != IBV_QPS_RTS) {
+- *bad_wr = ib_wr;
+- verbs_err(&iwvctx->ibv_ctx, "zrdma: post send at state:%d\n",
+- ib_qp->state);
+- return -EINVAL;
+- }
+
+ iwuqp = container_of(ib_qp, struct zxdh_uqp, vqp.qp);
++ iwvctx = container_of(ib_qp->context, struct zxdh_uvcontext,
++ ibv_ctx.context);
+ dev_attrs = &iwvctx->dev_attrs;
+
+ err = pthread_spin_lock(&iwuqp->lock);
+@@ -1918,9 +1945,7 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+ iwuqp->inline_data, ib_wr,
+ &info.op.inline_rdma_send.len);
+ if (ret) {
+- verbs_err(
+- &iwvctx->ibv_ctx,
+- "zrdma: get inline data fail\n");
++ printf("err:zxdh_get_inline_data fail\n");
+ pthread_spin_unlock(&iwuqp->lock);
+ return -EINVAL;
+ }
+@@ -1937,11 +1962,11 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+ ib_wr->wr.ud.remote_qkey;
+ info.op.inline_rdma_send.dest_qp =
+ ib_wr->wr.ud.remote_qpn;
+- ret = zxdh_ud_inline_send(&iwuqp->qp,
+- &info, false);
++ ret = zxdh_ud_inline_send(
++ &iwuqp->qp, &info, false);
+ } else {
+- ret = zxdh_rc_inline_send(&iwuqp->qp,
+- &info, false);
++ ret = zxdh_rc_inline_send(
++ &iwuqp->qp, &info, false);
+ }
+ } else {
+ info.op.send.num_sges = ib_wr->num_sge;
+@@ -1960,10 +1985,10 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+ info.op.inline_rdma_send.dest_qp =
+ ib_wr->wr.ud.remote_qpn;
+ ret = zxdh_ud_send(&iwuqp->qp, &info,
+- false);
++ false);
+ } else {
+ ret = zxdh_rc_send(&iwuqp->qp, &info,
+- false);
++ false);
+ }
+ }
+ if (ret)
+@@ -1995,9 +2020,7 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+ iwuqp->inline_data, ib_wr,
+ &info.op.inline_rdma_write.len);
+ if (ret) {
+- verbs_err(
+- &iwvctx->ibv_ctx,
+- "zrdma: get inline data fail\n");
++ printf("err:zxdh_get_inline_data fail\n");
+ pthread_spin_unlock(&iwuqp->lock);
+ return -EINVAL;
+ }
+@@ -2007,8 +2030,8 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+ ib_wr->wr.rdma.remote_addr;
+ info.op.inline_rdma_write.rem_addr.stag =
+ ib_wr->wr.rdma.rkey;
+- ret = zxdh_inline_rdma_write(&iwuqp->qp, &info,
+- false);
++ ret = zxdh_inline_rdma_write(&iwuqp->qp,
++ &info, false);
+ } else {
+ info.op.rdma_write.lo_sg_list =
+ (void *)ib_wr->sg_list;
+@@ -2017,7 +2040,8 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+ ib_wr->wr.rdma.remote_addr;
+ info.op.rdma_write.rem_addr.stag =
+ ib_wr->wr.rdma.rkey;
+- ret = zxdh_rdma_write(&iwuqp->qp, &info, false);
++ ret = zxdh_rdma_write(&iwuqp->qp, &info,
++ false);
+ }
+ if (ret)
+ err = (ret == ZXDH_ERR_QP_TOOMANY_WRS_POSTED) ?
+@@ -2036,7 +2060,8 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+
+ info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
+- ret = zxdh_rdma_read(&iwuqp->qp, &info, false, false);
++ ret = zxdh_rdma_read(&iwuqp->qp, &info, false,
++ false);
+ if (ret)
+ err = (ret == ZXDH_ERR_QP_TOOMANY_WRS_POSTED) ?
+ ENOMEM :
+@@ -2383,20 +2408,17 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+
+ ret = zxdh_mw_bind(&iwuqp->qp, &info, false);
+ if (ret)
+- err = (ret == ZXDH_ERR_QP_TOOMANY_WRS_POSTED) ?
+- ENOMEM :
+- EINVAL;
++ err = ENOMEM;
++
+ break;
+ case IBV_WR_LOCAL_INV:
+ info.op_type = ZXDH_OP_TYPE_LOCAL_INV;
+ info.op.inv_local_stag.target_stag =
+ ib_wr->invalidate_rkey;
+ ret = zxdh_stag_local_invalidate(&iwuqp->qp, &info,
+- true);
++ true);
+ if (ret)
+- err = (ret == ZXDH_ERR_QP_TOOMANY_WRS_POSTED) ?
+- ENOMEM :
+- EINVAL;
++ err = ENOMEM;
+ break;
+ default:
+ /* error */
+@@ -2441,6 +2463,7 @@ int zxdh_upost_recv(struct ibv_qp *ib_qp, struct ibv_recv_wr *ib_wr,
+
+ if (unlikely(ib_qp->state == IBV_QPS_RESET || ib_qp->srq)) {
+ *bad_wr = ib_wr;
++ printf("err:post recv at reset or using srq\n");
+ return -EINVAL;
+ }
+
+@@ -2490,9 +2513,18 @@ error:
+ struct ibv_ah *zxdh_ucreate_ah(struct ibv_pd *ibpd, struct ibv_ah_attr *attr)
+ {
+ struct zxdh_uah *ah;
++ union ibv_gid sgid;
+ struct zxdh_ucreate_ah_resp resp;
+ int err;
+
++ memset(&resp, 0, sizeof(resp));
++ err = ibv_query_gid(ibpd->context, attr->port_num, attr->grh.sgid_index,
++ &sgid);
++ if (err) {
++ errno = err;
++ return NULL;
++ }
++
+ ah = calloc(1, sizeof(*ah));
+ if (!ah)
+ return NULL;
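With the up-front ibv_query_gid() check, an out-of-range grh.sgid_index now fails before any allocation. A caller-side sketch under that assumption, with illustrative attribute values:

    #include <infiniband/verbs.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* pd: an existing protection domain owned by the caller */
    static struct ibv_ah *make_ah(struct ibv_pd *pd)
    {
            struct ibv_ah_attr attr = {
                    .is_global = 1,
                    .port_num = 1,
                    .grh.sgid_index = 99,   /* out of range: rejected up front */
            };
            struct ibv_ah *ah = ibv_create_ah(pd, &attr);

            if (!ah)
                    fprintf(stderr, "ibv_create_ah: %s\n", strerror(errno));
            return ah;
    }
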
+@@ -2584,10 +2616,10 @@ int zxdh_uresize_cq(struct ibv_cq *cq, int cqe)
+ if (!(dev_attrs->feature_flags & ZXDH_FEATURE_CQ_RESIZE))
+ return -EOPNOTSUPP;
+
+- if (cqe > ZXDH_MAX_CQ_SIZE)
++ if (cqe > dev_attrs->max_hw_cq_size)
+ return -EINVAL;
+
+- cqe_needed = zxdh_cq_round_up(cqe + 1);
++ cqe_needed = zxdh_cq_round_up(cqe);
+
+ if (cqe_needed < ZXDH_U_MINCQ_SIZE)
+ cqe_needed = ZXDH_U_MINCQ_SIZE;
+@@ -2609,6 +2641,10 @@ int zxdh_uresize_cq(struct ibv_cq *cq, int cqe)
+ goto err_buf;
+ }
+
++ ret = pthread_spin_lock(&iwucq->lock);
++ if (ret)
++ goto err_lock;
++
+ new_mr.ibv_mr.pd = iwucq->vmr.ibv_mr.pd;
+ reg_mr_cmd.reg_type = ZXDH_MEMREG_TYPE_CQ;
+ reg_mr_cmd.cq_pages = cq_pages;
+@@ -2620,10 +2656,6 @@ int zxdh_uresize_cq(struct ibv_cq *cq, int cqe)
+ if (ret)
+ goto err_dereg_mr;
+
+- ret = pthread_spin_lock(&iwucq->lock);
+- if (ret)
+- goto err_lock;
+-
+ cmd.user_cq_buffer = (__u64)((uintptr_t)cq_base);
+ ret = ibv_cmd_resize_cq(&iwucq->verbs_cq.cq, cqe_needed, &cmd.ibv_cmd,
+ sizeof(cmd), &resp, sizeof(resp));
+@@ -2642,10 +2674,10 @@ int zxdh_uresize_cq(struct ibv_cq *cq, int cqe)
+ return ret;
+
+ err_resize:
+- pthread_spin_unlock(&iwucq->lock);
+-err_lock:
+ ibv_cmd_dereg_mr(&new_mr);
+ err_dereg_mr:
++ pthread_spin_unlock(&iwucq->lock);
++err_lock:
+ free(cq_buf);
+ err_buf:
+ zxdh_free_hw_buf(cq_base, cq_size);
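The relocated lock keeps the error-unwind labels a mirror image of the acquisition order: lock, then MR, then the resize command. The pattern in isolation, with stub functions standing in for the driver's command calls:

    #include <pthread.h>

    static int register_mr(void) { return 0; }  /* stub for ibv_cmd_reg_mr */
    static int resize_cmd(void) { return -1; }  /* stub; fails to show unwind */
    static void deregister_mr(void) { }         /* stub for ibv_cmd_dereg_mr */

    static int resize(pthread_spinlock_t *lock)
    {
            int ret = pthread_spin_lock(lock);

            if (ret)
                    goto err_lock;      /* nothing acquired yet */
            ret = register_mr();
            if (ret)
                    goto err_unlock;    /* only the lock is held */
            ret = resize_cmd();
            if (ret)
                    goto err_dereg;     /* lock and MR are both held */
            pthread_spin_unlock(lock);
            return 0;

    err_dereg:
            deregister_mr();
    err_unlock:
            pthread_spin_unlock(lock);
    err_lock:
            return ret;
    }
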
+@@ -2735,7 +2767,8 @@ static int zxdh_alloc_srq_buf(struct zxdh_usrq *iwusrq,
+ return 0;
+ }
+
+-static int zxdh_reg_srq_mr(struct ibv_pd *pd, struct zxdh_srq_init_info *info,
++static int zxdh_reg_srq_mr(struct ibv_pd *pd,
++ struct zxdh_srq_init_info *info,
+ size_t total_srq_size, uint16_t srq_pages,
+ uint16_t srq_list_pages, struct zxdh_usrq *iwusrq)
+ {
+@@ -3043,7 +3076,8 @@ int zxdh_uquery_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr)
+ }
+
+ static int zxdh_check_srq_valid(struct ibv_recv_wr *recv_wr,
+- struct zxdh_usrq *iwusrq, struct zxdh_srq *srq)
++ struct zxdh_usrq *iwusrq,
++ struct zxdh_srq *srq)
+ {
+ if (unlikely(recv_wr->num_sge > iwusrq->max_sge))
+ return -EINVAL;
+@@ -3054,8 +3088,9 @@ static int zxdh_check_srq_valid(struct ibv_recv_wr *recv_wr,
+ return 0;
+ }
+
+-static void zxdh_fill_srq_wqe(struct zxdh_usrq *iwusrq, struct zxdh_srq *srq,
+- __le64 *wqe_64, struct ibv_recv_wr *recv_wr)
++static void zxdh_fill_srq_wqe(struct zxdh_usrq *iwusrq,
++ struct zxdh_srq *srq, __le64 *wqe_64,
++ struct ibv_recv_wr *recv_wr)
+ {
+ __u32 byte_off;
+ int i;
+@@ -3099,8 +3134,8 @@ static void zxdh_fill_srq_wqe(struct zxdh_usrq *iwusrq, struct zxdh_srq *srq,
+ zxdh_dbg(ZXDH_DBG_SRQ, "%s wqe_64[5]:0x%llx\n", __func__, wqe_64[5]);
+ }
+
+-static void zxdh_get_wqe_index(struct zxdh_srq *srq, __le16 *wqe_16, __u16 *buf,
+- __u16 nreq, __u16 *idx)
++static void zxdh_get_wqe_index(struct zxdh_srq *srq, __le16 *wqe_16,
++ __u16 *buf, __u16 nreq, __u16 *idx)
+ {
+ int i;
+
+@@ -3207,3 +3242,10 @@ void zxdh_set_debug_mask(void)
+ if (env)
+ zxdh_debug_mask = strtol(env, NULL, 0);
+ }
++
++int zxdh_get_write_imm_split_switch(void)
++{
++ char *env;
++ env = getenv("ZXDH_WRITE_IMM_SPILT_ENABLE");
++ return (env != NULL) ? atoi(env) : 0;
++}
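The relocated helper re-reads the environment on every invocation, so the behaviour can be toggled per process; the variable's spelling (SPILT) is kept exactly as in the code. A minimal toggle:

    #include <stdlib.h>

    int main(void)
    {
            /* enable write-with-immediate splitting for this process and
             * its children; the library reads the variable at call time */
            return setenv("ZXDH_WRITE_IMM_SPILT_ENABLE", "1", 1);
    }
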
+diff --git a/providers/zrdma/zxdh_verbs.h b/providers/zrdma/zxdh_verbs.h
+index e3974c1..b72fa74 100644
+--- a/providers/zrdma/zxdh_verbs.h
++++ b/providers/zrdma/zxdh_verbs.h
+@@ -661,10 +661,10 @@ __le64 *zxdh_qp_get_next_recv_wqe(struct zxdh_qp *qp, __u32 *wqe_idx);
+ void zxdh_clean_cq(void *q, struct zxdh_cq *cq);
+ enum zxdh_status_code zxdh_nop(struct zxdh_qp *qp, __u64 wr_id, bool signaled,
+ bool post_sq);
+-enum zxdh_status_code zxdh_fragcnt_to_quanta_sq(__u32 frag_cnt, __u16 *quanta);
+ enum zxdh_status_code zxdh_fragcnt_to_wqesize_rq(__u32 frag_cnt,
+ __u16 *wqe_size);
+ void zxdh_get_sq_wqe_shift(__u32 sge, __u32 inline_data, __u8 *shift);
++
+ void zxdh_get_rq_wqe_shift(__u32 sge, __u8 *shift);
+ enum zxdh_status_code zxdh_get_sqdepth(struct zxdh_dev_attrs *dev_attrs,
+ __u32 sq_size, __u8 shift,
+--
+2.27.0
+
diff --git a/rdma-core.spec b/rdma-core.spec
index 0edb02b..f98780b 100644
--- a/rdma-core.spec
+++ b/rdma-core.spec
@@ -1,6 +1,6 @@
Name: rdma-core
Version: 50.0
-Release: 23
+Release: 24
Summary: RDMA core userspace libraries and daemons
License: GPL-2.0-only OR BSD-2-Clause AND BSD-3-Clause
Url: https://github.com/linux-rdma/rdma-core
@@ -52,6 +52,11 @@ patch43: 0043-libhns-Fix-missing-fields-for-SRQ-WC.patch
patch44: 0044-libxscale-Add-Yunsilicon-User-Space-RDMA-Driver.patch
patch45: 0045-libhns-fix-incorrectly-using-fixed-pagesize.patch
patch46: 0046-libhns-fix-missing-new-IO-support-for-DCA.patch
+patch47: 0047-libzrdma-Fix-wqe-polarity-set-error.patch
+patch48: 0048-libzrdma-Add-interface-aligned-with-kernel.patch
+patch49: 0049-libzrdma-Add-poll-cqe-error-to-Failed-status.patch
+patch50: 0050-libzrdma-Add-sq-rq-flush-cqe-and-log-optimization.patch
+patch51: 0051-libzrdma-Fix-capability-related-bugs.patch
BuildRequires: binutils cmake >= 2.8.11 gcc libudev-devel pkgconfig pkgconfig(libnl-3.0)
BuildRequires: pkgconfig(libnl-route-3.0) systemd systemd-devel
@@ -631,6 +636,12 @@ fi
%doc %{_docdir}/%{name}-%{version}/70-persistent-ipoib.rules
%changelog
+* Sat Mar 29 2025 Li Fuyan - 50.0-24
+- Type: requirement
+- ID: NA
+- SUG: NA
- DESC: fix some libzrdma bugs and add some optimizations
+
* Tue Mar 11 2025 Xinghai Cen - 50.0-23
- Type: bugfix
- ID: NA
--
Gitee