diff --git a/0099-libhns-Fix-double-free-of-rinl-buf-wqe-list.patch b/0099-libhns-Fix-double-free-of-rinl-buf-wqe-list.patch
new file mode 100644
index 0000000000000000000000000000000000000000..042c03d687301d13a636ac6a47a45ea2a9e2ef5c
--- /dev/null
+++ b/0099-libhns-Fix-double-free-of-rinl-buf-wqe-list.patch
@@ -0,0 +1,48 @@
+From 0a14854f63540a745fcda95872d4ae0298bbc5f0 Mon Sep 17 00:00:00 2001
+From: wenglianfa
+Date: Mon, 26 May 2025 21:20:29 +0800
+Subject: [PATCH 099/105] libhns: Fix double-free of rinl buf->wqe list
+
+rinl_buf->wqe_list will be double-freed in the error flow: first in
+alloc_recv_rinl_buf() and then in free_recv_rinl_buf().
+free_recv_rinl_buf() should not be called at all when
+alloc_recv_rinl_buf() fails.
+
+Fixes: 83b0baff3ccf ("libhns: Refactor rq inline")
+Signed-off-by: wenglianfa
+Signed-off-by: Junxian Huang
+---
+ providers/hns/hns_roce_u_verbs.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
+index 7418d2c..7d83a33 100644
+--- a/providers/hns/hns_roce_u_verbs.c
++++ b/providers/hns/hns_roce_u_verbs.c
+@@ -1658,18 +1658,20 @@ static int qp_alloc_wqe(struct ibv_qp_init_attr_ex *attr,
+ qp->dca_wqe.shift = qp->pageshift;
+ qp->dca_wqe.bufs = calloc(qp->dca_wqe.max_cnt, sizeof(void *));
+ if (!qp->dca_wqe.bufs)
+- goto err_alloc;
++ goto err_alloc_recv_rinl_buf;
+ verbs_debug(&ctx->ibv_ctx, "alloc DCA buf.\n");
+ } else {
+ if (hns_roce_alloc_buf(&qp->buf, qp->buf_size,
+ 1 << qp->pageshift))
+- goto err_alloc;
++ goto err_alloc_recv_rinl_buf;
+ }
+
+ return 0;
+
+-err_alloc:
++err_alloc_recv_rinl_buf:
+ free_recv_rinl_buf(&qp->rq_rinl_buf);
++
++err_alloc:
+ if (qp->rq.wrid)
+ free(qp->rq.wrid);
+
+--
+2.33.0
+
diff --git a/0100-libhns-Fix-ret-not-assigned-in-create-srq.patch b/0100-libhns-Fix-ret-not-assigned-in-create-srq.patch
new file mode 100644
index 0000000000000000000000000000000000000000..fa05de9719ebf16e13c64735833ffe37e326f5b8
--- /dev/null
+++ b/0100-libhns-Fix-ret-not-assigned-in-create-srq.patch
@@ -0,0 +1,46 @@
+From 138d2d80aea27adea77fee042ba6107adaee8687 Mon Sep 17 00:00:00 2001
+From: Junxian Huang
+Date: Wed, 23 Apr 2025 16:55:14 +0800
+Subject: [PATCH 100/105] libhns: Fix ret not assigned in create srq()
+
+Fix the problem that ret may not be assigned in the error flow
+of create_srq().
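To make the failure mode concrete, here is a minimal standalone C sketch of the bug class this patch fixes; the helper name spinlock_init() is hypothetical, not the actual hns call. An error branch that jumps to a shared exit label without assigning ret returns whatever stale value the variable holds, typically 0, so a failed create is reported as success.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for a setup step that fails with EINVAL. */
static int spinlock_init(void)
{
	return EINVAL;
}

/* Buggy shape: the branch below never assigns ret, so the shared
 * error exit returns the stale initial value (0 == success). */
static int create_buggy(void)
{
	int ret = 0;

	if (spinlock_init())
		goto err;	/* BUG: ret is still 0 here */
	return 0;
err:
	return ret;
}

/* Fixed shape, mirroring the patch: capture the status, then branch. */
static int create_fixed(void)
{
	int ret;

	ret = spinlock_init();
	if (ret)
		goto err;
	return 0;
err:
	return ret;
}

int main(void)
{
	/* On Linux this prints "buggy: 0, fixed: 22" (22 == EINVAL). */
	printf("buggy: %d, fixed: %d\n", create_buggy(), create_fixed());
	return 0;
}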
+ +Fixes: b38bae4b5b9e ("libhns: Add support for lock-free SRQ") +Fixes: b914c76318f5 ("libhns: Refactor the process of create_srq") +Signed-off-by: Junxian Huang +--- + providers/hns/hns_roce_u_verbs.c | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c +index 7d83a33..3a1c40e 100644 +--- a/providers/hns/hns_roce_u_verbs.c ++++ b/providers/hns/hns_roce_u_verbs.c +@@ -1070,16 +1070,20 @@ static struct ibv_srq *create_srq(struct ibv_context *context, + goto err; + } + +- if (hns_roce_srq_spinlock_init(context, srq, init_attr)) ++ ret = hns_roce_srq_spinlock_init(context, srq, init_attr); ++ if (ret) + goto err_free_srq; + + set_srq_param(context, srq, init_attr); +- if (alloc_srq_buf(srq)) ++ ret = alloc_srq_buf(srq); ++ if (ret) + goto err_destroy_lock; + + srq->rdb = hns_roce_alloc_db(hr_ctx, HNS_ROCE_SRQ_TYPE_DB); +- if (!srq->rdb) ++ if (!srq->rdb) { ++ ret = ENOMEM; + goto err_srq_buf; ++ } + + ret = exec_srq_create_cmd(context, srq, init_attr); + if (ret) +-- +2.33.0 + diff --git a/0101-libhns-Add-error-logs-to-help-diagnosis.patch b/0101-libhns-Add-error-logs-to-help-diagnosis.patch new file mode 100644 index 0000000000000000000000000000000000000000..1edde090ea41a2fd13d2aa70cad014a1652af6e4 --- /dev/null +++ b/0101-libhns-Add-error-logs-to-help-diagnosis.patch @@ -0,0 +1,240 @@ +From b9513a369315c7d5c56b19b468369f1a6025d45f Mon Sep 17 00:00:00 2001 +From: Junxian Huang +Date: Fri, 27 Dec 2024 14:02:29 +0800 +Subject: [PATCH 101/105] libhns: Add error logs to help diagnosis + +Add error logs to help diagnosis. + +Signed-off-by: Junxian Huang +--- + providers/hns/hns_roce_u.c | 4 +- + providers/hns/hns_roce_u_hw_v2.c | 3 ++ + providers/hns/hns_roce_u_verbs.c | 87 +++++++++++++++++++++++++------- + 3 files changed, 74 insertions(+), 20 deletions(-) + +diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c +index dfcd798..32a73c7 100644 +--- a/providers/hns/hns_roce_u.c ++++ b/providers/hns/hns_roce_u.c +@@ -268,8 +268,10 @@ static int hns_roce_mmap(struct hns_roce_device *hr_dev, + + context->uar = mmap(NULL, page_size, PROT_READ | PROT_WRITE, + MAP_SHARED, cmd_fd, 0); +- if (context->uar == MAP_FAILED) ++ if (context->uar == MAP_FAILED) { ++ verbs_err(&context->ibv_ctx, "error: failed to mmap uar page.\n"); + return -ENOMEM; ++ } + + return 0; + } +diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c +index 70fe2f7..56a42e7 100644 +--- a/providers/hns/hns_roce_u_hw_v2.c ++++ b/providers/hns/hns_roce_u_hw_v2.c +@@ -3131,6 +3131,9 @@ static int fill_send_wr_ops(const struct ibv_qp_init_attr_ex *attr, + fill_send_wr_ops_ud(qp_ex); + break; + default: ++ verbs_err(verbs_get_ctx(qp_ex->qp_base.context), ++ "QP type %d not supported for qp_ex send ops.\n", ++ attr->qp_type); + return -EOPNOTSUPP; + } + +diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c +index 3a1c40e..271525a 100644 +--- a/providers/hns/hns_roce_u_verbs.c ++++ b/providers/hns/hns_roce_u_verbs.c +@@ -524,8 +524,11 @@ static int verify_cq_create_attr(struct ibv_cq_init_attr_ex *attr, + struct hns_roce_context *context, + struct hnsdv_cq_init_attr *hns_cq_attr) + { +- if (!attr->cqe || attr->cqe > context->max_cqe) +- return -EINVAL; ++ if (!attr->cqe || attr->cqe > context->max_cqe) { ++ verbs_err(&context->ibv_ctx, "unsupported cq depth %u.\n", ++ attr->cqe); ++ return EINVAL; ++ } + + if (!check_comp_mask(attr->comp_mask, CREATE_CQ_SUPPORTED_COMP_MASK)) { + 
verbs_err(&context->ibv_ctx, "unsupported cq comps 0x%x\n", +@@ -533,8 +536,11 @@ static int verify_cq_create_attr(struct ibv_cq_init_attr_ex *attr, + return EOPNOTSUPP; + } + +- if (!check_comp_mask(attr->wc_flags, CREATE_CQ_SUPPORTED_WC_FLAGS)) +- return -EOPNOTSUPP; ++ if (!check_comp_mask(attr->wc_flags, CREATE_CQ_SUPPORTED_WC_FLAGS)) { ++ verbs_err(&context->ibv_ctx, "unsupported wc flags 0x%llx.\n", ++ attr->wc_flags); ++ return EOPNOTSUPP; ++ } + + if (attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_PD) { + if (!to_hr_pad(attr->parent_domain)) { +@@ -617,8 +623,11 @@ static int exec_cq_create_cmd(struct ibv_context *context, + ret = ibv_cmd_create_cq_ex(context, attr, &cq->verbs_cq, + &cmd_ex.ibv_cmd, sizeof(cmd_ex), + &resp_ex.ibv_resp, sizeof(resp_ex), 0); +- if (ret) ++ if (ret) { ++ verbs_err(verbs_get_ctx(context), ++ "failed to exec create cq cmd, ret = %d.\n", ret); + return ret; ++ } + + cq->cqn = resp_drv->cqn; + cq->flags = resp_drv->cap_flags; +@@ -877,13 +886,20 @@ static int verify_srq_create_attr(struct hns_roce_context *context, + struct ibv_srq_init_attr_ex *attr) + { + if (attr->srq_type != IBV_SRQT_BASIC && +- attr->srq_type != IBV_SRQT_XRC) ++ attr->srq_type != IBV_SRQT_XRC) { ++ verbs_err(&context->ibv_ctx, ++ "unsupported srq type, type = %d.\n", attr->srq_type); + return -EINVAL; ++ } + + if (!attr->attr.max_sge || + attr->attr.max_wr > context->max_srq_wr || +- attr->attr.max_sge > context->max_srq_sge) ++ attr->attr.max_sge > context->max_srq_sge) { ++ verbs_err(&context->ibv_ctx, ++ "invalid srq attr size, max_wr = %u, max_sge = %u.\n", ++ attr->attr.max_wr, attr->attr.max_sge); + return -EINVAL; ++ } + + attr->attr.max_wr = max_t(uint32_t, attr->attr.max_wr, + HNS_ROCE_MIN_SRQ_WQE_NUM); +@@ -1015,8 +1031,12 @@ static int exec_srq_create_cmd(struct ibv_context *context, + ret = ibv_cmd_create_srq_ex(context, &srq->verbs_srq, init_attr, + &cmd_ex.ibv_cmd, sizeof(cmd_ex), + &resp_ex.ibv_resp, sizeof(resp_ex)); +- if (ret) ++ if (ret) { ++ verbs_err(verbs_get_ctx(context), ++ "failed to exec create srq cmd, ret = %d.\n", ++ ret); + return ret; ++ } + + srq->srqn = resp_ex.srqn; + srq->cap_flags = resp_ex.cap_flags; +@@ -1340,9 +1360,12 @@ static int check_qp_create_mask(struct hns_roce_context *ctx, + struct ibv_qp_init_attr_ex *attr) + { + struct hns_roce_device *hr_dev = to_hr_dev(ctx->ibv_ctx.context.device); ++ int ret = 0; + +- if (!check_comp_mask(attr->comp_mask, CREATE_QP_SUP_COMP_MASK)) +- return -EOPNOTSUPP; ++ if (!check_comp_mask(attr->comp_mask, CREATE_QP_SUP_COMP_MASK)) { ++ ret = EOPNOTSUPP; ++ goto out; ++ } + + if (attr->comp_mask & IBV_QP_INIT_ATTR_SEND_OPS_FLAGS && + !check_comp_mask(attr->send_ops_flags, SEND_OPS_FLAG_MASK)) +@@ -1351,22 +1374,26 @@ static int check_qp_create_mask(struct hns_roce_context *ctx, + switch (attr->qp_type) { + case IBV_QPT_UD: + if (hr_dev->hw_version == HNS_ROCE_HW_VER2) +- return -EINVAL; ++ return EINVAL; + SWITCH_FALLTHROUGH; + case IBV_QPT_RC: + case IBV_QPT_XRC_SEND: + if (!(attr->comp_mask & IBV_QP_INIT_ATTR_PD)) +- return -EINVAL; ++ ret = EINVAL; + break; + case IBV_QPT_XRC_RECV: + if (!(attr->comp_mask & IBV_QP_INIT_ATTR_XRCD)) +- return -EINVAL; ++ ret = EINVAL; + break; + default: +- return -EINVAL; ++ return EOPNOTSUPP; + } + +- return 0; ++out: ++ if (ret) ++ verbs_err(&ctx->ibv_ctx, "invalid comp_mask 0x%x.\n", ++ attr->comp_mask); ++ return ret; + } + + static int hns_roce_qp_has_rq(struct ibv_qp_init_attr_ex *attr) +@@ -1391,8 +1418,13 @@ static int verify_qp_create_cap(struct hns_roce_context *ctx, + if 
(cap->max_send_wr > ctx->max_qp_wr ||
+ cap->max_recv_wr > ctx->max_qp_wr ||
+ cap->max_send_sge > ctx->max_sge ||
+- cap->max_recv_sge > ctx->max_sge)
++ cap->max_recv_sge > ctx->max_sge) {
++ verbs_err(&ctx->ibv_ctx,
++ "invalid qp cap size, max_send/recv_wr = {%u, %u}, max_send/recv_sge = {%u, %u}.\n",
++ cap->max_send_wr, cap->max_recv_wr,
++ cap->max_send_sge, cap->max_recv_sge);
+ return -EINVAL;
++ }
+
+ has_rq = hns_roce_qp_has_rq(attr);
+ if (!has_rq) {
+@@ -1401,12 +1433,20 @@
+ min_wqe_num = HNS_ROCE_V2_MIN_WQE_NUM;
+- if (cap->max_send_wr < min_wqe_num)
++ if (cap->max_send_wr < min_wqe_num) {
++ verbs_debug(&ctx->ibv_ctx,
++ "change sq depth from %u to minimum %u.\n",
++ cap->max_send_wr, min_wqe_num);
+ cap->max_send_wr = min_wqe_num;
++ }
+
+ if (cap->max_recv_wr) {
+- if (cap->max_recv_wr < min_wqe_num)
++ if (cap->max_recv_wr < min_wqe_num) {
++ verbs_debug(&ctx->ibv_ctx,
++ "change rq depth from %u to minimum %u.\n",
++ cap->max_recv_wr, min_wqe_num);
+ cap->max_recv_wr = min_wqe_num;
++ }
+
+ if (!cap->max_recv_sge)
+ return -EINVAL;
+@@ -1916,6 +1956,11 @@ static int qp_exec_create_cmd(struct ibv_qp_init_attr_ex *attr,
+ ret = ibv_cmd_create_qp_ex2(&ctx->ibv_ctx.context, &qp->verbs_qp, attr,
+ &cmd_ex.ibv_cmd, sizeof(cmd_ex),
+ &resp_ex.ibv_resp, sizeof(resp_ex));
++ if (ret) {
++ verbs_err(&ctx->ibv_ctx,
++ "failed to exec create qp cmd, ret = %d.\n", ret);
++ return ret;
++ }
+
+ qp->flags = resp_ex.drv_payload.cap_flags;
+ *dwqe_mmap_key = resp_ex.drv_payload.dwqe_mmap_key;
+@@ -1977,8 +2022,12 @@ static int mmap_dwqe(struct ibv_context *ibv_ctx, struct hns_roce_qp *qp,
+ {
+ qp->dwqe_page = mmap(NULL, HNS_ROCE_DWQE_PAGE_SIZE, PROT_WRITE,
+ MAP_SHARED, ibv_ctx->cmd_fd, dwqe_mmap_key);
+- if (qp->dwqe_page == MAP_FAILED)
++ if (qp->dwqe_page == MAP_FAILED) {
++ verbs_err(verbs_get_ctx(ibv_ctx),
++ "failed to mmap direct wqe page, QPN = %u.\n",
++ qp->verbs_qp.qp.qp_num);
+ return -EINVAL;
++ }
+
+ return 0;
+ }
+--
+2.33.0
+
diff --git a/0102-libhns-Sync-lock-free-codes-from-mainline-driver.patch b/0102-libhns-Sync-lock-free-codes-from-mainline-driver.patch
new file mode 100644
index 0000000000000000000000000000000000000000..62509e3d73d3ab88b9e4660596d35c6ab17a7f1d
--- /dev/null
+++ b/0102-libhns-Sync-lock-free-codes-from-mainline-driver.patch
@@ -0,0 +1,519 @@
+From 8cd132d5f4aa489b9eeaa3f43865c41e4ac28101 Mon Sep 17 00:00:00 2001
+From: Junxian Huang
+Date: Wed, 19 Mar 2025 18:13:52 +0800
+Subject: [PATCH 102/105] libhns: Sync lock-free codes from mainline driver
+
+Sync lock-free code from the mainline driver. There is only one
+functional change, which adds a pad refcnt when creating qp/cq/srq;
+the other changes are mostly code cleanup.
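The pad refcounting added by this patch follows a simple ownership rule. Below is a minimal sketch under simplified assumptions: struct pad stands in for the real hns_roce_pad, and qp_create()/qp_destroy()/pad_free() are hypothetical names, not the libhns API. Each QP/CQ/SRQ pins its parent domain at create time and unpins it on destroy, and freeing the parent domain is refused with EBUSY while any object still holds a reference.

#include <errno.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Simplified stand-ins for the real hns_roce_pad/hns_roce_qp. */
struct pad {
	atomic_int refcount;
};

struct qp {
	struct pad *pad;
};

static struct qp *qp_create(struct pad *pad)
{
	struct qp *qp = calloc(1, sizeof(*qp));

	if (!qp)
		return NULL;
	qp->pad = pad;
	if (pad)	/* pin the parent domain for the QP's lifetime */
		atomic_fetch_add(&pad->refcount, 1);
	return qp;
}

static void qp_destroy(struct qp *qp)
{
	if (!qp)
		return;
	if (qp->pad)	/* drop the reference taken at create time */
		atomic_fetch_sub(&qp->pad->refcount, 1);
	free(qp);
}

/* Freeing the parent domain is refused while objects still hold
 * references, like the EBUSY check in hns_roce_free_pad(). */
static int pad_free(struct pad *pad)
{
	if (atomic_load(&pad->refcount) > 1)
		return EBUSY;
	free(pad);
	return 0;
}

int main(void)
{
	struct pad *pad = malloc(sizeof(*pad));
	struct qp *qp;

	if (!pad)
		return 1;
	atomic_init(&pad->refcount, 1);	/* one reference held by the owner */
	qp = qp_create(pad);
	/* pad_free() would return EBUSY here until the QP is destroyed. */
	qp_destroy(qp);
	return pad_free(pad);
}

The same pattern explains the error paths in the patch: when create fails after the refcount was taken, the cleanup labels must drop it again, which is exactly what the added atomic_fetch_sub() calls do.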
+ +The mainline PR was: +https://github.com/linux-rdma/rdma-core/pull/1482 +https://github.com/linux-rdma/rdma-core/pull/1599/commits/f877d6e610e438515e1535c9ec7a3a3ef37c58e0 +https://github.com/linux-rdma/rdma-core/pull/1599/commits/234d135276ea8ef83633113e224e0cd735ebeca8 + +Signed-off-by: Junxian Huang +--- + providers/hns/hns_roce_u.h | 1 + + providers/hns/hns_roce_u_hw_v2.c | 18 +++- + providers/hns/hns_roce_u_hw_v2.h | 4 +- + providers/hns/hns_roce_u_verbs.c | 163 ++++++++++++++----------------- + 4 files changed, 88 insertions(+), 98 deletions(-) + +diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h +index 863d4b5..7f5872c 100644 +--- a/providers/hns/hns_roce_u.h ++++ b/providers/hns/hns_roce_u.h +@@ -318,6 +318,7 @@ struct hns_roce_cq { + unsigned long flags; + unsigned int cqe_size; + struct hns_roce_v2_cqe *cqe; ++ struct ibv_pd *parent_domain; + struct list_head list_sq; + struct list_head list_rq; + struct list_head list_srq; +diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c +index 56a42e7..acb373c 100644 +--- a/providers/hns/hns_roce_u_hw_v2.c ++++ b/providers/hns/hns_roce_u_hw_v2.c +@@ -1976,8 +1976,11 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, + return ret; + } + +-void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) ++void hns_roce_lock_cqs(struct ibv_qp *qp) + { ++ struct hns_roce_cq *send_cq = to_hr_cq(qp->send_cq); ++ struct hns_roce_cq *recv_cq = to_hr_cq(qp->recv_cq); ++ + if (send_cq && recv_cq) { + if (send_cq == recv_cq) { + hns_roce_spin_lock(&send_cq->hr_lock); +@@ -1995,8 +1998,11 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) + } + } + +-void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) ++void hns_roce_unlock_cqs(struct ibv_qp *qp) + { ++ struct hns_roce_cq *send_cq = to_hr_cq(qp->send_cq); ++ struct hns_roce_cq *recv_cq = to_hr_cq(qp->recv_cq); ++ + if (send_cq && recv_cq) { + if (send_cq == recv_cq) { + hns_roce_spin_unlock(&send_cq->hr_lock); +@@ -2017,6 +2023,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_c + static int hns_roce_u_v2_destroy_qp(struct ibv_qp *ibqp) + { + struct hns_roce_context *ctx = to_hr_ctx(ibqp->context); ++ struct hns_roce_pad *pad = to_hr_pad(ibqp->pd); + struct hns_roce_qp *qp = to_hr_qp(ibqp); + int ret; + +@@ -2029,7 +2036,7 @@ static int hns_roce_u_v2_destroy_qp(struct ibv_qp *ibqp) + + hns_roce_v2_clear_qp(ctx, qp); + +- hns_roce_lock_cqs(to_hr_cq(ibqp->send_cq), to_hr_cq(ibqp->recv_cq)); ++ hns_roce_lock_cqs(ibqp); + + if (ibqp->recv_cq) { + __hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), ibqp->qp_num, +@@ -2045,11 +2052,14 @@ static int hns_roce_u_v2_destroy_qp(struct ibv_qp *ibqp) + list_del(&qp->scq_node); + } + +- hns_roce_unlock_cqs(to_hr_cq(ibqp->send_cq), to_hr_cq(ibqp->recv_cq)); ++ hns_roce_unlock_cqs(ibqp); + + hns_roce_free_qp_buf(qp, ctx); + hns_roce_qp_spinlock_destroy(qp); + ++ if (pad) ++ atomic_fetch_sub(&pad->pd.refcount, 1); ++ + free(qp); + + if (ctx->dca_ctx.mem_cnt > 0) +diff --git a/providers/hns/hns_roce_u_hw_v2.h b/providers/hns/hns_roce_u_hw_v2.h +index fa83bbe..01d16ac 100644 +--- a/providers/hns/hns_roce_u_hw_v2.h ++++ b/providers/hns/hns_roce_u_hw_v2.h +@@ -347,7 +347,7 @@ void hns_roce_v2_clear_qp(struct hns_roce_context *ctx, struct hns_roce_qp *qp); + void hns_roce_attach_cq_ex_ops(struct ibv_cq_ex *cq_ex, uint64_t wc_flags); + int hns_roce_attach_qp_ex_ops(struct 
ibv_qp_init_attr_ex *attr, + struct hns_roce_qp *qp); +-void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq); +-void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq); ++void hns_roce_lock_cqs(struct ibv_qp *qp); ++void hns_roce_unlock_cqs(struct ibv_qp *qp); + + #endif /* _HNS_ROCE_U_HW_V2_H */ +diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c +index 271525a..0708b95 100644 +--- a/providers/hns/hns_roce_u_verbs.c ++++ b/providers/hns/hns_roce_u_verbs.c +@@ -44,16 +44,11 @@ + #include "hns_roce_u_db.h" + #include "hns_roce_u_hw_v2.h" + +-static int hns_roce_whether_need_lock(struct ibv_pd *pd) ++static bool hns_roce_whether_need_lock(struct ibv_pd *pd) + { +- struct hns_roce_pad *pad; +- bool need_lock = true; +- +- pad = to_hr_pad(pd); +- if (pad && pad->td) +- need_lock = false; ++ struct hns_roce_pad *pad = to_hr_pad(pd); + +- return need_lock; ++ return !(pad && pad->td); + } + + static int hns_roce_spinlock_init(struct hns_roce_spinlock *hr_lock, +@@ -165,7 +160,7 @@ struct ibv_td *hns_roce_u_alloc_td(struct ibv_context *context, + struct hns_roce_td *td; + + if (attr->comp_mask) { +- errno = EINVAL; ++ errno = EOPNOTSUPP; + return NULL; + } + +@@ -184,19 +179,14 @@ struct ibv_td *hns_roce_u_alloc_td(struct ibv_context *context, + int hns_roce_u_dealloc_td(struct ibv_td *ibv_td) + { + struct hns_roce_td *td; +- int ret = 0; + + td = to_hr_td(ibv_td); +- if (atomic_load(&td->refcount) > 1) { +- ret = -EBUSY; +- goto err; +- } ++ if (atomic_load(&td->refcount) > 1) ++ return EBUSY; + + free(td); + +-err: +- errno = abs(ret); +- return ret; ++ return 0; + } + + struct ibv_pd *hns_roce_u_alloc_pd(struct ibv_context *context) +@@ -204,7 +194,6 @@ struct ibv_pd *hns_roce_u_alloc_pd(struct ibv_context *context) + struct hns_roce_alloc_pd_resp resp = {}; + struct ibv_alloc_pd cmd; + struct hns_roce_pd *pd; +- int ret; + + pd = calloc(1, sizeof(*pd)); + if (!pd) { +@@ -212,10 +201,9 @@ struct ibv_pd *hns_roce_u_alloc_pd(struct ibv_context *context) + return NULL; + } + +- ret = ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof(cmd), +- &resp.ibv_resp, sizeof(resp)); +- +- if (ret) ++ errno = ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof(cmd), ++ &resp.ibv_resp, sizeof(resp)); ++ if (errno) + goto err; + + atomic_init(&pd->refcount, 1); +@@ -225,7 +213,6 @@ struct ibv_pd *hns_roce_u_alloc_pd(struct ibv_context *context) + + err: + free(pd); +- errno = abs(ret); + return NULL; + } + +@@ -256,41 +243,40 @@ struct ibv_pd *hns_roce_u_alloc_pad(struct ibv_context *context, + pad->pd.protection_domain = to_hr_pd(attr->pd); + atomic_fetch_add(&pad->pd.protection_domain->refcount, 1); + ++ atomic_init(&pad->pd.refcount, 1); + ibv_initialize_parent_domain(&pad->pd.ibv_pd, + &pad->pd.protection_domain->ibv_pd); + + return &pad->pd.ibv_pd; + } + +-static void hns_roce_free_pad(struct hns_roce_pad *pad) ++static int hns_roce_free_pad(struct hns_roce_pad *pad) + { ++ if (atomic_load(&pad->pd.refcount) > 1) ++ return EBUSY; ++ + atomic_fetch_sub(&pad->pd.protection_domain->refcount, 1); + + if (pad->td) + atomic_fetch_sub(&pad->td->refcount, 1); + + free(pad); ++ return 0; + } + + static int hns_roce_free_pd(struct hns_roce_pd *pd) + { + int ret; + +- if (atomic_load(&pd->refcount) > 1) { +- ret = -EBUSY; +- goto err; +- } ++ if (atomic_load(&pd->refcount) > 1) ++ return EBUSY; + + ret = ibv_cmd_dealloc_pd(&pd->ibv_pd); + if (ret) +- goto err; ++ return ret; + + free(pd); +- +-err: +- errno = abs(ret); +- +- 
return ret; ++ return 0; + } + + int hns_roce_u_dealloc_pd(struct ibv_pd *ibv_pd) +@@ -298,10 +284,8 @@ int hns_roce_u_dealloc_pd(struct ibv_pd *ibv_pd) + struct hns_roce_pad *pad = to_hr_pad(ibv_pd); + struct hns_roce_pd *pd = to_hr_pd(ibv_pd); + +- if (pad) { +- hns_roce_free_pad(pad); +- return 0; +- } ++ if (pad) ++ return hns_roce_free_pad(pad); + + return hns_roce_free_pd(pd); + } +@@ -524,6 +508,8 @@ static int verify_cq_create_attr(struct ibv_cq_init_attr_ex *attr, + struct hns_roce_context *context, + struct hnsdv_cq_init_attr *hns_cq_attr) + { ++ struct hns_roce_pad *pad = to_hr_pad(attr->parent_domain); ++ + if (!attr->cqe || attr->cqe > context->max_cqe) { + verbs_err(&context->ibv_ctx, "unsupported cq depth %u.\n", + attr->cqe); +@@ -542,11 +528,9 @@ static int verify_cq_create_attr(struct ibv_cq_init_attr_ex *attr, + return EOPNOTSUPP; + } + +- if (attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_PD) { +- if (!to_hr_pad(attr->parent_domain)) { +- verbs_err(&context->ibv_ctx, "failed to check the pad of cq.\n"); +- return EINVAL; +- } ++ if (attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_PD && !pad) { ++ verbs_err(&context->ibv_ctx, "failed to check the pad of cq.\n"); ++ return EINVAL; + } + + attr->cqe = max_t(uint32_t, HNS_ROCE_MIN_CQE_NUM, +@@ -668,19 +652,10 @@ static void hns_roce_uninit_cq_swc(struct hns_roce_cq *cq) + } + } + +-static int hns_roce_cq_spinlock_init(struct ibv_context *context, +- struct hns_roce_cq *cq, ++static int hns_roce_cq_spinlock_init(struct hns_roce_cq *cq, + struct ibv_cq_init_attr_ex *attr) + { +- struct hns_roce_pad *pad = NULL; +- int need_lock; +- +- if (attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_PD) +- pad = to_hr_pad(attr->parent_domain); +- +- need_lock = hns_roce_whether_need_lock(pad ? &pad->pd.ibv_pd : NULL); +- if (!need_lock) +- verbs_info(verbs_get_ctx(context), "configure cq as no lock.\n"); ++ bool need_lock = hns_roce_whether_need_lock(attr->parent_domain); + + return hns_roce_spinlock_init(&cq->hr_lock, need_lock); + } +@@ -689,6 +664,7 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context, + struct ibv_cq_init_attr_ex *attr, + struct hnsdv_cq_init_attr *hns_cq_attr) + { ++ struct hns_roce_pad *pad = to_hr_pad(attr->parent_domain); + struct hns_roce_context *hr_ctx = to_hr_ctx(context); + struct hns_roce_cq *cq; + int ret; +@@ -703,7 +679,12 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context, + goto err; + } + +- ret = hns_roce_cq_spinlock_init(context, cq, attr); ++ if (attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_PD) { ++ cq->parent_domain = attr->parent_domain; ++ atomic_fetch_add(&pad->pd.refcount, 1); ++ } ++ ++ ret = hns_roce_cq_spinlock_init(cq, attr); + if (ret) + goto err_lock; + +@@ -741,6 +722,8 @@ err_db: + err_buf: + hns_roce_spinlock_destroy(&cq->hr_lock); + err_lock: ++ if (attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_PD) ++ atomic_fetch_sub(&pad->pd.refcount, 1); + free(cq); + err: + errno = abs(ret); +@@ -813,6 +796,7 @@ int hns_roce_u_modify_cq(struct ibv_cq *cq, struct ibv_modify_cq_attr *attr) + int hns_roce_u_destroy_cq(struct ibv_cq *cq) + { + struct hns_roce_cq *hr_cq = to_hr_cq(cq); ++ struct hns_roce_pad *pad = to_hr_pad(hr_cq->parent_domain); + int ret; + + ret = ibv_cmd_destroy_cq(cq); +@@ -827,6 +811,9 @@ int hns_roce_u_destroy_cq(struct ibv_cq *cq) + + hns_roce_spinlock_destroy(&hr_cq->hr_lock); + ++ if (pad) ++ atomic_fetch_sub(&pad->pd.refcount, 1); ++ + free(hr_cq); + + return ret; +@@ -1060,15 +1047,10 @@ static void init_srq_cq_list(struct hns_roce_srq *srq, + 
hns_roce_spin_unlock(&srq_cq->hr_lock); + } + +-static int hns_roce_srq_spinlock_init(struct ibv_context *context, +- struct hns_roce_srq *srq, ++static int hns_roce_srq_spinlock_init(struct hns_roce_srq *srq, + struct ibv_srq_init_attr_ex *attr) + { +- int need_lock; +- +- need_lock = hns_roce_whether_need_lock(attr->pd); +- if (!need_lock) +- verbs_info(verbs_get_ctx(context), "configure srq as no lock.\n"); ++ bool need_lock = hns_roce_whether_need_lock(attr->pd); + + return hns_roce_spinlock_init(&srq->hr_lock, need_lock); + } +@@ -1077,6 +1059,7 @@ static struct ibv_srq *create_srq(struct ibv_context *context, + struct ibv_srq_init_attr_ex *init_attr) + { + struct hns_roce_context *hr_ctx = to_hr_ctx(context); ++ struct hns_roce_pad *pad = to_hr_pad(init_attr->pd); + struct hns_roce_srq *srq; + int ret; + +@@ -1089,8 +1072,10 @@ static struct ibv_srq *create_srq(struct ibv_context *context, + ret = -ENOMEM; + goto err; + } ++ if (pad) ++ atomic_fetch_add(&pad->pd.refcount, 1); + +- ret = hns_roce_srq_spinlock_init(context, srq, init_attr); ++ ret = hns_roce_srq_spinlock_init(srq, init_attr); + if (ret) + goto err_free_srq; + +@@ -1134,6 +1119,8 @@ err_destroy_lock: + hns_roce_spinlock_destroy(&srq->hr_lock); + + err_free_srq: ++ if (pad) ++ atomic_fetch_sub(&pad->pd.refcount, 1); + free(srq); + + err: +@@ -1209,6 +1196,7 @@ static void del_srq_from_cq_list(struct hns_roce_srq *srq) + int hns_roce_u_destroy_srq(struct ibv_srq *ibv_srq) + { + struct hns_roce_context *ctx = to_hr_ctx(ibv_srq->context); ++ struct hns_roce_pad *pad = to_hr_pad(ibv_srq->pd); + struct hns_roce_srq *srq = to_hr_srq(ibv_srq); + int ret; + +@@ -1224,6 +1212,10 @@ int hns_roce_u_destroy_srq(struct ibv_srq *ibv_srq) + free_srq_buf(srq); + + hns_roce_spinlock_destroy(&srq->hr_lock); ++ ++ if (pad) ++ atomic_fetch_sub(&pad->pd.refcount, 1); ++ + free(srq); + + return 0; +@@ -1478,38 +1470,19 @@ static int verify_qp_create_attr(struct hns_roce_context *ctx, + return verify_qp_create_cap(ctx, attr); + } + +-static int hns_roce_qp_spinlock_init(struct hns_roce_context *ctx, +- struct ibv_qp_init_attr_ex *attr, ++static int hns_roce_qp_spinlock_init(struct ibv_qp_init_attr_ex *attr, + struct hns_roce_qp *qp) + { +- int sq_need_lock; +- int rq_need_lock; ++ bool need_lock = hns_roce_whether_need_lock(attr->pd); + int ret; + +- sq_need_lock = hns_roce_whether_need_lock(attr->pd); +- if (!sq_need_lock) +- verbs_warn(&ctx->ibv_ctx, "configure sq as no lock.\n"); +- +- rq_need_lock = hns_roce_whether_need_lock(attr->pd); +- if (!rq_need_lock) +- verbs_warn(&ctx->ibv_ctx, "configure rq as no lock.\n"); +- +- ret = hns_roce_spinlock_init(&qp->sq.hr_lock, sq_need_lock); +- if (ret) { +- verbs_err(&ctx->ibv_ctx, "failed to init sq spinlock.\n"); ++ ret = hns_roce_spinlock_init(&qp->sq.hr_lock, need_lock); ++ if (ret) + return ret; +- } +- +- ret = hns_roce_spinlock_init(&qp->rq.hr_lock, rq_need_lock); +- if (ret) { +- verbs_err(&ctx->ibv_ctx, "failed to init rq spinlock.\n"); +- goto err_rq_lock; +- } +- +- return 0; + +-err_rq_lock: +- hns_roce_spinlock_destroy(&qp->sq.hr_lock); ++ ret = hns_roce_spinlock_init(&qp->rq.hr_lock, need_lock); ++ if (ret) ++ hns_roce_spinlock_destroy(&qp->sq.hr_lock); + + return ret; + } +@@ -2044,7 +2017,7 @@ static void add_qp_to_cq_list(struct ibv_qp_init_attr_ex *attr, + list_node_init(&qp->rcq_node); + list_node_init(&qp->srcq_node); + +- hns_roce_lock_cqs(send_cq, recv_cq); ++ hns_roce_lock_cqs(&qp->verbs_qp.qp); + if (send_cq) + list_add_tail(&send_cq->list_sq, &qp->scq_node); + if (recv_cq) 
{ +@@ -2053,7 +2026,7 @@ static void add_qp_to_cq_list(struct ibv_qp_init_attr_ex *attr, + else + list_add_tail(&recv_cq->list_rq, &qp->rcq_node); + } +- hns_roce_unlock_cqs(send_cq, recv_cq); ++ hns_roce_unlock_cqs(&qp->verbs_qp.qp); + } + + static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx, +@@ -2061,6 +2034,7 @@ static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx, + struct hnsdv_qp_init_attr *hns_attr) + { + struct hns_roce_context *context = to_hr_ctx(ibv_ctx); ++ struct hns_roce_pad *pad = to_hr_pad(attr->pd); + struct hns_roce_cmd_flag cmd_flag = {}; + struct hns_roce_qp *qp; + uint64_t dwqe_mmap_key; +@@ -2078,7 +2052,10 @@ static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx, + + hns_roce_set_qp_params(attr, qp, context); + +- ret = hns_roce_qp_spinlock_init(context, attr, qp); ++ if (pad) ++ atomic_fetch_add(&pad->pd.refcount, 1); ++ ++ ret = hns_roce_qp_spinlock_init(attr, qp); + if (ret) + goto err_spinlock; + +@@ -2121,6 +2098,8 @@ err_cmd: + err_buf: + hns_roce_qp_spinlock_destroy(qp); + err_spinlock: ++ if (pad) ++ atomic_fetch_sub(&pad->pd.refcount, 1); + free(qp); + err: + if (ret < 0) +-- +2.33.0 + diff --git a/0103-verbs-Assign-ibv-srq-pd-when-creating-SRQ.patch b/0103-verbs-Assign-ibv-srq-pd-when-creating-SRQ.patch new file mode 100644 index 0000000000000000000000000000000000000000..c4d3f962c1732d352e391774d341f655021d0589 --- /dev/null +++ b/0103-verbs-Assign-ibv-srq-pd-when-creating-SRQ.patch @@ -0,0 +1,31 @@ +From 93ddf71e89c8a8a4c0e2d7bf2d1f1d2c1bc3d903 Mon Sep 17 00:00:00 2001 +From: Junxian Huang +Date: Wed, 23 Apr 2025 16:55:17 +0800 +Subject: [PATCH 103/105] verbs: Assign ibv srq->pd when creating SRQ + +Some providers need to access ibv_srq->pd during SRQ destruction, but +it may not be assigned currently when using ibv_create_srq_ex(). This +may lead to some SRQ-related resource leaks. Assign ibv_srq->pd when +creating SRQ to ensure pd can be obtained correctly. + +Fixes: 40c1365b2198 ("Add support for XRC SRQs") +Signed-off-by: Junxian Huang +--- + libibverbs/cmd_srq.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/libibverbs/cmd_srq.c b/libibverbs/cmd_srq.c +index dfaaa6a..259ea0d 100644 +--- a/libibverbs/cmd_srq.c ++++ b/libibverbs/cmd_srq.c +@@ -63,6 +63,7 @@ static int ibv_icmd_create_srq(struct ibv_pd *pd, struct verbs_srq *vsrq, + struct verbs_xrcd *vxrcd = NULL; + enum ibv_srq_type srq_type; + ++ srq->pd = pd; + srq->context = pd->context; + pthread_mutex_init(&srq->mutex, NULL); + pthread_cond_init(&srq->cond, NULL); +-- +2.33.0 + diff --git a/0104-libhns-Clean-up-data-type-issues.patch b/0104-libhns-Clean-up-data-type-issues.patch new file mode 100644 index 0000000000000000000000000000000000000000..1f69533098dfe10147cb24fcd173f3fba71427ef --- /dev/null +++ b/0104-libhns-Clean-up-data-type-issues.patch @@ -0,0 +1,113 @@ +From a20bcd29a5c2194f947f1ce24970b4be9d1cf32a Mon Sep 17 00:00:00 2001 +From: Junxian Huang +Date: Thu, 13 Mar 2025 17:26:50 +0800 +Subject: [PATCH 104/105] libhns: Clean up data type issues + +Clean up mixed signed/unsigned type issues. Fix a wrong format +character as well. 
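To illustrate the class of mismatch this patch cleans up, a small standalone example with contrived values, not the actual driver fields: comparing signed against unsigned triggers -Wsign-compare, and a negative signed operand is silently converted to a huge unsigned value, so a range check can pass when it should fail.

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	static const int map[4] = { 11, 22, 33, 44 };
	uint32_t cqe = 8;
	int max_cqe = -1;	/* contrived: a signed cap gone negative */

	/*
	 * In a mixed comparison max_cqe is converted to unsigned, so -1
	 * becomes UINT32_MAX and the range check wrongly accepts cqe.
	 * The explicit cast matches the patch's fix: it silences
	 * -Wsign-compare and makes the conversion visible.
	 */
	if (cqe > (uint32_t)max_cqe)
		puts("rejected");
	else
		puts("accepted, because -1 wrapped to UINT32_MAX");

	/* Loop counters compared against sizeof-derived (size_t) bounds
	 * should be unsigned, as in the ARRAY_SIZE loops in the patch. */
	for (unsigned int i = 0; i < ARRAY_SIZE(map); i++)
		printf("%d ", map[i]);
	putchar('\n');

	return 0;
}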
+ +Fixes: cf6d9149f8f5 ("libhns: Introduce hns direct verbs") +Signed-off-by: Junxian Huang +--- + providers/hns/hns_roce_u.h | 2 +- + providers/hns/hns_roce_u_hw_v2.c | 13 +++++++------ + providers/hns/hns_roce_u_verbs.c | 4 ++-- + 3 files changed, 10 insertions(+), 9 deletions(-) + +diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h +index 7f5872c..3e9b487 100644 +--- a/providers/hns/hns_roce_u.h ++++ b/providers/hns/hns_roce_u.h +@@ -367,7 +367,7 @@ struct hns_roce_wq { + unsigned long *wrid; + struct hns_roce_spinlock hr_lock; + unsigned int wqe_cnt; +- int max_post; ++ unsigned int max_post; + unsigned int head; + unsigned int tail; + unsigned int max_gs; +diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c +index acb373c..70e5b1f 100644 +--- a/providers/hns/hns_roce_u_hw_v2.c ++++ b/providers/hns/hns_roce_u_hw_v2.c +@@ -173,7 +173,7 @@ static enum ibv_wc_status get_wc_status(uint8_t status) + { HNS_ROCE_V2_CQE_XRC_VIOLATION_ERR, IBV_WC_REM_INV_RD_REQ_ERR }, + }; + +- for (int i = 0; i < ARRAY_SIZE(map); i++) { ++ for (unsigned int i = 0; i < ARRAY_SIZE(map); i++) { + if (status == map[i].cqe_status) + return map[i].wc_status; + } +@@ -1216,7 +1216,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, + unsigned int sge_mask = qp->ex_sge.sge_cnt - 1; + void *dst_addr, *src_addr, *tail_bound_addr; + uint32_t src_len, tail_len; +- int i; ++ uint32_t i; + + if (sge_info->total_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) + return EINVAL; +@@ -1286,7 +1286,7 @@ static void fill_ud_inn_inl_data(const struct ibv_send_wr *wr, + + static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len) + { +- int mtu = mtu_enum_to_int(qp->path_mtu); ++ unsigned int mtu = mtu_enum_to_int(qp->path_mtu); + + return (len <= qp->max_inline_data && len <= mtu); + } +@@ -1727,7 +1727,8 @@ static void fill_recv_sge_to_wqe(struct ibv_recv_wr *wr, void *wqe, + unsigned int max_sge, bool rsv) + { + struct hns_roce_v2_wqe_data_seg *dseg = wqe; +- unsigned int i, cnt; ++ unsigned int cnt; ++ int i; + + for (i = 0, cnt = 0; i < wr->num_sge; i++) { + /* Skip zero-length sge */ +@@ -2090,7 +2091,7 @@ static int check_post_srq_valid(struct hns_roce_srq *srq, + static int get_wqe_idx(struct hns_roce_srq *srq, unsigned int *wqe_idx) + { + struct hns_roce_idx_que *idx_que = &srq->idx_que; +- int bit_num; ++ unsigned int bit_num; + int i; + + /* bitmap[i] is set zero if all bits are allocated */ +@@ -2499,7 +2500,7 @@ static void set_sgl_rc(struct hns_roce_v2_wqe_data_seg *dseg, + unsigned int mask = qp->ex_sge.sge_cnt - 1; + unsigned int msg_len = 0; + unsigned int cnt = 0; +- int i; ++ unsigned int i; + + for (i = 0; i < num_sge; i++) { + if (!sge[i].length) +diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c +index 0708b95..1ea7501 100644 +--- a/providers/hns/hns_roce_u_verbs.c ++++ b/providers/hns/hns_roce_u_verbs.c +@@ -510,7 +510,7 @@ static int verify_cq_create_attr(struct ibv_cq_init_attr_ex *attr, + { + struct hns_roce_pad *pad = to_hr_pad(attr->parent_domain); + +- if (!attr->cqe || attr->cqe > context->max_cqe) { ++ if (!attr->cqe || attr->cqe > (uint32_t)context->max_cqe) { + verbs_err(&context->ibv_ctx, "unsupported cq depth %u.\n", + attr->cqe); + return EINVAL; +@@ -1497,7 +1497,7 @@ static int alloc_recv_rinl_buf(uint32_t max_sge, + struct hns_roce_rinl_buf *rinl_buf) + { + unsigned int cnt; +- int i; ++ unsigned int i; + + cnt = rinl_buf->wqe_cnt; + rinl_buf->wqe_list = calloc(cnt, +-- +2.33.0 + diff --git 
a/0105-libhns-Add-debug-log-for-lock-free-mode.patch b/0105-libhns-Add-debug-log-for-lock-free-mode.patch new file mode 100644 index 0000000000000000000000000000000000000000..28b07624385207a64e3a86dae54e99e32c087f0c --- /dev/null +++ b/0105-libhns-Add-debug-log-for-lock-free-mode.patch @@ -0,0 +1,46 @@ +From 8954a581ff8b82d6cb3cca93f8558c86091ea155 Mon Sep 17 00:00:00 2001 +From: Junxian Huang +Date: Thu, 24 Apr 2025 20:32:12 +0800 +Subject: [PATCH 105/105] libhns: Add debug log for lock-free mode + +Currently there is no way to observe whether the lock-free mode is +configured from the driver's perspective. Add debug log for this. + +Signed-off-by: Junxian Huang +--- + providers/hns/hns_roce_u_verbs.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c +index 1ea7501..8491431 100644 +--- a/providers/hns/hns_roce_u_verbs.c ++++ b/providers/hns/hns_roce_u_verbs.c +@@ -219,6 +219,7 @@ err: + struct ibv_pd *hns_roce_u_alloc_pad(struct ibv_context *context, + struct ibv_parent_domain_init_attr *attr) + { ++ struct hns_roce_pd *protection_domain; + struct hns_roce_pad *pad; + + if (ibv_check_alloc_parent_domain(attr)) +@@ -235,12 +236,16 @@ struct ibv_pd *hns_roce_u_alloc_pad(struct ibv_context *context, + return NULL; + } + ++ protection_domain = to_hr_pd(attr->pd); + if (attr->td) { + pad->td = to_hr_td(attr->td); + atomic_fetch_add(&pad->td->refcount, 1); ++ verbs_debug(verbs_get_ctx(context), ++ "set PAD(0x%x) to lock-free mode.\n", ++ protection_domain->pdn); + } + +- pad->pd.protection_domain = to_hr_pd(attr->pd); ++ pad->pd.protection_domain = protection_domain; + atomic_fetch_add(&pad->pd.protection_domain->refcount, 1); + + atomic_init(&pad->pd.refcount, 1); +-- +2.33.0 + diff --git a/rdma-core.spec b/rdma-core.spec index 131cbd6a67a740033ba3e732fcd5fd0628cfcd1b..6a12bd44d93f5bc00169c317e8cc3dfc0db1ff08 100644 --- a/rdma-core.spec +++ b/rdma-core.spec @@ -1,6 +1,6 @@ Name: rdma-core Version: 41.0 -Release: 35 +Release: 36 Summary: RDMA core userspace libraries and daemons License: GPLv2 or BSD Url: https://github.com/linux-rdma/rdma-core @@ -104,6 +104,13 @@ patch95: 0095-libhns-Adapt-UD-inline-data-size-for-UCX.patch patch96: 0096-libhns-Fix-wrong-order-of-spin_unlock-in-modify_qp.patch patch97: 0097-libxscale-Match-dev-by-vid-and-did.patch patch98: 0098-libxscale-update-to-version-2412GA.patch +patch99: 0099-libhns-Fix-double-free-of-rinl-buf-wqe-list.patch +patch100: 0100-libhns-Fix-ret-not-assigned-in-create-srq.patch +patch101: 0101-libhns-Add-error-logs-to-help-diagnosis.patch +patch102: 0102-libhns-Sync-lock-free-codes-from-mainline-driver.patch +patch103: 0103-verbs-Assign-ibv-srq-pd-when-creating-SRQ.patch +patch104: 0104-libhns-Clean-up-data-type-issues.patch +patch105: 0105-libhns-Add-debug-log-for-lock-free-mode.patch BuildRequires: binutils cmake >= 2.8.11 gcc libudev-devel pkgconfig pkgconfig(libnl-3.0) BuildRequires: pkgconfig(libnl-route-3.0) valgrind-devel systemd systemd-devel @@ -354,6 +361,12 @@ fi %{_mandir}/* %changelog +* Tue Jul 8 2025 Guofeng Yue - 41.0-36 +- Type: bugfix +- ID: NA +- SUG: NA +- DESC: Sync some patches for libhns + * Wed May 14 2025 Xin Tian - 41.0-35 - Type: feature - ID: NA