diff --git a/drivers/infiniband/hw/hns/hns_roce_bond.c b/drivers/infiniband/hw/hns/hns_roce_bond.c
index 1f5eb0eca246e606cb69dbb0c8320122726be0c1..294d42521ff21af23741cb729c36b5a27aca33f8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_bond.c
+++ b/drivers/infiniband/hw/hns/hns_roce_bond.c
@@ -3,7 +3,6 @@
  * Copyright (c) 2022 Hisilicon Limited.
  */
 
-#include "hnae3.h"
 #include "hns_roce_device.h"
 #include "hns_roce_hw_v2.h"
 #include "hns_roce_bond.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_dca.c b/drivers/infiniband/hw/hns/hns_roce_dca.c
index 996fe0387e1b39877754c7e665e8a664c506a2fe..04c4ffaa0d2078a7dfa351cf537a7a946ace2316 100644
--- a/drivers/infiniband/hw/hns/hns_roce_dca.c
+++ b/drivers/infiniband/hw/hns/hns_roce_dca.c
@@ -1145,7 +1145,7 @@ static void load_kdca_param(struct hns_roce_dca_ctx *ctx)
 	else
 		ctx->max_size = roundup(dca_max_size, unit_size);
 
-	if (dca_min_size == DCA_MAX_MEM_SIZE)
+	if (dca_min_size == DCA_MAX_MEM_SIZE || dca_min_size > dca_max_size)
 		ctx->min_size = ctx->max_size;
 	else
 		ctx->min_size = roundup(dca_min_size, unit_size);
diff --git a/drivers/infiniband/hw/hns/hns_roce_debugfs.c b/drivers/infiniband/hw/hns/hns_roce_debugfs.c
index 756e43cace431b6194bbcc8edf280ec27ab770d8..edd7876e206611083a7a333e5231c31372675898 100644
--- a/drivers/infiniband/hw/hns/hns_roce_debugfs.c
+++ b/drivers/infiniband/hw/hns/hns_roce_debugfs.c
@@ -193,6 +193,12 @@ static u64 calc_loading_percent(size_t total, size_t free, u32 *out_rem)
 
 	all_pages = total >> HNS_HW_PAGE_SHIFT;
 	free_pages = free >> HNS_HW_PAGE_SHIFT;
+
+	if (!all_pages) {
+		percent = 0;
+		goto out;
+	}
+
 	if (all_pages >= free_pages) {
 		used_pages = all_pages - free_pages;
 		scale = LOADING_PERCENT_SCALE * LOADING_PERCENT_SCALE;
@@ -200,6 +206,7 @@ static u64 calc_loading_percent(size_t total, size_t free, u32 *out_rem)
 		percent = div_u64_rem(percent, LOADING_PERCENT_SCALE, &rem);
 	}
 
+out:
 	if (out_rem)
 		*out_rem = rem;
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index dcbdd9b43221195e5a1a4765f687ec162f9dc815..09db001ce2efc4a6699821a874d9cf32b0a56cea 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -952,6 +952,7 @@ struct hns_roce_caps {
 	u16		default_ceq_arm_st;
 	u8		cong_cap;
 	enum hns_roce_cong_type	default_cong_type;
+	u32 max_ack_req_msg_len;
 };
 
 enum hns_roce_device_state {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 22f94889f03ce2c283125c97bdff71a730c963b7..a8dccba39927cffa262fba6d2358a4c657e58e8c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -249,15 +249,12 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 }
 
 static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
-					       unsigned long hem_alloc_size,
-					       gfp_t gfp_mask)
+					       unsigned long hem_alloc_size)
 {
 	struct hns_roce_hem *hem;
 	int order;
 	void *buf;
 
-	WARN_ON(gfp_mask & __GFP_HIGHMEM);
-
 	order = get_order(hem_alloc_size);
 	if (PAGE_SIZE << order != hem_alloc_size) {
 		dev_err(hr_dev->dev, "invalid hem_alloc_size: %lu!\n",
@@ -265,13 +262,12 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 		return NULL;
 	}
 
-	hem = kmalloc(sizeof(*hem),
-		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+	hem = kmalloc(sizeof(*hem), GFP_KERNEL);
 	if (!hem)
 		return NULL;
 
 	buf = dma_alloc_coherent(hr_dev->dev, hem_alloc_size,
-				 &hem->dma, gfp_mask);
+				 &hem->dma, GFP_KERNEL);
 	if (!buf)
 		goto fail;
 
@@ -378,7 +374,6 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
 {
 	u32 bt_size = mhop->bt_chunk_size;
 	struct device *dev = hr_dev->dev;
-	gfp_t flag;
 	u64 bt_ba;
 	u32 size;
 	int ret;
@@ -417,8 +412,7 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
 	 * alloc bt space chunk for MTT/CQE.
 	 */
 	size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
-	flag = GFP_KERNEL | __GFP_NOWARN;
-	table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size, flag);
+	table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size);
 	if (!table->hem[index->buf]) {
 		ret = -ENOMEM;
 		goto err_alloc_hem;
@@ -546,9 +540,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 		goto out;
 	}
 
-	table->hem[i] = hns_roce_alloc_hem(hr_dev,
-					   table->table_chunk_size,
-					   GFP_KERNEL | __GFP_NOWARN);
+	table->hem[i] = hns_roce_alloc_hem(hr_dev, table->table_chunk_size);
 	if (!table->hem[i]) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 595292fb88f7bf7beb7a787779444a334882125d..f9e4e9aebd45881a7754412af89f4bcf55857cf8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2351,31 +2351,36 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
 
 static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
 {
-	struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
+	struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM] = {};
 	struct hns_roce_caps *caps = &hr_dev->caps;
 	struct hns_roce_query_pf_caps_a *resp_a;
 	struct hns_roce_query_pf_caps_b *resp_b;
 	struct hns_roce_query_pf_caps_c *resp_c;
 	struct hns_roce_query_pf_caps_d *resp_d;
 	struct hns_roce_query_pf_caps_e *resp_e;
+	struct hns_roce_query_pf_caps_f *resp_f;
 	enum hns_roce_opcode_type cmd;
 	int ctx_hop_num;
 	int pbl_hop_num;
+	int cmd_num;
 	int ret;
 	int i;
 
 	cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM :
 	      HNS_ROCE_OPC_QUERY_PF_CAPS_NUM;
+	cmd_num = hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
+		  HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 :
+		  HNS_ROCE_QUERY_PF_CAPS_CMD_NUM;
 
-	for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
+	for (i = 0; i < cmd_num; i++) {
 		hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true);
-		if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
+		if (i < (cmd_num - 1))
 			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
 		else
 			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
 	}
 
-	ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
+	ret = hns_roce_cmq_send(hr_dev, desc, cmd_num);
 	if (ret)
 		return ret;
 
@@ -2384,6 +2389,7 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
 	resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
 	resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
 	resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
+	resp_f = (struct hns_roce_query_pf_caps_f *)desc[5].data;
 
 	caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
 	caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
@@ -2451,6 +2457,8 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
 	caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
 	caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
 
+	caps->max_ack_req_msg_len = le32_to_cpu(resp_f->max_ack_req_msg_len);
+
 	caps->qpc_hop_num = ctx_hop_num;
 	caps->sccc_hop_num = ctx_hop_num;
 	caps->srqc_hop_num = ctx_hop_num;
@@ -2794,7 +2802,7 @@ static int hns_roce_v2_get_reset_page(struct hns_roce_dev *hr_dev)
 	return 0;
 
 err_with_vmap:
-	put_page(hr_dev->reset_page);
+	__free_page(hr_dev->reset_page);
 	return -ENOMEM;
 }
 
@@ -2802,7 +2810,7 @@ static void hns_roce_v2_put_reset_page(struct hns_roce_dev *hr_dev)
 {
 	vunmap(hr_dev->reset_kaddr);
 	hr_dev->reset_kaddr = NULL;
-	put_page(hr_dev->reset_page);
+	__free_page(hr_dev->reset_page);
 	hr_dev->reset_page = NULL;
 }
 
@@ -3196,7 +3204,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
 
 	ret = get_hem_table(hr_dev);
 	if (ret)
-		goto err_clear_extdb_failed;
+		goto err_get_hem_table_failed;
 
 	if (hr_dev->is_vf)
 		return 0;
@@ -3211,6 +3219,8 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
 
 err_llm_init_failed:
 	put_hem_table(hr_dev);
+err_get_hem_table_failed:
+	hns_roce_function_clear(hr_dev);
 err_clear_extdb_failed:
 	hns_roce_v2_put_reset_page(hr_dev);
 error_get_reset_page_failed:
@@ -4901,6 +4911,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	enum ib_mtu ib_mtu;
 	u8 ack_req_freq;
 	const u8 *smac;
+	int lp_msg_len;
 	u8 lp_pktn_ini;
 	u64 *mtts;
 	u8 *dmac;
@@ -4994,7 +5005,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 		return -EINVAL;
 #define MIN_LP_MSG_LEN 1024
 	/* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
-	lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);
+	lp_msg_len = max(mtu, MIN_LP_MSG_LEN);
+	lp_pktn_ini = ilog2(lp_msg_len / mtu);
 
 	if (attr_mask & IB_QP_PATH_MTU) {
 		hr_reg_write(context, QPC_MTU, ib_mtu);
@@ -5004,23 +5016,21 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
 	hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
 
-#define MAX_ACK_REQ_MSG_LEN 65536
 	/*
 	 * There are several constraints for ACK_REQ_FREQ:
 	 * 1. mtu * (2 ^ ACK_REQ_FREQ) should not be too large, otherwise
 	 *    it may cause some unexpected retries when sending large
-	 *    payload. 64K is a recommended value.
+	 *    payload.
 	 * 2. ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI.
-	 *    But we don't need to add a check since the calculation here
-	 *    already guarantees this.
 	 * 3. ACK_REQ_FREQ must be equal to LP_PKTN_INI when using LDCP
 	 *    or HC3 congestion control algorithm.
 	 */
 	if (hr_qp->cong_type == CONG_TYPE_LDCP ||
-	    hr_qp->cong_type == CONG_TYPE_HC3)
+	    hr_qp->cong_type == CONG_TYPE_HC3 ||
+	    hr_dev->caps.max_ack_req_msg_len < lp_msg_len)
 		ack_req_freq = lp_pktn_ini;
 	else
-		ack_req_freq = ilog2(MAX_ACK_REQ_MSG_LEN / mtu);
+		ack_req_freq = ilog2(hr_dev->caps.max_ack_req_msg_len / mtu);
 	hr_reg_write(context, QPC_ACK_REQ_FREQ, ack_req_freq);
 	hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
 
@@ -5704,11 +5714,10 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
-	struct hns_roce_v2_qp_context ctx[2];
-	struct hns_roce_v2_qp_context *context = ctx;
-	struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
+	struct hns_roce_v2_qp_context *context;
+	struct hns_roce_v2_qp_context *qpc_mask;
 	struct ib_device *ibdev = &hr_dev->ib_dev;
-	int ret;
+	int ret = -ENOMEM;
 
 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
 		return -EOPNOTSUPP;
@@ -5719,7 +5728,11 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 	 * we should set all bits of the relevant fields in context mask to
 	 * 0 at the same time, else set them to 0x1.
 	 */
-	memset(context, 0, hr_dev->caps.qpc_sz);
+	context = kvzalloc(sizeof(*context), GFP_KERNEL);
+	qpc_mask = kvzalloc(sizeof(*qpc_mask), GFP_KERNEL);
+	if (!context || !qpc_mask)
+		goto out;
+
 	memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
 
 	ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
@@ -5764,6 +5777,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 		hns_roce_modify_dca(hr_dev, hr_qp, udata);
 
 out:
+	kvfree(qpc_mask);
+	kvfree(context);
 	return ret;
 }
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 3d519b828cbd9f57759e8d919cb4568b3173a900..a9948a40bbab6b6f93c48e2cd8f1a6d463506b52 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1181,7 +1181,8 @@ struct hns_roce_cfg_gmv_tb_b {
 #define GMV_TB_B_SMAC_H GMV_TB_B_FIELD_LOC(47, 32)
 #define GMV_TB_B_SGID_IDX GMV_TB_B_FIELD_LOC(71, 64)
 
-#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 5
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 6
 struct hns_roce_query_pf_caps_a {
 	u8 number_ports;
 	u8 local_ca_ack_delay;
@@ -1293,6 +1294,11 @@ struct hns_roce_query_pf_caps_e {
 	__le16 aeq_period;
 };
 
+struct hns_roce_query_pf_caps_f {
+	__le32 max_ack_req_msg_len;
+	__le32 rsv[5];
+};
+
 #define PF_CAPS_E_FIELD_LOC(h, l) \
 	FIELD_LOC(struct hns_roce_query_pf_caps_e, h, l)
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 18aba51ee1f51ddc5f01b4d77e30219214b0d892..f85f2bb6f0627f7125e052681001db765887fed4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -1236,9 +1236,7 @@ static void hns_roce_teardown_hca(struct hns_roce_dev *hr_dev)
 	mutex_destroy(&hr_dev->db_unfree_list_mutex);
 	mutex_destroy(&hr_dev->mtr_unfree_list_mutex);
 	mutex_destroy(&hr_dev->uctx_list_mutex);
-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
-	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
-		mutex_destroy(&hr_dev->pgdir_mutex);
+	mutex_destroy(&hr_dev->pgdir_mutex);
 }
 
 /**
@@ -1265,11 +1263,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 	INIT_LIST_HEAD(&hr_dev->db_unfree_list);
 	mutex_init(&hr_dev->db_unfree_list_mutex);
 
-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
-	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
-		INIT_LIST_HEAD(&hr_dev->pgdir_list);
-		mutex_init(&hr_dev->pgdir_mutex);
-	}
+	INIT_LIST_HEAD(&hr_dev->pgdir_list);
+	mutex_init(&hr_dev->pgdir_mutex);
 
 	hns_roce_init_uar_table(hr_dev);
 
@@ -1304,9 +1299,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 
 err_uar_table_free:
 	ida_destroy(&hr_dev->uar_ida.ida);
-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
-	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
-		mutex_destroy(&hr_dev->pgdir_mutex);
+	mutex_destroy(&hr_dev->pgdir_mutex);
 	mutex_destroy(&hr_dev->db_unfree_list_mutex);
 	mutex_destroy(&hr_dev->mtr_unfree_list_mutex);
 	mutex_destroy(&hr_dev->uctx_list_mutex);
diff --git a/drivers/infiniband/hw/hns/hns_roce_sysfs.c b/drivers/infiniband/hw/hns/hns_roce_sysfs.c
index cebb0229dd0833ad261e43527d7aa3643f912af4..cd498c5ea4178f0812b7759eaaadcdfc53fadea2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_sysfs.c
+++ b/drivers/infiniband/hw/hns/hns_roce_sysfs.c
@@ -5,7 +5,6 @@
 
 #include
 
-#include "hnae3.h"
 #include "hns_roce_device.h"
 #include "hns_roce_hw_v2.h"
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 1082dd06a7db6fdba1f2704e8f1dd1013ef06c38..5a702b0147520ef9971dcbbf3adefaf59f4d1f57 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -967,9 +967,6 @@ struct hnae3_handle {
 
 	u8 netdev_flags;
 	struct dentry *hnae3_dbgfs;
-	/* protects concurrent contention between debugfs commands */
-	struct mutex dbgfs_lock;
-	char **dbgfs_buf;
 
 	/* Network interface message level enabled bits */
 	u32 msg_enable;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index c9e645e884546bab849b40f196e47efc005d2bbe..0cc52cbe80bd10c19126f2d473828e46f49326e8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1262,71 +1262,55 @@ static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
 static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
 			     size_t count, loff_t *ppos)
 {
-	struct hns3_dbg_data *dbg_data = filp->private_data;
+	char *buf = filp->private_data;
+
+	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+static int hns3_dbg_open(struct inode *inode, struct file *filp)
+{
+	struct hns3_dbg_data *dbg_data = inode->i_private;
 	struct hnae3_handle *handle = dbg_data->handle;
 	struct hns3_nic_priv *priv = handle->priv;
-	ssize_t size = 0;
-	char **save_buf;
-	char *read_buf;
 	u32 index;
+	char *buf;
 	int ret;
 
+	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+		return -EBUSY;
+
 	ret = hns3_dbg_get_cmd_index(dbg_data, &index);
 	if (ret)
 		return ret;
 
-	mutex_lock(&handle->dbgfs_lock);
-	save_buf = &handle->dbgfs_buf[index];
-
-	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
-	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	if (*save_buf) {
-		read_buf = *save_buf;
-	} else {
-		read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
-		if (!read_buf) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		/* save the buffer addr until the last read operation */
-		*save_buf = read_buf;
-	}
-
-	/* get data ready for the first time to read */
-	if (!*ppos) {
-		ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
-					read_buf, hns3_dbg_cmd[index].buf_len);
-		if (ret)
-			goto out;
-	}
+	buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
-	size = simple_read_from_buffer(buffer, count, ppos, read_buf,
-				       strlen(read_buf));
-	if (size > 0) {
-		mutex_unlock(&handle->dbgfs_lock);
-		return size;
+	ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
+				buf, hns3_dbg_cmd[index].buf_len);
+	if (ret) {
+		kvfree(buf);
+		return ret;
 	}
 
-out:
-	/* free the buffer for the last read operation */
-	if (*save_buf) {
-		kvfree(*save_buf);
-		*save_buf = NULL;
-	}
+	filp->private_data = buf;
+	return 0;
+}
 
-	mutex_unlock(&handle->dbgfs_lock);
-	return ret;
+static int hns3_dbg_release(struct inode *inode, struct file *filp)
+{
+	kvfree(filp->private_data);
+	filp->private_data = NULL;
+	return 0;
 }
 
 static const struct file_operations hns3_dbg_fops = {
 	.owner = THIS_MODULE,
-	.open = simple_open,
+	.open = hns3_dbg_open,
 	.read = hns3_dbg_read,
+	.release = hns3_dbg_release,
 };
 
 static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
@@ -1383,13 +1367,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
 	int ret;
 	u32 i;
 
-	handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
-					 ARRAY_SIZE(hns3_dbg_cmd),
-					 sizeof(*handle->dbgfs_buf),
-					 GFP_KERNEL);
-	if (!handle->dbgfs_buf)
-		return -ENOMEM;
-
 	hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
 				debugfs_create_dir(name, hns3_dbgfs_root);
 	handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
@@ -1399,8 +1376,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
 			debugfs_create_dir(hns3_dbg_dentry[i].name,
 					   handle->hnae3_dbgfs);
 
-	mutex_init(&handle->dbgfs_lock);
-
 	for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
 		if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
 		     ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1429,24 +1404,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
 out:
 	debugfs_remove_recursive(handle->hnae3_dbgfs);
 	handle->hnae3_dbgfs = NULL;
-	mutex_destroy(&handle->dbgfs_lock);
 	return ret;
 }
 
 void hns3_dbg_uninit(struct hnae3_handle *handle)
 {
-	u32 i;
-
 	debugfs_remove_recursive(handle->hnae3_dbgfs);
 	handle->hnae3_dbgfs = NULL;
-
-	for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
-		if (handle->dbgfs_buf[i]) {
-			kvfree(handle->dbgfs_buf[i]);
-			handle->dbgfs_buf[i] = NULL;
-		}
-
-	mutex_destroy(&handle->dbgfs_lock);
 }
 
 void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index dc00fc5113652a33305dc2ebb0d8491a07ef5690..4a5c9fff17c6d9185e4dc67126606d8abdda02f1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1635,19 +1635,24 @@ static const struct hclge_dbg_item tm_pri_items[] = {
 
 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
 {
-	char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
 	char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
 	char content[HCLGE_DBG_TM_INFO_LEN];
 	u8 pri_num, sch_mode, weight, i, j;
+	char *data_str;
 	int pos, ret;
 
 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
 	if (ret)
 		return ret;
 
+	data_str = kcalloc(ARRAY_SIZE(tm_pri_items), HCLGE_DBG_DATA_STR_LEN,
+			   GFP_KERNEL);
+	if (!data_str)
+		return -ENOMEM;
+
 	for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
-		result[i] = &data_str[i][0];
+		result[i] = &data_str[i * HCLGE_DBG_DATA_STR_LEN];
 
 	hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, NULL,
 			       ARRAY_SIZE(tm_pri_items));
@@ -1656,23 +1661,23 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
 	for (i = 0; i < pri_num; i++) {
 		ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
 		if (ret)
-			return ret;
+			goto out;
 
 		ret = hclge_tm_get_pri_weight(hdev, i, &weight);
 		if (ret)
-			return ret;
+			goto out;
 
 		ret = hclge_tm_get_pri_shaper(hdev, i,
 					      HCLGE_OPC_TM_PRI_C_SHAPPING,
 					      &c_shaper_para);
 		if (ret)
-			return ret;
+			goto out;
 
 		ret = hclge_tm_get_pri_shaper(hdev, i,
 					      HCLGE_OPC_TM_PRI_P_SHAPPING,
 					      &p_shaper_para);
 		if (ret)
-			return ret;
+			goto out;
 
 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
 			       "sp";
@@ -1689,7 +1694,9 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
 		pos += scnprintf(buf + pos, len - pos, "%s", content);
 	}
 
-	return 0;
+out:
+	kfree(data_str);
+	return ret;
 }
 
 static const struct hclge_dbg_item tm_qset_items[] = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index e49eef9aa3fe91a2754390a2df52e03113ffe22f..d44e6c52ae335a1b4fa698d59df0127ddaf1c3f2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2398,6 +2398,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
 
 	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+
+	/* timer needs to be initialized before misc irq */
 	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
 
 	mutex_init(&hdev->mbx_resp.mbx_mutex);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 637d83a400d3e981fcf361b2b8a2b89eaa6b0bb4..809012bb2bc5a7e19ce75b89470ef1649e9f863b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -134,17 +134,17 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
 	reg += hclgevf_reg_get_header(reg);
 
 	/* fetching per-VF registers values from VF PCIe register space */
-	reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
 	reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_num, reg);
 	for (i = 0; i < reg_num; i++)
 		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
 
-	reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
+	reg_num = ARRAY_SIZE(common_reg_addr_list);
 	reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_num, reg);
 	for (i = 0; i < reg_num; i++)
 		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
 
-	reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
+	reg_num = ARRAY_SIZE(ring_reg_addr_list);
 	for (j = 0; j < hdev->num_tqps; j++) {
 		reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_num, reg);
 		tqp = &hdev->htqp[j].q;
@@ -154,7 +154,7 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
 						  ring_reg_addr_list[i]);
 	}
 
-	reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
 		reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_num, reg);
 		for (i = 0; i < reg_num; i++)
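
Note (not part of the patch): the QPC hunk in hns_roce_hw_v2.c replaces the fixed MAX_ACK_REQ_MSG_LEN constant with the firmware-reported caps->max_ack_req_msg_len when deriving ACK_REQ_FREQ. The standalone C sketch below simply replays that arithmetic in userspace so the relationship between LP_PKTN_INI and ACK_REQ_FREQ can be checked by hand; the ilog2_u32() helper and the sample mtu / cap values are illustrative and are not taken from the driver.

/* Standalone sketch (not part of the patch): mirrors the LP_PKTN_INI /
 * ACK_REQ_FREQ arithmetic from modify_qp_init_to_rtr(). ilog2_u32() and
 * the sample numbers in main() are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define MIN_LP_MSG_LEN 1024

/* floor(log2(v)) for v > 0, like the kernel's ilog2() on runtime values */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t mtu = 4096;                  /* path MTU in bytes (example) */
	uint32_t max_ack_req_msg_len = 65536; /* cap reported by firmware (example) */
	int cong_forces_equal = 0;            /* LDCP/HC3 congestion control would set this */
	uint32_t lp_msg_len, lp_pktn_ini, ack_req_freq;

	/* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
	lp_msg_len = mtu > MIN_LP_MSG_LEN ? mtu : MIN_LP_MSG_LEN;
	lp_pktn_ini = ilog2_u32(lp_msg_len / mtu);

	/* ACK_REQ_FREQ falls back to LP_PKTN_INI when the congestion control
	 * algorithm requires it or when the reported cap is smaller than the
	 * loopback message length; otherwise it is derived from the cap.
	 */
	if (cong_forces_equal || max_ack_req_msg_len < lp_msg_len)
		ack_req_freq = lp_pktn_ini;
	else
		ack_req_freq = ilog2_u32(max_ack_req_msg_len / mtu);

	printf("lp_pktn_ini=%u ack_req_freq=%u\n", lp_pktn_ini, ack_req_freq);
	return 0;
}

With mtu = 4096 and a 64 KB cap this prints lp_pktn_ini=0 ack_req_freq=4, matching the behaviour of the old fixed constant; a smaller reported cap makes ACK_REQ_FREQ fall back to LP_PKTN_INI, which is exactly the extra condition the patch adds.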