diff --git a/example/multiStream/multiStream_multiGraph_demo.cpp b/example/multiStream/multiStream_multiGraph_demo.cpp
index ff6491a004788a06b9e2bb0b2a13f16f34a35f2c..17a0e039e0a57422cc3b3eb3c41598759f0f3b05 100644
--- a/example/multiStream/multiStream_multiGraph_demo.cpp
+++ b/example/multiStream/multiStream_multiGraph_demo.cpp
@@ -237,8 +237,7 @@ int main()
 
     packRW.outTensors.resize(outTensorNum);
     operationWR->InferShape(intensorDescs, outtensorDescs);
-    aclError ret;
-    ret = CreateInTensors(packWR.inTensors, intensorDescs);
+    aclError ret = CreateInTensors(packWR.inTensors, intensorDescs);
     if (ret != 0) {
         exit(ret);
     }
diff --git a/example/multiStream/multiStream_singleGraph_demo.cpp b/example/multiStream/multiStream_singleGraph_demo.cpp
index d95873fdc85b3870f0929f2675c6cdbc2244cedc..e1eb53afacbb41606813c9002614047c954bb4e5 100644
--- a/example/multiStream/multiStream_singleGraph_demo.cpp
+++ b/example/multiStream/multiStream_singleGraph_demo.cpp
@@ -264,8 +264,7 @@ int main()
     outtensorDescs.resize(outTensorNum);
     pack.outTensors.resize(outTensorNum);
     operation->InferShape(intensorDescs, outtensorDescs);
-    aclError ret;
-    ret = CreateOutTensors(pack.outTensors, outtensorDescs);
+    aclError ret = CreateOutTensors(pack.outTensors, outtensorDescs);
     if (ret != 0) {
         exit(ret);
     }
diff --git a/src/kernels/mixkernels/gmm_deq_swiglu_quant_gmm_deq/gmm_deq_swiglu_quant_gmm_deq_operation.cpp b/src/kernels/mixkernels/gmm_deq_swiglu_quant_gmm_deq/gmm_deq_swiglu_quant_gmm_deq_operation.cpp
index c03469b145a36e07f56e5fcdb0b3dc7e4ebcd5a8..1337ebf55d0f2fee79846b0c4e7bffccd0673f1e 100644
--- a/src/kernels/mixkernels/gmm_deq_swiglu_quant_gmm_deq/gmm_deq_swiglu_quant_gmm_deq_operation.cpp
+++ b/src/kernels/mixkernels/gmm_deq_swiglu_quant_gmm_deq/gmm_deq_swiglu_quant_gmm_deq_operation.cpp
@@ -102,9 +102,9 @@ private:
                   "Param groupListType only support GROUP_LIST_CUM_SUM (0).", return false);
         MKI_CHECK(param.weightUpPermuteType != OpParam::GmmDeqSwigluQuantGmmDeq::PERMUTE_INVALID,
                   "Param weightUpPermuteType has invalid value.", return false);
-        MKI_CHECK(param.transposeWeightUp == false,
+        MKI_CHECK(!param.transposeWeightUp,
                   "Param transposeWeightUp only support false.", return false);
-        MKI_CHECK(param.transposeWeightDown == true,
+        MKI_CHECK(param.transposeWeightDown,
                   "Param transposeWeightDown only support true.", return false);
         return true;
     }
diff --git a/src/kernels/mixkernels/mm_deq_swiglu_quant_mm_deq/mm_deq_swiglu_quant_mm_deq_operation.cpp b/src/kernels/mixkernels/mm_deq_swiglu_quant_mm_deq/mm_deq_swiglu_quant_mm_deq_operation.cpp
index 2a4021818aa9eb9d2e05d8c82c65fe2123cd37f1..135beb06e24d719eebf937a2856a7fedf9740a9c 100644
--- a/src/kernels/mixkernels/mm_deq_swiglu_quant_mm_deq/mm_deq_swiglu_quant_mm_deq_operation.cpp
+++ b/src/kernels/mixkernels/mm_deq_swiglu_quant_mm_deq/mm_deq_swiglu_quant_mm_deq_operation.cpp
@@ -94,9 +94,9 @@ private:
                   "Param outputType only support OUTPUT_FLOAT16 (0).", return false);
         MKI_CHECK(param.weightUpPermuteType != OpParam::MmDeqSwigluQuantMmDeq::PERMUTE_INVALID,
                   "Param weightUpPermuteType has invalid value.", return false);
-        MKI_CHECK(param.transposeWeightUp == false,
+        MKI_CHECK(!param.transposeWeightUp,
                   "Param transposeWeightUp only support false.", return false);
-        MKI_CHECK(param.transposeWeightDown == true,
+        MKI_CHECK(param.transposeWeightDown,
                   "Param transposeWeightDown only support true.", return false);
         return true;
     }
diff --git a/src/ops_infer/gmm_deq_swiglu_quant_gmm_deq/gmm_deq_swiglu_quant_gmm_deq_operation.cpp b/src/ops_infer/gmm_deq_swiglu_quant_gmm_deq/gmm_deq_swiglu_quant_gmm_deq_operation.cpp
index e63c92e9882aadf5294fc4e23abdc3ca1f0b1081..de8be6983146e14fcbe139b7f68ff0374c7b7967 100644
--- a/src/ops_infer/gmm_deq_swiglu_quant_gmm_deq/gmm_deq_swiglu_quant_gmm_deq_operation.cpp
+++ b/src/ops_infer/gmm_deq_swiglu_quant_gmm_deq/gmm_deq_swiglu_quant_gmm_deq_operation.cpp
@@ -102,12 +102,12 @@ bool ParamCheck(const atb::infer::GmmDeqSwigluQuantGmmDeqParam &opParam)
         return false;
     }
 
-    if (opParam.transposeWeightUp != false) {
+    if (opParam.transposeWeightUp) {
         ATB_LOG(ERROR) << "Param transposeWeightUp only support false.";
         return false;
     }
 
-    if (opParam.transposeWeightDown != true) {
+    if (!opParam.transposeWeightDown) {
         ATB_LOG(ERROR) << "Param transposeWeightDown only support true.";
         return false;
     }
diff --git a/src/ops_infer/mm_deq_swiglu_quant_mm_deq/mm_deq_swiglu_quant_mm_deq_operation.cpp b/src/ops_infer/mm_deq_swiglu_quant_mm_deq/mm_deq_swiglu_quant_mm_deq_operation.cpp
index d96602ba46c7e7acd690729a54546aadc04ecc3e..64cf34fb124331696f7cf4fd6b4dd5a2285494d1 100644
--- a/src/ops_infer/mm_deq_swiglu_quant_mm_deq/mm_deq_swiglu_quant_mm_deq_operation.cpp
+++ b/src/ops_infer/mm_deq_swiglu_quant_mm_deq/mm_deq_swiglu_quant_mm_deq_operation.cpp
@@ -87,12 +87,12 @@ bool ParamCheck(const atb::infer::MmDeqSwigluQuantMmDeqParam &opParam)
         return false;
     }
 
-    if (opParam.transposeWeightUp != false) {
+    if (opParam.transposeWeightUp) {
         ATB_LOG(ERROR) << "Param transposeWeightUp only support false.";
         return false;
     }
 
-    if (opParam.transposeWeightDown != true) {
+    if (!opParam.transposeWeightDown) {
         ATB_LOG(ERROR) << "Param transposeWeightDown only support true.";
         return false;
     }
diff --git a/src/ops_infer/multi_latent_attention/multi_latent_attention_operation.cpp b/src/ops_infer/multi_latent_attention/multi_latent_attention_operation.cpp
index 8bdfceafde218f8b38958632a5a09bf7ce6ae743..4cfbc6cde008684b24064210d892139ef43e57f0 100644
--- a/src/ops_infer/multi_latent_attention/multi_latent_attention_operation.cpp
+++ b/src/ops_infer/multi_latent_attention/multi_latent_attention_operation.cpp
@@ -461,7 +461,7 @@ Status MultiLatentAttentionOperation::DimCheckInt8Nz(const SVector<TensorDesc> &
         return ERROR_INVALID_TENSOR_DIM;
     }
     if (inTensorDesc.at(idx + 1).shape.dims[0] != param_.headNum) {
-        ATB_LOG(ERROR) << GetLogPrefix() << "dim 0 of of pvDescale(intensor" << idx + 1
+        ATB_LOG(ERROR) << GetLogPrefix() << "dim 0 of pvDescale(intensor" << (idx + 1)
                        << ") should be equal to dim0 of headNum";
         return ERROR_INVALID_TENSOR_DIM;
     }
diff --git a/src/ops_infer/ring_mla/ring_mla_operation.cpp b/src/ops_infer/ring_mla/ring_mla_operation.cpp
index 8106fdd3bdab29439618697d760480d2c5010853..0376eabba7187d738aeea636570ba258c652095d 100644
--- a/src/ops_infer/ring_mla/ring_mla_operation.cpp
+++ b/src/ops_infer/ring_mla/ring_mla_operation.cpp
@@ -394,8 +394,7 @@ bool RingMLAOperation::InputLseDimCheck(const SVector<TensorDesc> &inTensorDescs
 
 Status RingMLAOperation::InferShapeCheckImpl(const SVector<TensorDesc> &inTensorDescs) const
 {
-    Status st;
-    st = DimCheck(inTensorDescs);
+    Status st = DimCheck(inTensorDescs);
     if (st != NO_ERROR) {
         return st;
     }