diff --git a/core/ops/linear/linear_ops_runner_builder.h b/core/ops/linear/linear_ops_runner_builder.h
index 05c8c07e0e639a99603761291999d991c5cc5213..fadf4f04f24fb8b6fef72c3199da1cb1b3deed82 100644
--- a/core/ops/linear/linear_ops_runner_builder.h
+++ b/core/ops/linear/linear_ops_runner_builder.h
@@ -32,7 +32,7 @@ public:
         char version[versionLen] = {0};
         AsdRtDeviceGetSocVersion(version, versionLen);
         ASD_LOG(INFO) << "SocVersion:" << std::string(version);
-        is910B_ = std::string(version) == "Ascend910B4";
+        is910B_ = std::string(version).find("Ascend910B") != std::string::npos;
     }
     virtual ~LinearOpsRunnerBuilder() = default;
     Runner *Build() override
diff --git a/examples/torch/layer/layer_torch.cpp b/examples/torch/layer/layer_torch.cpp
index 3adbe08243c46c89f19960caebabaa066de9dd62..9766034108f321318c998f9605b9195b2e31ec47 100644
--- a/examples/torch/layer/layer_torch.cpp
+++ b/examples/torch/layer/layer_torch.cpp
@@ -95,6 +95,34 @@ std::vector<torch::Tensor> LayerTorch::Execute(std::vector<torch::Tensor> inTensors)
     return outTensors;
 }
 
+void LayerTorch::ExecuteOut(std::vector<torch::Tensor> inTensors, std::vector<torch::Tensor> outTensors)
+{
+    AsdOps::Timer timer;
+    AclTransformer::VariantPack variantPack;
+    ASD_LOG(INFO) << "LayerTorch::ExecuteOut start";
+    for (size_t i = 0; i < inTensors.size(); ++i) {
+        inTensors.at(i) = inTensors.at(i).contiguous();
+        ASD_LOG(INFO) << "inTensors[" << i << "].options:" << inTensors.at(i).options()
+                      << ", data:" << inTensors.at(i).data_ptr();
+        variantPack.inTensors.push_back(ExampleUtil::AtTensor2AsdTensor(inTensors.at(i)));
+    }
+
+    for (size_t i = 0; i < outTensors.size(); ++i) {
+        outTensors.at(i) = outTensors.at(i).contiguous();
+        ASD_LOG(INFO) << "outTensors[" << i << "].options:" << outTensors.at(i).options()
+                      << ", data:" << outTensors.at(i).data_ptr();
+        variantPack.outTensors.push_back(ExampleUtil::AtTensor2AsdTensor(outTensors.at(i)));
+    }
+
+    AclTransformer::Handle handle = {ExampleUtil::GetCurrentStream()};
+    if (handle.stream != nullptr) {
+        ASD_LOG(INFO) << "LayerTorch::Get Handle success!";
+    }
+    layer_->Execute(handle, variantPack);
+
+    ASD_LOG(WARN) << "LayerTorch::ExecuteOut end, use time:" << timer.ElapsedMicroSecond() << " microsecond";
+}
+
 void LayerTorch::CreateAtOutTensors(const AsdOps::SVector<AsdOps::Tensor> &inTensors,
     std::vector<torch::Tensor> &atOutTensors)
 {
@@ -113,5 +141,6 @@ TORCH_LIBRARY(LayerTorch, m)
     m.class_<LayerTorch>("LayerTorch")
         .def(torch::init<std::string>())
         .def("execute", &LayerTorch::Execute)
+        .def("execute_out", &LayerTorch::ExecuteOut)
         .def("set_param", &LayerTorch::SetParam);
 }
\ No newline at end of file
diff --git a/examples/torch/layer/layer_torch.h b/examples/torch/layer/layer_torch.h
index b547417f7c9f05c6e29a4213fa675f7ef7ff988c..6adcd22ef95e792d16617bfdc6df0d157296bad5 100644
--- a/examples/torch/layer/layer_torch.h
+++ b/examples/torch/layer/layer_torch.h
@@ -27,6 +27,7 @@ public:
     ~LayerTorch();
     void SetParam(std::string param);
     std::vector<torch::Tensor> Execute(std::vector<torch::Tensor> inTensors);
+    void ExecuteOut(std::vector<torch::Tensor> inTensors, std::vector<torch::Tensor> outTensors);
     c10::intrusive_ptr<LayerTorch> clone() const { return c10::make_intrusive<LayerTorch>(layerName_); }
 
 private:
diff --git a/scripts/build.sh b/scripts/build.sh
index cc8fc428037ff9df616d849bf8802d4d2487d550..5bd841c03ea6e9f1ab4868556974f1bb1b8fb927 100644
--- a/scripts/build.sh
+++ b/scripts/build.sh
@@ -87,8 +87,8 @@ function fn_build_asdops()
     fi
 
     build_options="$build_options --output=$THIRD_PARTY_DIR"
-    echo "bash scripts/build.sh release $build_options"
-    bash scripts/build.sh release $build_options
+    echo "bash scripts/build.sh dev $build_options"
+    bash scripts/build.sh dev $build_options
 }
 
 function fn_build_nlohmann_json()