diff --git a/BUILD.gn b/BUILD.gn index 5bcce284a18292cd7b7d4e0eb6af9cd55a273601..28fc28623a40587387dcd9a3e9e69d747cb144f9 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import("//arkcompiler/ets_frontend/es2panda/es2abc_config.gni") +import("//build/config/components/ets_frontend/es2abc_config.gni") import("//arkcompiler/ets_runtime/js_runtime_config.gni") import("$build_root/toolchain/toolchain.gni") @@ -136,6 +136,22 @@ group("ark_js_unittest_suit") { "ecmascript/serializer/tests:unittest", "ecmascript/snapshot/tests:unittest", "ecmascript/tests:unittest", + "common_components/base/tests:unittest", + "common_components/common_runtime/tests:unittest", + "common_components/common/tests:unittest", + "common_components/heap/allocator/tests:unittest", + "common_components/heap/ark_collector/tests:unittest", + "common_components/heap/barrier/tests:unittest", + "common_components/heap/collector/tests:unittest", + "common_components/heap/space/tests:unittest", + "common_components/heap/tests:unittest", + "common_components/mutator/tests:unittest", + "common_components/objects/tests:unittest", + "common_components/serialize/tests:unittest", + "common_components/thread/tests:unittest", + "common_components/log/tests:unittest", + "common_components/platform/unix/tests:unittest", + "common_components/taskpool/tests:unittest", ] if (is_ohos && is_standard_system) { deps += [ "test/fuzztest:fuzztest" ] @@ -151,43 +167,6 @@ group("ark_js_unittest_suit") { } } -group("common_components_js_unittest") { - testonly = true - deps = [] - - if (ets_runtime_enable_cmc_gc) { - deps += [ - #"common_components/base_runtime/tests:unittest", - #"common_components/heap/w_collector/tests:unittest", - #"common_components/base/tests:unittest", - #"common_components/heap/allocator/tests:unittest", - #"common_components/mutator/tests:unittest", - 
#"common_components/heap/collector/tests:unittest", - #"common_components/objects/tests:unittest", - #"common_components/heap/barrier/tests:unittest", - ] - } -} - -group("common_components_unittest") { - testonly = true - deps = [] - - if (ets_runtime_enable_cmc_gc) { - deps += [ - #"common_components/base_runtime/tests:host_unittest", - #"common_components/heap/w_collector/tests:host_unittest", - #"common_components/base/tests:host_unittest", - #"common_components/heap/allocator/tests:host_unittest", - #"common_components/mutator/tests:host_unittest", - #"common_components/heap/collector/tests:host_unittest", - #"common_components/objects/tests:host_unittest", - #"common_components/heap/barrier/tests:host_unittest", - ] - } -} - - group("ark_js_perform") { testonly = true deps = [] @@ -222,7 +201,24 @@ group("ark_unittest") { "ecmascript/serializer/tests:host_unittest", "ecmascript/snapshot/tests:host_unittest", "ecmascript/tests:host_unittest", + "common_components/base/tests:host_unittest", + "common_components/common_runtime/tests:host_unittest", + "common_components/common/tests:host_unittest", + "common_components/heap/allocator/tests:host_unittest", + "common_components/heap/ark_collector/tests:host_unittest", + "common_components/heap/barrier/tests:host_unittest", + "common_components/heap/collector/tests:host_unittest", + "common_components/heap/space/tests:host_unittest", + "common_components/heap/tests:host_unittest", + "common_components/mutator/tests:host_unittest", + "common_components/objects/tests:host_unittest", + "common_components/serialize/tests:host_unittest", + "common_components/thread/tests:host_unittest", + "common_components/log/tests:host_unittest", + "common_components/platform/unix/tests:host_unittest", + "common_components/taskpool/tests:host_unittest", ] + if (!run_with_asan) { if (!(ark_standalone_build && current_os == "ohos")) { deps += [ @@ -231,17 +227,6 @@ group("ark_unittest") { ] } } - if (ets_runtime_enable_cmc_gc) { - deps 
+= [ - #"common_components/base_runtime/tests:host_unittest", - #"common_components/heap/w_collector/tests:host_unittest", - #"common_components/base/tests:host_unittest", - #"common_components/heap/allocator/tests:host_unittest", - #"common_components/mutator/tests:host_unittest", - #"common_components/heap/barrier/tests:unittest", - #"common_components/heap/collector/tests:host_unittest", - ] - } } ohos_static_library("ark_runtime_host_unittest") { @@ -404,10 +389,6 @@ config("hiviewdfx_config") { if (enable_unwinder) { defines += [ "ENABLE_UNWINDER" ] } - if (enable_rss) { - defines += [ "ENABLE_RSS" ] - } - include_dirs = [ "$hilog_root/include" ] } @@ -416,12 +397,10 @@ config("ark_jsruntime_common_config") { defines = [ "PANDA_ENABLE_LTO" ] - if (is_ohos && current_cpu == "arm64") { + if (is_ohos) { defines += [ "ENABLE_COLD_STARTUP_GC_POLICY" ] } if (ets_runtime_enable_cmc_gc) { - defines += [ "DEFAULT_USE_CMC_GC" ] - defines += [ "ARK_HYBRID" ] defines += [ "USE_CMC_GC" ] if (ets_runtime_enable_cmc_rb_dfx) { defines += [ "ENABLE_CMC_RB_DFX" ] @@ -509,6 +488,13 @@ config("ark_jsruntime_common_config") { defines += [ "ANDROID_PLATFORM" ] } + if (is_ohos && is_standard_system && !is_arkui_x && !is_qemu_runtime && + defined(global_parts_info) && + defined(global_parts_info.resourceschedule_qos_manager)) { + defines += [ "ENABLE_QOS" ] + } + + if (ark_compile_mode != "release") { ark_profiler_features = [ "ECMASCRIPT_SUPPORT_CPUPROFILER", @@ -723,11 +709,11 @@ config("ark_jsruntime_common_config") { # ecmascript unit testcase config config("ecma_test_config") { visibility = [ + "./common_components/*", "./ecmascript/*", "./test/executiontest/*", "./test/fuzztest/*", "./tools/ap_file_viewer/*", - "./common_components/*", ] configs = [ @@ -778,6 +764,7 @@ ecma_source = [ "ecmascript/base/string_helper.cpp", "ecmascript/base/typed_array_helper.cpp", "ecmascript/base/bit_helper.cpp", + "ecmascript/builtin_entries.cpp", "ecmascript/builtins/builtins.cpp", 
"ecmascript/builtins/builtins_ark_tools.cpp", "ecmascript/builtins/builtins_array.cpp", @@ -917,7 +904,6 @@ ecma_source = [ "ecmascript/interpreter/interpreter-inl.cpp", "ecmascript/interpreter/slow_runtime_stub.cpp", "ecmascript/intl/locale_helper.cpp", - "ecmascript/jit/rewriter/reloc_rewriter_aarch64.cpp", "ecmascript/jit/compile_decision.cpp", "ecmascript/jit/jit.cpp", "ecmascript/jit/jit_dfx.cpp", @@ -1091,8 +1077,6 @@ ecma_source = [ "ecmascript/property_accessor.cpp", "ecmascript/serializer/base_deserializer.cpp", "ecmascript/serializer/base_serializer.cpp", - "ecmascript/serializer/inter_op_value_deserializer.cpp", - "ecmascript/serializer/inter_op_value_serializer.cpp", "ecmascript/serializer/module_deserializer.cpp", "ecmascript/serializer/module_serializer.cpp", "ecmascript/serializer/value_serializer.cpp", @@ -1186,7 +1170,9 @@ ecma_source += [ "ecmascript/cross_vm/jsnapi_expo_hybrid.cpp", "ecmascript/cross_vm/object_factory_hybrid.cpp", "ecmascript/cross_vm/ecma_vm_hybrid.cpp", - "ecmascript/cross_vm/js_thread_hybrid.cpp" + "ecmascript/cross_vm/js_thread_hybrid.cpp", + "ecmascript/serializer/inter_op_value_deserializer.cpp", + "ecmascript/serializer/inter_op_value_serializer.cpp", ] hitrace_scope_source = [] @@ -1441,8 +1427,8 @@ if (enable_target_compilation) { # Only use when cmc-gc enable ohos_source_set("libcommon_components_set") { sources = [ - "common_components/base_runtime/base_runtime.cpp", - "common_components/base_runtime/base_runtime_param.cpp", + "common_components/common_runtime/base_runtime.cpp", + "common_components/common_runtime/base_runtime_param.cpp", "common_components/heap/heap_allocator.cpp", "common_components/heap/heap_visitor.cpp", "common_components/profiler/heap_profiler_listener.cpp", @@ -1825,7 +1811,7 @@ ohos_prebuilt_etc("app_aot_jit_enable_list") { ohos_prebuilt_etc("app_startup_snapshot") { relative_install_dir = "ark" - source = "$js_root/ecmascript/ohos/app_startup_snapshot.conf" + source = 
"$js_root/ecmascript/ohos/app_startup_snapshot.json" # Set the subsystem name part_name = "ets_runtime" diff --git a/bundle.json b/bundle.json index 81a9ff1cd635b2cc98c8d1ac99aff7b80d63cda1..ecf7d1e2c7ae62018448f55dbe201a329ea64e52 100644 --- a/bundle.json +++ b/bundle.json @@ -49,7 +49,6 @@ "access_token", "eventhandler", "ipc", - "resource_schedule_service", "safwk", "samgr", "common_event_service", @@ -106,7 +105,6 @@ ], "test": [ "//arkcompiler/ets_runtime:ark_js_unittest", - "//arkcompiler/ets_runtime:common_components_js_unittest", "//arkcompiler/ets_runtime/compiler_service/test:compiler_service_unittest", "//arkcompiler/ets_runtime/compiler_service/test:compiler_service_fuzztest" ] diff --git a/common_components/BUILD.gn b/common_components/BUILD.gn index 23ca5bac129bc673bfdf6e5b04361c608b50091e..f18d39fedb1e758747298d6408454255744a0137 100755 --- a/common_components/BUILD.gn +++ b/common_components/BUILD.gn @@ -26,12 +26,21 @@ source_Base = [ source_Common = [ "common/run_type.cpp" ] source_Heap = [ + "heap/ark_collector/ark_collector.cpp", + "heap/ark_collector/copy_barrier.cpp", + "heap/ark_collector/idle_barrier.cpp", + "heap/ark_collector/enum_barrier.cpp", + "heap/ark_collector/marking_barrier.cpp", + "heap/ark_collector/remark_barrier.cpp", + "heap/ark_collector/preforward_barrier.cpp", + "heap/ark_collector/post_marking_barrier.cpp", "heap/heap.cpp", "heap/allocator/allocator.cpp", "heap/allocator/treap.cpp", "heap/allocator/memory_map.cpp", "heap/allocator/region_manager.cpp", - "heap/allocator/region_space.cpp", + "heap/allocator/regional_heap.cpp", + "heap/allocator/fix_heap.cpp", "heap/barrier/barrier.cpp", "heap/verification.cpp", "heap/collector/collector.cpp", @@ -43,16 +52,10 @@ source_Heap = [ "heap/collector/heuristic_gc_policy.cpp", "heap/collector/task_queue.cpp", "heap/collector/copy_data_manager.cpp", - "heap/w_collector/w_collector.cpp", - "heap/w_collector/copy_barrier.cpp", - "heap/w_collector/idle_barrier.cpp", - 
"heap/w_collector/enum_barrier.cpp", - "heap/w_collector/trace_barrier.cpp", - "heap/w_collector/remark_barrier.cpp", - "heap/w_collector/preforward_barrier.cpp", - "heap/w_collector/post_trace_barrier.cpp", - "heap/collector/trace_collector.cpp", + "heap/collector/marking_collector.cpp", "heap/space/from_space.cpp", + "heap/space/nonmovable_space.cpp", + "heap/space/large_space.cpp", "heap/space/old_space.cpp", "heap/space/to_space.cpp", "heap/space/young_space.cpp", @@ -138,7 +141,7 @@ config("common_components_common_config") { if (ets_runtime_enable_cmc_gc) { defines += [ "USE_CMC_GC" ] - if (is_ohos && current_cpu == "arm64") { + if (is_ohos) { defines += [ "ENABLE_COLD_STARTUP_GC_POLICY" ] } } else { @@ -359,8 +362,8 @@ ohos_shared_library("libark_common_components_test") { configs = [ ":common_components_test_config" ] sources = [ - "base_runtime/base_runtime.cpp", - "base_runtime/base_runtime_param.cpp", + "common_runtime/base_runtime.cpp", + "common_runtime/base_runtime_param.cpp", "base/utf_helper.cpp", "heap/heap_allocator.cpp", "log/log.cpp", @@ -410,12 +413,15 @@ ohos_shared_library("libark_common_components_test") { if (!ark_standalone_build) { public_external_deps = [ "zlib:libz", + "bounds_checking_function:libsec_shared", ] public_external_deps += hiviewdfx_ext_deps } else { external_deps += [ "zlib:libz", + "bounds_checking_function:libsec_shared", ] + external_deps += hiviewdfx_ext_deps } ldflags = [] @@ -441,3 +447,78 @@ ohos_shared_library("libark_common_components_test") { part_name = "ets_runtime" subsystem_name = "arkcompiler" } + +ohos_shared_library("libark_common_components_fuzz_test") { + testonly = true + stack_protector_ret = false + + configs = [ ":common_components_test_config" ] + + sources = [ + "log/log.cpp", + "base/utf_helper.cpp", + ] + + if (is_mingw) { + sources += [ + "platform/windows/cpu.cpp", + "platform/windows/os.cpp", + ] + } else if (is_mac) { + sources += [ + "platform/unix/mac/cpu.cpp", + "platform/unix/mac/os.cpp", 
+ ] + } else if (is_ohos || target_os == "android") { + sources += [ + "platform/unix/linux/cpu.cpp", + "platform/unix/linux/os.cpp", + ] + } else if (is_linux) { + sources += [ + "platform/unix/linux/cpu.cpp", + "platform/unix/linux/os.cpp", + ] + } + + # deps = [ ":libarkcommon-runtime" ] + + public_configs = [ ":common_components_public_config" ] + public_configs += [ "//arkcompiler/ets_runtime:include_llvm" ] + + external_deps = [] + if (!ark_standalone_build) { + public_external_deps = [ + "zlib:libz", + ] + public_external_deps += hiviewdfx_ext_deps + } else { + external_deps += [ + "zlib:libz", + ] + external_deps += hiviewdfx_ext_deps + } + + ldflags = [] + if (enable_coverage) { + ldflags += [ "--coverage" ] + cflags_cc = [ "--coverage" ] + } + if (!ark_standalone_build) { + ldflags += [ "-Wl,--lto-O0" ] + } + + install_enable = false + if (!is_mingw && !is_mac) { + output_extension = "so" + } + + if (!is_arkui_x) { + external_deps += [ "runtime_core:libarkfile_runtime_static" ] + } else { + deps = [ "$ark_root/libpandafile:libarkfile_runtime_static" ] + } + + part_name = "ets_runtime" + subsystem_name = "arkcompiler" +} diff --git a/common_components/base/ark_sanitizer.h b/common_components/base/ark_sanitizer.h index da1ee18a38df55786b8aecebd58d6e9b8f4e6334..1e2b3efb42a52ec099fd4cef79de014840f61ff5 100644 --- a/common_components/base/ark_sanitizer.h +++ b/common_components/base/ark_sanitizer.h @@ -24,7 +24,7 @@ #define ARK_ASAN_ON #endif -#ifdef USE_ASAN +#if defined(USE_ASAN) && defined(ARK_ASAN_ON) extern "C" { // NOLINTNEXTLINE(readability-identifier-naming) void __asan_poison_memory_region(void const volatile *addr, size_t size) __attribute__((visibility("default"))); diff --git a/common_components/base/c_string.cpp b/common_components/base/c_string.cpp index d04568e26534106abd2db77b50c585fc109ab8ee..bd5e5db6991b8d1cc8c5f66e7608e7356530c453 100755 --- a/common_components/base/c_string.cpp +++ b/common_components/base/c_string.cpp @@ -199,9 +199,11 @@ 
CString& CString::Append(const CString& addStr, size_t addLen) addLen = strlen(addStr.str_); } EnsureSpace(addLen); + DCHECK_CC(addLen <= addStr.length_); LOGF_IF(memcpy_s(str_ + length_, capacity_ - length_, addStr.str_, addLen) != EOK) << "CString::Append memcpy_s failed"; length_ += addLen; + DCHECK_CC(str_ != nullptr); str_[length_] = '\0'; return *this; } @@ -215,6 +217,7 @@ CString& CString::Append(const char* addStr, size_t addLen) addLen = strlen(addStr); } EnsureSpace(addLen); + DCHECK_CC(addLen <= strlen(addStr)); LOGF_IF(memcpy_s(str_ + length_, capacity_ - length_, addStr, addLen) != EOK) << "CString::Append memcpy_s failed"; length_ += addLen; @@ -315,6 +318,7 @@ CString CString::SubStr(size_t index, size_t len) const return newStr; } newStr.length_ = len; + DCHECK_CC(newStr.str_ != nullptr); newStr.str_[newStr.length_] = '\0'; return newStr; } @@ -404,6 +408,9 @@ bool CString::IsNumber(const CString& s) size_t i = 0; char it = s.Str()[i]; if (it == '-') { + if (s.Length() == 1) { + return false; + } i++; } for (; i < s.Length(); ++i) { @@ -426,6 +433,7 @@ CString CString::RemoveBlankSpace() const if (length_ == 0) { return noBlankSpaceStr; } + DCHECK_CC(noBlankSpaceStr.str_ != nullptr); for (size_t i = 0; i < length_; i++) { if (str_[i] != ' ') { noBlankSpaceStr.str_[index++] = str_[i]; @@ -553,6 +561,9 @@ void CString::Replace(size_t pos, CString cStr) void CString::ReplaceAll(CString replacement, CString target) { + if (replacement.Length() == 0 || target.Length() == 0) { + return; + } int index = -1; int ret = Find(target.Str()); while (ret != -1) { diff --git a/common_components/base/c_string.h b/common_components/base/c_string.h index 00572c5809967d5635c9062466428de873f36f85..959607a29bc6c1ba57620c1e3f847e1b99deaecd 100755 --- a/common_components/base/c_string.h +++ b/common_components/base/c_string.h @@ -19,6 +19,7 @@ #include #include +#include "common_interfaces/base/common.h" #include "securec.h" namespace common { @@ -85,7 +86,11 @@ public: 
void ReplaceAll(CString replacement, CString target); - bool operator==(const CString& other) const { return strcmp(str_, other.str_) == 0; } + bool operator==(const CString& other) const + { + DCHECK_CC(other.str_ != nullptr); + return strcmp(str_, other.str_) == 0; + } bool operator!=(const CString& other) const { return !(strcmp(str_, other.str_) == 0); } diff --git a/common_components/base/config.h b/common_components/base/config.h index 79d9df0d6c36da209e307feee06413a6c4ac00b7..90ff4762ee19729be7e716b0e58190027de496fd 100644 --- a/common_components/base/config.h +++ b/common_components/base/config.h @@ -30,6 +30,8 @@ namespace common { #define NEXT_OPTIMIZATION_BOOL false #endif +#define ECMASCRIPT_ENABLE_TRACE_STRING_TABLE 0 + } // namespace common #endif // COMMON_COMPONENTS_BASE_CONFIG_H \ No newline at end of file diff --git a/common_components/base/rw_lock.h b/common_components/base/rw_lock.h index 27398fdfc22a2b078a4354b8021e1a61eacb8177..2b52c642a7618dd318764d19d716e4d72a6c12c9 100755 --- a/common_components/base/rw_lock.h +++ b/common_components/base/rw_lock.h @@ -66,7 +66,7 @@ public: void UnlockRead() { int count = lockCount_.fetch_sub(1); - if (count < 0) { + if (count < 0) { //LCOV_EXCL_BR_LINE LOG_COMMON(FATAL) << "Unresolved fatal"; UNREACHABLE_CC(); } diff --git a/common_components/base/tests/BUILD.gn b/common_components/base/tests/BUILD.gn index 39e86f58d2ed0cbdc56ec3a7cdcd8f967e4197f8..b539cdb7f81c7f641bda9ad704d9cdf2f5f84b22 100755 --- a/common_components/base/tests/BUILD.gn +++ b/common_components/base/tests/BUILD.gn @@ -30,6 +30,52 @@ host_unittest_action("C_String_Test") { deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Mem_Utils_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "mem_utils_test.cpp", + ] + + configs = [ + 
"//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "runtime_core:libarkassembler_static", + ] +} + +host_unittest_action("Utf_Helper_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "utf_helper_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + # hiviewdfx libraries external_deps = [ "icu:shared_icui18n", @@ -44,6 +90,8 @@ group("unittest") { # deps file deps = [ ":C_String_Test", + ":Mem_Utils_Test", + ":Utf_Helper_Test", ] } @@ -53,5 +101,7 @@ group("host_unittest") { # deps file deps = [ ":C_String_TestAction", + ":Mem_Utils_TestAction", + ":Utf_Helper_TestAction", ] } \ No newline at end of file diff --git a/common_components/base/tests/c_string_test.cpp b/common_components/base/tests/c_string_test.cpp index 56b0436ac55873a17bfd6cd19d706b945a9fa16b..785b9bfa5f999bf3d885ffaaaca99cb5972e67a8 100755 --- a/common_components/base/tests/c_string_test.cpp +++ b/common_components/base/tests/c_string_test.cpp @@ -15,7 +15,6 @@ #include "common_components/base/c_string.h" #include "common_components/tests/test_helper.h" -#include using namespace common; namespace common::test { @@ -46,6 +45,18 @@ HWTEST_F_L0(CStringTest, ParseTimeFromEnvTest) env = "abcms"; EXPECT_EQ(CString::ParseTimeFromEnv(env), static_cast(0)); + + env = ""; + EXPECT_EQ(CString::ParseTimeFromEnv(env), static_cast(0)); + + env = " 10 s "; + EXPECT_EQ(CString::ParseTimeFromEnv(env), static_cast(10 * 1000UL * 1000 * 1000)); + + env = "123"; + EXPECT_EQ(CString::ParseTimeFromEnv(env), static_cast(0)); + + 
env = "18446744073709551615ns"; + EXPECT_EQ(CString::ParseTimeFromEnv(env), static_cast(18446744073709551615UL)); } HWTEST_F_L0(CStringTest, ParseNumFromEnvTest) @@ -66,6 +77,15 @@ HWTEST_F_L0(CStringTest, ParseNumFromEnvTest) env = "+123"; EXPECT_EQ(CString::ParseNumFromEnv(env), 0); + + env = " 123 "; + EXPECT_EQ(CString::ParseNumFromEnv(env), 123); + + env = "2147483647"; + EXPECT_EQ(CString::ParseNumFromEnv(env), 2147483647); + + env = "123abc456"; + EXPECT_EQ(CString::ParseNumFromEnv(env), 0); } HWTEST_F_L0(CStringTest, ParsePosNumFromEnvTest) @@ -89,6 +109,15 @@ HWTEST_F_L0(CStringTest, ParsePosNumFromEnvTest) env = "-123"; EXPECT_EQ(CString::ParsePosNumFromEnv(env), static_cast(0)); + + env = "007"; + EXPECT_EQ(CString::ParsePosNumFromEnv(env), static_cast(7)); + + env = " 123 "; + EXPECT_EQ(CString::ParsePosNumFromEnv(env), static_cast(123)); + + env = "18446744073709551615"; + EXPECT_EQ(CString::ParsePosNumFromEnv(env), static_cast(18446744073709551615UL)); } HWTEST_F_L0(CStringTest, ParsePosDecFromEnvTest) @@ -112,6 +141,21 @@ HWTEST_F_L0(CStringTest, ParsePosDecFromEnvTest) env = "-123.45"; EXPECT_EQ(CString::ParsePosDecFromEnv(env), 0.0); + + env = "123"; + EXPECT_EQ(CString::ParsePosDecFromEnv(env), 123); + + env = " 123.45 "; + EXPECT_EQ(CString::ParsePosDecFromEnv(env), 123.45); + + env = "abc"; + EXPECT_EQ(CString::ParsePosDecFromEnv(env), 0.0); + + env = "1.7976931348623157e+308"; + EXPECT_EQ(CString::ParsePosDecFromEnv(env), 1.7976931348623157e+308); + + env = "2.2250738585072014e-308"; + EXPECT_EQ(CString::ParsePosDecFromEnv(env), 2.2250738585072014e-308); } HWTEST_F_L0(CStringTest, WhileLoopCoverageTest) @@ -127,6 +171,36 @@ HWTEST_F_L0(CStringTest, WhileLoopCoverageTest) CString replacement1 = "replace"; str1.ReplaceAll(replacement1, target1); EXPECT_STREQ(str1.Str(), "hello world"); + + CString str2("hello world"); + CString target2(""); + CString replacement2("abc"); + str2.ReplaceAll(replacement2, target2); + EXPECT_STREQ(str2.Str(), "hello 
world"); + + CString str3("hello world"); + CString target3("world"); + CString replacement3(""); + str3.ReplaceAll(replacement3, target3); + EXPECT_STREQ(str3.Str(), "hello world"); + + CString str4("hello world"); + CString target4("world"); + CString replacement4("world"); + str4.ReplaceAll(replacement4, target4); + EXPECT_STREQ(str4.Str(), "hello world"); + + CString str5("aaaa"); + CString target5("aa"); + CString replacement5("b"); + str5.ReplaceAll(replacement5, target5); + EXPECT_STREQ(str5.Str(), "baba"); + + CString str6(""); + CString target6("world"); + CString replacement6("universe"); + str6.ReplaceAll(replacement6, target6); + EXPECT_STREQ(str6.Str(), ""); } HWTEST_F_L0(CStringTest, RemoveBlankSpaceTest) @@ -146,6 +220,18 @@ HWTEST_F_L0(CStringTest, RemoveBlankSpaceTest) CString allSpaceStr = " "; CString result4 = allSpaceStr.RemoveBlankSpace(); EXPECT_STREQ(result4.Str(), ""); + + CString leadingSpaceStr(" Hello"); + CString result5 = leadingSpaceStr.RemoveBlankSpace(); + EXPECT_STREQ("Hello", result5.Str()); + + CString trailingSpaceStr("World "); + CString result6 = trailingSpaceStr.RemoveBlankSpace(); + EXPECT_STREQ("World", result6.Str()); + + CString mixedStr(" Hello World "); + CString result7 = mixedStr.RemoveBlankSpace(); + EXPECT_STREQ("HelloWorld", result7.Str()); } HWTEST_F_L0(CStringTest, ParseSizeFromEnvTest) @@ -156,6 +242,13 @@ HWTEST_F_L0(CStringTest, ParseSizeFromEnvTest) EXPECT_EQ(CString::ParseSizeFromEnv("5mb"), static_cast(5 * 1024)); EXPECT_EQ(CString::ParseSizeFromEnv("2GB"), static_cast(2 * 1024 * 1024)); EXPECT_EQ(CString::ParseSizeFromEnv("10tb"), static_cast(0)); + EXPECT_EQ(CString::ParseSizeFromEnv(""), static_cast(0)); + EXPECT_EQ(CString::ParseSizeFromEnv(" "), static_cast(0)); + EXPECT_EQ(CString::ParseSizeFromEnv("1k"), static_cast(0)); + EXPECT_EQ(CString::ParseSizeFromEnv("abcKB"), static_cast(0)); + EXPECT_EQ(CString::ParseSizeFromEnv("1024TB"), static_cast(0)); + EXPECT_EQ(CString::ParseSizeFromEnv(" 2048 KB "), 
static_cast(2048)); + EXPECT_EQ(CString::ParseSizeFromEnv("18446744073709551615Kb"), static_cast(18446744073709551615UL)); } HWTEST_F_L0(CStringTest, IsPosDecimalTest) @@ -166,7 +259,15 @@ HWTEST_F_L0(CStringTest, IsPosDecimalTest) EXPECT_FALSE(CString::IsPosDecimal("abc")); EXPECT_FALSE(CString::IsPosDecimal("-1.5")); EXPECT_FALSE(CString::IsPosDecimal("0")); + EXPECT_FALSE(CString::IsPosDecimal("12.34.56")); + EXPECT_FALSE(CString::IsPosDecimal("123e")); + EXPECT_FALSE(CString::IsPosDecimal("0.0000000")); + EXPECT_TRUE(CString::IsPosDecimal("1e10")); EXPECT_TRUE(CString::IsPosDecimal("123.45")); + EXPECT_TRUE(CString::IsPosDecimal("+123.45")); + EXPECT_TRUE(CString::IsPosDecimal("0.0000001")); + EXPECT_TRUE(CString::IsPosDecimal("999999999999999.9")); + EXPECT_TRUE(CString::IsPosDecimal("1.79769e+308")); } HWTEST_F_L0(CStringTest, IsNumberTest) @@ -174,9 +275,16 @@ HWTEST_F_L0(CStringTest, IsNumberTest) EXPECT_FALSE(CString::IsNumber("")); EXPECT_FALSE(CString::IsNumber("abc")); EXPECT_FALSE(CString::IsNumber("12a3")); + EXPECT_FALSE(CString::IsNumber("-")); + EXPECT_FALSE(CString::IsNumber("12!3")); + EXPECT_FALSE(CString::IsNumber(" 123")); + EXPECT_FALSE(CString::IsNumber("123a")); + EXPECT_FALSE(CString::IsNumber("a")); + EXPECT_FALSE(CString::IsNumber("+123")); EXPECT_TRUE(CString::IsNumber("123")); EXPECT_TRUE(CString::IsNumber("-456")); + EXPECT_TRUE(CString::IsNumber("12345678901234567890")); } HWTEST_F_L0(CStringTest, IsPosNumberTest) @@ -186,9 +294,15 @@ HWTEST_F_L0(CStringTest, IsPosNumberTest) EXPECT_FALSE(CString::IsPosNumber("0")); EXPECT_FALSE(CString::IsPosNumber("abc")); EXPECT_FALSE(CString::IsPosNumber("-123")); + EXPECT_FALSE(CString::IsPosNumber("12a45")); + EXPECT_FALSE(CString::IsPosNumber("+12-45")); + EXPECT_FALSE(CString::IsPosNumber("12.45")); EXPECT_TRUE(CString::IsPosNumber("123")); EXPECT_TRUE(CString::IsPosNumber("+123")); + EXPECT_TRUE(CString::IsPosNumber("00123")); + EXPECT_TRUE(CString::IsPosNumber("+00123")); + 
EXPECT_TRUE(CString::IsPosNumber("12345678901234567890")); } HWTEST_F_L0(CStringTest, SubStrTest) @@ -199,6 +313,9 @@ HWTEST_F_L0(CStringTest, SubStrTest) EXPECT_EQ(std::string(str.SubStr(4, 3).Str()), ""); EXPECT_EQ(std::string(str.SubStr(10).Str()), ""); EXPECT_EQ(std::string(str.SubStr(2).Str()), "cdef"); + EXPECT_EQ(std::string(str.SubStr(0, str.Length()).Str()), "abcdef"); + EXPECT_EQ(std::string(str.SubStr(str.Length(), 1).Str()), ""); + EXPECT_EQ(std::string(str.SubStr(str.Length()-1, 1).Str()), "f"); } HWTEST_F_L0(CStringTest, SplitTest) @@ -213,6 +330,24 @@ HWTEST_F_L0(CStringTest, SplitTest) EXPECT_EQ(std::string(tokens[0].Str()), "a"); EXPECT_EQ(std::string(tokens[1].Str()), "b"); EXPECT_EQ(std::string(tokens[2].Str()), "c"); + + CString source("single"); + tokens = CString::Split(source, ','); + ASSERT_EQ(1, tokens.size()); + EXPECT_STREQ("single", tokens[0].Str()); + + CString source1(",hello,world"); + tokens = CString::Split(source1, ','); + ASSERT_EQ(2, tokens.size()); + EXPECT_STREQ("hello", tokens[0].Str()); + EXPECT_STREQ("world", tokens[1].Str()); + + CString source2("hello;world;test"); + tokens = CString::Split(source2, ';'); + ASSERT_EQ(3, tokens.size()); + EXPECT_STREQ("hello", tokens[0].Str()); + EXPECT_STREQ("world", tokens[1].Str()); + EXPECT_STREQ("test", tokens[2].Str()); } HWTEST_F_L0(CStringTest, FIndandRfindTest) @@ -224,6 +359,9 @@ HWTEST_F_L0(CStringTest, FIndandRfindTest) EXPECT_EQ(str.Find('h', 20), -1); EXPECT_EQ(str.Find('o', 4), 4); + EXPECT_EQ(str.Find("Hello", 0), -1); + EXPECT_EQ(str.Find("xyz", 0), -1); + EXPECT_EQ(str.Find("worlds", 6), -1); EXPECT_EQ(str.RFind("xyz"), -1); CString multiStr("abababa"); @@ -274,4 +412,137 @@ HWTEST_F_L0(CStringTest, ConstructTest) EXPECT_EQ(nonEmptyStr.Length(), static_cast(0)); EXPECT_EQ(nonEmptyStr.Str()[0], '\0'); } + +HWTEST_F_L0(CStringTest, FormatStringBasicTest) +{ + CString result = CString::FormatString("Hello, %s!", "World"); + EXPECT_STREQ(result.Str(), "Hello, World!"); +} + 
+HWTEST_F_L0(CStringTest, FormatString_InvalidArguments_ReturnsError) +{ + CString result = CString::FormatString("%n", nullptr); + EXPECT_STREQ(result.Str(), "invalid arguments for FormatString"); +} + +HWTEST_F_L0(CStringTest, InsertMiddle_Success) +{ + CString str("helloworld"); + EXPECT_STREQ(str.Insert(5, ", ").Str(), "hello, world"); +} + +HWTEST_F_L0(CStringTest, TruncateValidIndex_Success) +{ + CString str("hello world"); + EXPECT_STREQ(str.Truncate(5).Str(), "hello"); + EXPECT_EQ(str.Length(), static_cast(5)); +} + +HWTEST_F_L0(CStringTest, GetStr_NonEmptyString_ReturnsCorrect) +{ + CString str("test string"); + EXPECT_STREQ(str.GetStr(), "test string"); +} + +HWTEST_F_L0(CStringTest, CombineWithEmptyString_ReturnsOriginal) +{ + CString str("original"); + CString emptyStr; + CString combined = str.Combine(emptyStr); + EXPECT_STREQ(combined.Str(), "original"); + EXPECT_EQ(combined.Length(), str.Length()); +} + +HWTEST_F_L0(CStringTest, CombineWithEmptyCStr_ReturnsOriginal) +{ + CString str("original"); + const char* emptyCStr = ""; + CString combined = str.Combine(emptyCStr); + EXPECT_STREQ(combined.Str(), "original"); + EXPECT_EQ(combined.Length(), str.Length()); +} + +HWTEST_F_L0(CStringTest, AppendNullptr_NoChange) +{ + CString str("original"); + str.Append(nullptr, 5); + EXPECT_STREQ(str.Str(), "original"); + EXPECT_EQ(str.Length(), strlen("original")); +} + +HWTEST_F_L0(CStringTest, AppendZeroLength_NoChange) +{ + CString str("test"); + CString emptyStr; + + str.Append(emptyStr, 0); + + EXPECT_STREQ(str.Str(), "test"); + EXPECT_EQ(str.Length(), strlen("test")); +} + +HWTEST_F_L0(CStringTest, AppendSelf_ValidResult) +{ + CString str("abc"); + str.Append(str.Str(), str.Length()); + EXPECT_STREQ(str.Str(), "abcabc"); + EXPECT_EQ(str.Length(), strlen("abcabc")); +} + +HWTEST_F_L0(CStringTest, AppendEmptyCString_NoChange) +{ + CString str("original"); + CString emptyStr; + str.Append(emptyStr); + EXPECT_STREQ(str.Str(), "original"); + 
EXPECT_EQ(str.Length(), strlen("original")); +} + +HWTEST_F_L0(CStringTest, AppendEmptyCStringZeroLength_NoChange) +{ + CString str("test"); + CString emptyStr; + + str.Append(emptyStr, 0); + + EXPECT_STREQ(str.Str(), "test"); + EXPECT_EQ(str.Length(), strlen("test")); +} + +HWTEST_F_L0(CStringTest, AppendValidCString_CorrectResult) +{ + CString str("hello"); + CString addStr(" world!"); + str.Append(addStr, strlen(addStr.Str())); + EXPECT_STREQ(str.Str(), "hello world!"); + EXPECT_EQ(str.Length(), strlen("hello world!")); +} + +HWTEST_F_L0(CStringTest, EnsureMultipleCalls_CapacityGrowsCorrectly) +{ + CString str("initial"); + char* firstPtr = str.GetStr(); + + str.EnsureSpace(16); + char* secondPtr = str.GetStr(); + + EXPECT_NE(firstPtr, secondPtr); + + str.EnsureSpace(100); + char* thirdPtr = str.GetStr(); + + EXPECT_NE(secondPtr, thirdPtr); + EXPECT_EQ(str.Length(), strlen("initial")); + EXPECT_STREQ(str.SubStr(0, str.Length()).Str(), "initial"); +} + +HWTEST_F_L0(CStringTest, CStringSubscriptOperatorTest) +{ + const CString constStr("hello world"); + EXPECT_EQ(constStr[0], 'h'); + + CString mutableStr("mutable"); + mutableStr[0] = 'M'; + EXPECT_EQ(mutableStr[0], 'M'); +} } \ No newline at end of file diff --git a/test/fuzztest/containersprivateload_fuzzer/containersprivateload_fuzzer.cpp b/common_components/base/tests/mem_utils_test.cpp similarity index 36% rename from test/fuzztest/containersprivateload_fuzzer/containersprivateload_fuzzer.cpp rename to common_components/base/tests/mem_utils_test.cpp index 5e529af89e0ea44f3b56a66cd32642acfa92d7f7..49c7f470777faef7612c761fe143eaa1fe88078b 100644 --- a/test/fuzztest/containersprivateload_fuzzer/containersprivateload_fuzzer.cpp +++ b/common_components/base/tests/mem_utils_test.cpp @@ -1,44 +1,50 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "containersprivateload_fuzzer.h" -#include "test/fuzztest/containersprivatecommon_fuzzer/containersprivatecommon_fuzzer.h" - -#include "ecmascript/containers/containers_private.h" -#include "ecmascript/ecma_string-inl.h" -#include "ecmascript/ecma_vm.h" -#include "ecmascript/global_env.h" -#include "ecmascript/js_handle.h" -#include "ecmascript/napi/include/jsnapi.h" - -using namespace panda; -using namespace panda::test; -using namespace panda::ecmascript; -using namespace panda::ecmascript::containers; - -namespace OHOS { - void ContainersPrivateLoadFuzzTest(const uint8_t* data, size_t size) - { - ContainersPrivateFuzzTestHelper::ContainersPrivateCommonFuzzTest(data, size, *data); - } -} - -// Fuzzer entry point. -extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) -{ - // Run your code on data. - OHOS::ContainersPrivateLoadFuzzTest(data, size); - return 0; -} \ No newline at end of file +/* +* Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "common_components/base/mem_utils.h" +#include "common_components/tests/test_helper.h" + +using namespace common; +namespace common::test { +class MemUtilsTest : public common::test::BaseTestWithScope { +}; + +HWTEST_F_L0(MemUtilsTest, CopyZeroBytes) +{ + char dest[100] = {}; + const char* src = "hello world"; + MemoryCopy(reinterpret_cast(dest), 0, + reinterpret_cast(src), strlen(src) + 1); + EXPECT_EQ(dest[0], '\0'); +} + +HWTEST_F_L0(MemUtilsTest, CopyTwoChunks) +{ + constexpr size_t totalSize = 100; + char dest[totalSize] = {}; + char src[totalSize] = {}; + for (size_t i = 0; i < totalSize; ++i) { + src[i] = static_cast('A' + (i % 26)); + } + + MemoryCopy(reinterpret_cast(dest), totalSize, + reinterpret_cast(src), totalSize); + + EXPECT_EQ(memcmp(dest, src, totalSize), 0); +} +} // namespace common::test \ No newline at end of file diff --git a/common_components/base/tests/utf_helper_test.cpp b/common_components/base/tests/utf_helper_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a6bd777ea9adb40927c6dcc211cf5c928aec32cf --- /dev/null +++ b/common_components/base/tests/utf_helper_test.cpp @@ -0,0 +1,329 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/base/utf_helper.h" +#include "common_components/tests/test_helper.h" + +using namespace common; +namespace common::test { +class UtfHelperTest : public common::test::BaseTestWithScope { +}; + +HWTEST_F_L0(UtfHelperTest, DecodeUTF16Test1) +{ + uint16_t utf16[] = {0xD7FF}; + size_t index = 0; + size_t len = 1; + uint32_t result = utf_helper::DecodeUTF16(utf16, len, &index, false); + EXPECT_EQ(result, 0xD7FF); + + uint16_t utf16In[] = {0xDC00}; + result = utf_helper::DecodeUTF16(utf16In, len, &index, false); + EXPECT_EQ(result, 0xDC00); + + uint16_t utf16In1[] = {0xD800}; + result = utf_helper::DecodeUTF16(utf16In1, len, &index, false); + EXPECT_EQ(result, 0xD800); + + uint16_t utf16In2[] = {0xD7FF}; + len = 2; + result = utf_helper::DecodeUTF16(utf16In2, len, &index, false); + EXPECT_EQ(result, 0xD7FF); + + len = 1; + result = utf_helper::DecodeUTF16(utf16In2, len, &index, false); + EXPECT_EQ(result, 0xD7FF); + + result = utf_helper::DecodeUTF16(utf16In, len, &index, false); + EXPECT_EQ(result, 0xDC00); +} + +HWTEST_F_L0(UtfHelperTest, DecodeUTF16Test2) +{ + size_t index = 0; + uint16_t utf16[] = {0xD800, 0xDC00}; + size_t len = 2; + utf16[1] = 0xFFFF; + uint32_t result = utf_helper::DecodeUTF16(utf16, len, &index, false); + EXPECT_EQ(result, 0xD800); +} + +HWTEST_F_L0(UtfHelperTest, DecodeUTF16Test3) +{ + size_t index = 0; + uint16_t utf16[] = {0xD800, 0xDC00}; + size_t len = 2; + uint32_t result = utf_helper::DecodeUTF16(utf16, len, &index, true); + EXPECT_EQ(result, 0xD800); + + uint16_t utf16In[] = {0xD800, 0x0041}; + result = utf_helper::DecodeUTF16(utf16In, len, &index, false); + EXPECT_EQ(result, 0xD800); +} + +HWTEST_F_L0(UtfHelperTest, HandleAndDecodeInvalidUTF16Test1) +{ + uint16_t input[] = {0xDC00}; + size_t index = 0; + size_t len = sizeof(input) / sizeof(input[0]); + uint32_t result = utf_helper::HandleAndDecodeInvalidUTF16(input, len, &index); + EXPECT_EQ(result, utf_helper::UTF16_REPLACEMENT_CHARACTER); +} + 
+HWTEST_F_L0(UtfHelperTest, HandleAndDecodeInvalidUTF16Test2) +{ + uint16_t input[] = {0xD800}; + size_t index = 0; + size_t len = sizeof(input) / sizeof(input[0]); + uint32_t result = utf_helper::HandleAndDecodeInvalidUTF16(input, len, &index); + EXPECT_EQ(result, utf_helper::UTF16_REPLACEMENT_CHARACTER); + + uint16_t input1[] = {0xD800, 0xD800}; + size_t len1 = sizeof(input1) / sizeof(input1[0]); + result = utf_helper::HandleAndDecodeInvalidUTF16(input1, len1, &index); + EXPECT_EQ(result, utf_helper::UTF16_REPLACEMENT_CHARACTER); + + uint16_t input2[] = {'A'}; + size_t len2 = sizeof(input2) / sizeof(input2[0]); + result = utf_helper::HandleAndDecodeInvalidUTF16(input2, len2, &index); + EXPECT_EQ(result, 'A'); + + uint16_t input3[] = {0xD800 ^ 0x01}; + size_t len3 = sizeof(input3) / sizeof(input3[0]); + result = utf_helper::HandleAndDecodeInvalidUTF16(input3, len3, &index); + EXPECT_EQ(result, utf_helper::UTF16_REPLACEMENT_CHARACTER); +} + +HWTEST_F_L0(UtfHelperTest, HandleAndDecodeInvalidUTF16Test3) +{ + uint16_t input[] = {0xDBFF, 0xDFFF}; + size_t index = 0; + size_t len = sizeof(input) / sizeof(input[0]); + uint32_t expected = ((0xDBFF - utf_helper::DECODE_LEAD_LOW) << utf_helper::UTF16_OFFSET) + + (0xDFFF - utf_helper::DECODE_TRAIL_LOW) + utf_helper::DECODE_SECOND_FACTOR; + uint32_t result = utf_helper::HandleAndDecodeInvalidUTF16(input, len, &index); + EXPECT_EQ(result, expected); +} + +HWTEST_F_L0(UtfHelperTest, HandleAndDecodeInvalidUTF16Test4) +{ + uint16_t input[] = {0xD800, 0xDC00}; + size_t index = 0; + size_t len = sizeof(input) / sizeof(input[0]); + uint32_t expected = ((0xD800 - utf_helper::DECODE_LEAD_LOW) << utf_helper::UTF16_OFFSET) + + (0xDC00 - utf_helper::DECODE_TRAIL_LOW) + utf_helper::DECODE_SECOND_FACTOR; + uint32_t result = utf_helper::HandleAndDecodeInvalidUTF16(input, len, &index); + EXPECT_EQ(result, expected); + EXPECT_EQ(index, 1); +} + +HWTEST_F_L0(UtfHelperTest, IsValidUTF8Test1) +{ + std::vector data = {0xED, 0xA0, 0x80}; + 
EXPECT_FALSE(utf_helper::IsValidUTF8(data)); + + std::vector data1 = {0xED, 0x90, 0x80}; + EXPECT_TRUE(utf_helper::IsValidUTF8(data1)); + + std::vector data2 = {0xED, 0xC0, 0x80}; + EXPECT_FALSE(utf_helper::IsValidUTF8(data2)); + + std::vector data3 = {0xED, 0x80, 0x80}; + EXPECT_TRUE(utf_helper::IsValidUTF8(data3)); + + std::vector data4 = {0xE0, 0xA0, 0x80}; + EXPECT_TRUE(utf_helper::IsValidUTF8(data4)); +} + +HWTEST_F_L0(UtfHelperTest, IsValidUTF8Test2) +{ + std::vector data = {0xF4, 0x90, 0x80, 0x80}; + EXPECT_FALSE(utf_helper::IsValidUTF8(data)); + + std::vector data1 = {0xF5, 0x80, 0x80, 0x80}; + EXPECT_FALSE(utf_helper::IsValidUTF8(data1)); + + std::vector data2 = {0xF0, 0x90, 0x80, 0x80}; + EXPECT_TRUE(utf_helper::IsValidUTF8(data2)); + + std::vector data3 = {0xF1, 0x80, 0x80, 0x80}; + EXPECT_TRUE(utf_helper::IsValidUTF8(data3)); + + std::vector data4 = {0xF4, 0x80, 0x80, 0x80}; + EXPECT_TRUE(utf_helper::IsValidUTF8(data4)); +} + +HWTEST_F_L0(UtfHelperTest, ConvertRegionUtf16ToUtf8Test3) +{ + uint8_t utf8Out[10]; + size_t result = utf_helper::ConvertRegionUtf16ToUtf8(nullptr, utf8Out, 5, 10, 0, false, false, false); + EXPECT_EQ(result, 0); + + uint16_t utf16In[] = {0x0041}; + result = utf_helper::ConvertRegionUtf16ToUtf8(utf16In, nullptr, 1, sizeof(utf16In), 0, false, false, false); + EXPECT_EQ(result, 0); + + result = utf_helper::ConvertRegionUtf16ToUtf8(utf16In, utf8Out, 1, 0, 0, false, false, false); + EXPECT_EQ(result, 0); + + result = utf_helper::ConvertRegionUtf16ToUtf8(nullptr, utf8Out, 1, 0, 0, false, false, false); + EXPECT_EQ(result, 0); + + result = utf_helper::ConvertRegionUtf16ToUtf8(utf16In, nullptr, 1, 0, 0, false, false, false); + EXPECT_EQ(result, 0); + + result = utf_helper::ConvertRegionUtf16ToUtf8(nullptr, nullptr, 1, sizeof(utf8Out), 0, false, false, false); + EXPECT_EQ(result, 0); + + result = utf_helper::ConvertRegionUtf16ToUtf8(nullptr, nullptr, 1, 0, 0, false, false, false); + EXPECT_EQ(result, 0); + + result = 
utf_helper::ConvertRegionUtf16ToUtf8(utf16In, utf8Out, 1, sizeof(utf8Out), 0, false, false, false); + EXPECT_EQ(result, 1); +} + +HWTEST_F_L0(UtfHelperTest, ConvertRegionUtf16ToLatin1Test) +{ + uint8_t utf8Out[10]; + size_t result = utf_helper::ConvertRegionUtf16ToLatin1(nullptr, utf8Out, 5, 10); + EXPECT_EQ(result, 0); + + uint16_t utf16In[] = {0x0041}; + result = utf_helper::ConvertRegionUtf16ToLatin1(utf16In, nullptr, 1, sizeof(utf16In)); + EXPECT_EQ(result, 0); + + result = utf_helper::ConvertRegionUtf16ToLatin1(utf16In, utf8Out, 1, 0); + EXPECT_EQ(result, 0); + + result = utf_helper::ConvertRegionUtf16ToLatin1(nullptr, utf8Out, 1, 0); + EXPECT_EQ(result, 0); + + result = utf_helper::ConvertRegionUtf16ToLatin1(utf16In, nullptr, 1, 0); + EXPECT_EQ(result, 0); + + result = utf_helper::ConvertRegionUtf16ToLatin1(nullptr, nullptr, 1, sizeof(utf8Out)); + EXPECT_EQ(result, 0); + + result = utf_helper::ConvertRegionUtf16ToLatin1(nullptr, nullptr, 1, 0); + EXPECT_EQ(result, 0); + + result = utf_helper::ConvertRegionUtf16ToLatin1(utf16In, utf8Out, 1, sizeof(utf8Out)); + EXPECT_EQ(result, 1); + + const uint16_t input[] = {0x0041, 0x0042, 0x0043}; + uint8_t output[2] = {0}; + result = utf_helper::ConvertRegionUtf16ToLatin1(input, output, 3, 2); + EXPECT_EQ(result, 2); +} + +HWTEST_F_L0(UtfHelperTest, DebuggerConvertRegionUtf16ToUtf8Test) +{ + uint8_t utf8Out[10]; + size_t result = utf_helper::DebuggerConvertRegionUtf16ToUtf8(nullptr, utf8Out, 5, sizeof(utf8Out), 0, false, false); + EXPECT_EQ(result, 0); + + uint16_t utf16In[] = {0x0041}; + result = utf_helper::DebuggerConvertRegionUtf16ToUtf8(utf16In, nullptr, 1, sizeof(utf16In), 0, false, false); + EXPECT_EQ(result, 0); + + result = utf_helper::DebuggerConvertRegionUtf16ToUtf8(nullptr, nullptr, 1, sizeof(utf8Out), 0, false, false); + EXPECT_EQ(result, 0); + + result = utf_helper::DebuggerConvertRegionUtf16ToUtf8(nullptr, utf8Out, 1, 0, 0, false, false); + EXPECT_EQ(result, 0); + + result = 
utf_helper::DebuggerConvertRegionUtf16ToUtf8(nullptr, utf8Out, 1, 0, 0, false, false); + EXPECT_EQ(result, 0); + + result = utf_helper::DebuggerConvertRegionUtf16ToUtf8(nullptr, nullptr, 1, 0, 0, false, false); + EXPECT_EQ(result, 0); + + result = utf_helper::DebuggerConvertRegionUtf16ToUtf8(utf16In, utf8Out, 1, 0, 0, false, false); + EXPECT_EQ(result, 0); + + result = utf_helper::DebuggerConvertRegionUtf16ToUtf8(utf16In, utf8Out, 1, sizeof(utf8Out), 0, false, false); + EXPECT_EQ(result, 1); + + uint16_t utf16In1[] = {0x0041, 0x0042, 0x0043}; + result = utf_helper::DebuggerConvertRegionUtf16ToUtf8(utf16In1, nullptr, 3, 0, 0, false, false); + EXPECT_EQ(result, 0); +} + +HWTEST_F_L0(UtfHelperTest, DecodeUTF16Test4) +{ + uint16_t utf16[] = {0xD800, 0xDC00}; + size_t index = 0; + size_t len = sizeof(utf16) / sizeof(utf16[0]); + + uint32_t result = utf_helper::DecodeUTF16(utf16, len, &index, false); + EXPECT_EQ(result, 0x10000); + EXPECT_EQ(index, 1); +} + +HWTEST_F_L0(UtfHelperTest, Utf8ToUtf16Size_LastByteIsTwoByteStart_TrimLastByte) +{ + const uint8_t utf8[] = {0xC0}; + size_t utf8Len = sizeof(utf8); + + size_t result = utf_helper::Utf8ToUtf16Size(utf8, utf8Len); + EXPECT_EQ(result, 1); +} + +HWTEST_F_L0(UtfHelperTest, Utf8ToUtf16Size_LastTwoBytesStartWithThreeByteHeader_TrimTwoBytes) +{ + const uint8_t utf8[] = {0xE2, 0x82}; + size_t utf8Len = sizeof(utf8); + + size_t result = utf_helper::Utf8ToUtf16Size(utf8, utf8Len); + EXPECT_EQ(result, 2); +} + +HWTEST_F_L0(UtfHelperTest, Utf8ToUtf16Size_IncompleteSequenceAtEnd_ReturnsSizeWithoutInvalid) +{ + const uint8_t utf8[] = {0xF0, 0x90, 0x8D}; + size_t result = utf_helper::Utf8ToUtf16Size(utf8, 3); + EXPECT_EQ(result, 3); +} + +HWTEST_F_L0(UtfHelperTest, DebuggerConvertRegionUtf16ToUtf8_ZeroCodepoint_WriteNulChar) +{ + uint16_t utf16In[] = {0x0000}; + uint8_t utf8Out[10] = {0}; + size_t utf16Len = 1; + size_t utf8Len = sizeof(utf8Out); + size_t start = 0; + bool modify = false; + bool isWriteBuffer = true; + + size_t 
result = utf_helper::DebuggerConvertRegionUtf16ToUtf8( + utf16In, utf8Out, utf16Len, utf8Len, start, modify, isWriteBuffer); + + EXPECT_EQ(result, 1); + EXPECT_EQ(utf8Out[0], 0x00U); +} + +HWTEST_F_L0(UtfHelperTest, ConvertUtf16ToUtf8_NulChar_WriteBufferMode_ReturnsZeroByte) +{ + uint16_t d0 = 0x0000; + uint16_t d1 = 0; + bool modify = false; + bool isWriteBuffer = true; + + utf_helper::Utf8Char result = utf_helper::ConvertUtf16ToUtf8(d0, d1, modify, isWriteBuffer); + + EXPECT_EQ(result.n, 1); + EXPECT_EQ(result.ch[0], 0x00U); +} +} // namespace common::test \ No newline at end of file diff --git a/common_components/base/utf_helper.cpp b/common_components/base/utf_helper.cpp index ef8f496c16b011ef02685457d0c3e30b84d7bf3b..14255455084deb5f5767ebcded1410c6bc56661e 100644 --- a/common_components/base/utf_helper.cpp +++ b/common_components/base/utf_helper.cpp @@ -13,6 +13,7 @@ * limitations under the License. */ +#include "common_components/base/config.h" #include "common_components/base/utf_helper.h" #include "common_components/log/log.h" @@ -143,7 +144,7 @@ bool IsValidUTF8(const std::vector &data) return false; } break; - default: + default: //LCOV_EXCL_BR_LINE LOG_COMMON(FATAL) << "this branch is unreachable"; UNREACHABLE_CC(); break; @@ -247,6 +248,70 @@ size_t Utf16ToUtf8Size(const uint16_t *utf16, uint32_t length, bool modify, bool return res; } +#if ENABLE_NEXT_OPTIMIZATION && defined(USE_CMC_GC) +size_t ConvertRegionUtf16ToUtf8(const uint16_t *utf16In, uint8_t *utf8Out, size_t utf16Len, size_t utf8Len, + size_t start, bool modify, bool isWriteBuffer, bool cesu8) +{ + if (utf16In == nullptr || utf8Out == nullptr || utf8Len == 0) { + return 0; + } + size_t utf8Pos = 0; + size_t end = start + utf16Len; + for (size_t i = start; i < end; ++i) { + uint32_t codepoint = utf16In[i]; + if (codepoint == 0) { + if (isWriteBuffer) { + utf8Out[utf8Pos++] = UTF8_NUL; + continue; + } + if (modify) { + utf8Out[utf8Pos++] = UTF8_2B_FIRST; + utf8Out[utf8Pos++] = UTF8_2B_SECOND; 
+ } + continue; + } + if (codepoint >= DECODE_LEAD_LOW && codepoint <= DECODE_LEAD_HIGH && i + 1 < end) { + uint32_t high = utf16In[i]; + uint32_t low = utf16In[i + 1]; + if (!cesu8) { + if (low >= DECODE_TRAIL_LOW && low <= DECODE_TRAIL_HIGH) { + codepoint = + SURROGATE_RAIR_START + ((high - DECODE_LEAD_LOW) << UTF16_OFFSET) + (low - DECODE_TRAIL_LOW); + i++; + } + } + } + if (codepoint <= UTF8_1B_MAX) { + if (UNLIKELY(utf8Pos + UTF8_SINGLE_BYTE_LENGTH > utf8Len)) { + break; + } + utf8Out[utf8Pos++] = static_cast(codepoint); + } else if (codepoint <= UTF8_2B_MAX) { + if (UNLIKELY(utf8Pos + UTF8_DOUBLE_BYTE_LENGTH > utf8Len)) { + break; + } + utf8Out[utf8Pos++] = (BIT_MASK_2 | (codepoint >> OFFSET_6POS)); + utf8Out[utf8Pos++] = (byteMark | (codepoint & LOW_6BITS)); + } else if (codepoint <= UTF8_3B_MAX) { + if (UNLIKELY(utf8Pos + UTF8_TRIPLE_BYTE_LENGTH > utf8Len)) { + break; + } + utf8Out[utf8Pos++] = (UTF8_3B_FIRST | (codepoint >> OFFSET_12POS)); + utf8Out[utf8Pos++] = (byteMark | ((codepoint >> OFFSET_6POS) & LOW_6BITS)); + utf8Out[utf8Pos++] = (byteMark | (codepoint & LOW_6BITS)); + } else { + if (UNLIKELY(utf8Pos + UTF8_QUAD_BYTE_LENGTH > utf8Len)) { + break; + } + utf8Out[utf8Pos++] = (UTF8_4B_FIRST | (codepoint >> OFFSET_18POS)); + utf8Out[utf8Pos++] = (byteMark | ((codepoint >> OFFSET_12POS) & LOW_6BITS)); + utf8Out[utf8Pos++] = (byteMark | ((codepoint >> OFFSET_6POS) & LOW_6BITS)); + utf8Out[utf8Pos++] = (byteMark | (codepoint & LOW_6BITS)); + } + } + return utf8Pos; +} +#else size_t ConvertRegionUtf16ToUtf8(const uint16_t *utf16In, uint8_t *utf8Out, size_t utf16Len, size_t utf8Len, size_t start, bool modify, bool isWriteBuffer, bool cesu8) { @@ -277,6 +342,7 @@ size_t ConvertRegionUtf16ToUtf8(const uint16_t *utf16In, uint8_t *utf8Out, size_ } return utf8Pos; } +#endif size_t DebuggerConvertRegionUtf16ToUtf8(const uint16_t *utf16In, uint8_t *utf8Out, size_t utf16Len, size_t utf8Len, size_t start, bool modify, bool isWriteBuffer) diff --git 
a/common_components/base/utf_helper.h b/common_components/base/utf_helper.h index c3a198a252388a343c37a1e2edb41b7f393ad8f9..b2c7e02494e0784b4df4c09ab227fc3793131d24 100644 --- a/common_components/base/utf_helper.h +++ b/common_components/base/utf_helper.h @@ -80,6 +80,11 @@ constexpr size_t MASK_16BIT = 0xffff; static constexpr uint8_t UTF8_1B_MAX = 0x7f; +static constexpr size_t UTF8_SINGLE_BYTE_LENGTH = 1; +static constexpr size_t UTF8_DOUBLE_BYTE_LENGTH = 2; +static constexpr size_t UTF8_TRIPLE_BYTE_LENGTH = 3; +static constexpr size_t UTF8_QUAD_BYTE_LENGTH = 4; +static constexpr uint8_t UTF8_NUL = 0x00U; static constexpr uint16_t UTF8_2B_MAX = 0x7ff; static constexpr uint8_t UTF8_2B_FIRST = 0xc0; static constexpr uint8_t UTF8_2B_SECOND = 0x80; @@ -172,7 +177,7 @@ static inline uint32_t CombineTwoU16(uint16_t d0, uint16_t d1) return codePoint; } -std::pair ConvertUtf8ToUnicodeChar(const uint8_t *utf8, size_t maxLen); +std::pair PUBLIC_API ConvertUtf8ToUnicodeChar(const uint8_t *utf8, size_t maxLen); static inline bool IsHexDigits(uint16_t ch) { diff --git a/common_components/common/page_allocator.h b/common_components/common/page_allocator.h index e339fe475670edf1cd54b9ab911edb0ef8723738..8eb8f669b26e5312977d844fc5ca86e936653d53 100755 --- a/common_components/common/page_allocator.h +++ b/common_components/common/page_allocator.h @@ -132,6 +132,7 @@ public: // create page if nonFull_ is nullptr if (nonFull_ == nullptr) { Page* cur = CreatePage(); + DCHECK_CC(cur != nullptr); InitPage(*cur); ++totalPages_; nonFull_ = cur; diff --git a/common_components/common/page_pool.h b/common_components/common/page_pool.h index 499638bc5f3aa9751b3173e86928b379a11326d4..a4a44642ce2c18118aaed68d3467ebb9240858b8 100755 --- a/common_components/common/page_pool.h +++ b/common_components/common/page_pool.h @@ -135,7 +135,7 @@ protected: { #ifdef _WIN64 void* result = VirtualAlloc(NULL, size, isCommit ? 
MEM_COMMIT : MEM_RESERVE, PAGE_READWRITE); - if (result == NULL) { + if (result == NULL) { //LCOV_EXCL_BR_LINE LOG_COMMON(FATAL) << "allocate create page failed! Out of Memory!"; UNREACHABLE_CC(); } diff --git a/common_components/heap/w_collector/tests/BUILD.gn b/common_components/common/tests/BUILD.gn old mode 100755 new mode 100644 similarity index 84% rename from common_components/heap/w_collector/tests/BUILD.gn rename to common_components/common/tests/BUILD.gn index 300f6daa0b1dcc2c5899d092818445821ff393a1..09fbabcdd34e39e4926672245866ea5717b5864a --- a/common_components/heap/w_collector/tests/BUILD.gn +++ b/common_components/common/tests/BUILD.gn @@ -15,15 +15,12 @@ import("//arkcompiler/ets_runtime/common_components/tests/test_helper.gni") module_output_path = "ets_runtime" -host_unittest_action("W_Collector_Test") { +host_unittest_action("Page_Cache_Test") { module_out_path = module_output_path sources = [ # test file - "post_trace_barrier_test.cpp", - "preforward_barrier_test.cpp", - "trace_barrier_test.cpp", - "w_collector_test.cpp", + "page_cache_test.cpp", ] configs = [ @@ -46,7 +43,7 @@ group("unittest") { # deps file deps = [ - ":W_Collector_Test", + ":Page_Cache_Test", ] } @@ -55,6 +52,6 @@ group("host_unittest") { # deps file deps = [ - ":W_Collector_TestAction", + ":Page_Cache_TestAction", ] } \ No newline at end of file diff --git a/common_components/common/tests/page_cache_test.cpp b/common_components/common/tests/page_cache_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f57ff2584c00446852ff1728a73f0ec7dd5d75d7 --- /dev/null +++ b/common_components/common/tests/page_cache_test.cpp @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/common/page_cache.h" +#include "common_components/tests/test_helper.h" + +using namespace common; + +class PageCacheTestSimple : public common::test::BaseTestWithScope { +protected: + void SetUp() override {} + void TearDown() override {} +}; + +HWTEST_F(PageCacheTestSimple, NewSpan_AllocatesSingleSpan, testing::ext::TestSize.Level0) { + Span* span = PageCache::GetInstance()->NewSpan(1); + ASSERT_NE(span, nullptr); + EXPECT_EQ(span->pageNum, 1U); +} + +HWTEST_F(PageCacheTestSimple, MapObjectToSpan_ReturnsCorrectSpan, testing::ext::TestSize.Level0) { + Span* span = PageCache::GetInstance()->NewSpan(1); + void* obj = reinterpret_cast(span->pageId << PAGE_SHIFT); + + Span* result = PageCache::GetInstance()->MapObjectToSpan(obj); + EXPECT_EQ(result, span); +} + +HWTEST_F(PageCacheTestSimple, ReleaseSpanToPageCache_CanMerge, testing::ext::TestSize.Level0) { + Span* span1 = PageCache::GetInstance()->NewSpan(1); + Span* span2 = PageCache::GetInstance()->NewSpan(1); + + span1->pageNum = 1; + span1->pageId = 100; + span2->pageNum = 1; + span2->pageId = 101; + + PageCache::GetInstance()->ReleaseSpanToPageCache(span1); + PageCache::GetInstance()->ReleaseSpanToPageCache(span2); + + Span* mergedSpan = PageCache::GetInstance()->NewSpan(2); + EXPECT_NE(mergedSpan, nullptr); + EXPECT_EQ(mergedSpan->pageNum, 2U); +} + +HWTEST_F(PageCacheTestSimple, NewSpan_SplitFromLargerSpan, testing::ext::TestSize.Level0) { + Span* bigSpan = PageCache::GetInstance()->NewSpan(5); + ASSERT_NE(bigSpan, nullptr); + EXPECT_EQ(bigSpan->pageNum, 5U); 
+ + Span* smallSpan = PageCache::GetInstance()->NewSpan(2); + ASSERT_NE(smallSpan, nullptr); + EXPECT_EQ(smallSpan->pageNum, 2U); + + Span* remainingSpan = PageCache::GetInstance()->NewSpan(3); + ASSERT_NE(remainingSpan, nullptr); + EXPECT_EQ(remainingSpan->pageNum, 3U); +} + +HWTEST_F(PageCacheTestSimple, ReleaseSpanToPageCache_MergeForward, testing::ext::TestSize.Level0) { + Span* span1 = PageCache::GetInstance()->NewSpan(1); + Span* span2 = PageCache::GetInstance()->NewSpan(1); + + span1->pageNum = 1; + span1->pageId = 100; + span2->pageNum = 1; + span2->pageId = 101; + + PageCache::GetInstance()->ReleaseSpanToPageCache(span1); + PageCache::GetInstance()->ReleaseSpanToPageCache(span2); + + Span* mergedSpan = PageCache::GetInstance()->NewSpan(2); + EXPECT_NE(mergedSpan, nullptr); + EXPECT_EQ(mergedSpan->pageNum, 2U); + EXPECT_EQ(mergedSpan->pageId, 100U); +} \ No newline at end of file diff --git a/common_components/common/type_def.h b/common_components/common/type_def.h index 0bd90877a0e5b01460a19ab8367a55c7f65fd854..7109d446334e9a55e94d0e544c91414c6fc7f643 100755 --- a/common_components/common/type_def.h +++ b/common_components/common/type_def.h @@ -43,7 +43,8 @@ using MIndex = uint64_t; // index of array // this is acceptable. enum class AllocType { MOVEABLE_OBJECT = 0, - PINNED_OBJECT, + MOVEABLE_OLD_OBJECT, + NONMOVABLE_OBJECT, RAW_POINTER_OBJECT, READ_ONLY_OBJECT, }; diff --git a/common_components/common/work_stack-inl.h b/common_components/common/work_stack-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..dbfa3e5e60345570b01bf4566c5795b49e992aeb --- /dev/null +++ b/common_components/common/work_stack-inl.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef COMMON_COMPONENTS_COMMON_WORK_STACK_INL_H +#define COMMON_COMPONENTS_COMMON_WORK_STACK_INL_H + +#include "common_components/common/work_stack.h" + +#include + +namespace common { + +template +bool StackBase::IsEmpty() const +{ + DCHECK_CC(top_ <= capacity); + return top_ == 0; +} + +template +bool StackBase::IsFull() const +{ + DCHECK_CC(top_ <= capacity); + return top_ == capacity; +} + +template +void StackBase::Push(T *e) +{ + DCHECK_CC(top_ <= capacity); + DCHECK_CC(!IsFull()); + DCHECK_CC(e != nullptr); + data_[top_++] = e; +} + +template +void StackBase::Pop(T **e) +{ + DCHECK_CC(top_ <= capacity); + DCHECK_CC(!IsEmpty()); + T *result = data_[--top_]; + DCHECK_CC(result != nullptr); + *e = result; +} + +template +StackBase *StackBase::GetNext() const +{ + return next_; +} + +template +void StackBase::SetNext(StackBase *next) +{ + next_ = next; +} + +template +void StackList::Push(InternalStack *stack) +{ + DCHECK_CC(stack != nullptr); + DCHECK_CC(!stack->IsEmpty()); + std::lock_guard guard(mutex_); + stack->SetNext(head_); + head_ = stack; +} + +template +void StackList::Pop(InternalStack **stack) +{ + std::lock_guard guard(mutex_); + *stack = head_; + if (head_ != nullptr) { + head_ = head_->GetNext(); + } +} + +template +size_t StackList::Count() +{ + size_t cnt = 0; + std::lock_guard guard(mutex_); + InternalStack *current = head_; + while (current != nullptr) { + ++cnt; + current = current->GetNext(); + } + return cnt; +} + +namespace __work_stack_internal_impl { + +template +void LocalStackImpl::Push(T *e) +{ + 
DCHECK_CC(e != nullptr); + if (UNLIKELY_CC(inStack_->IsFull())) { + PushInStackToGlobal(); + } + DCHECK_CC(!inStack_->IsFull()); + inStack_->Push(e); +} + +template +bool LocalStackImpl::Pop(T **e) +{ + if (UNLIKELY_CC(outStack_->IsEmpty())) { + if (UNLIKELY_CC(!inStack_->IsEmpty())) { + std::swap(inStack_, outStack_); + } else if (!PopOutStackFromGlobal()) { + return false; + } + } + DCHECK_CC(!outStack_->IsEmpty()); + outStack_->Pop(e); + return true; +} + +template +bool LocalStackImpl::IsEmpty() const +{ + return inStack_->IsEmpty() && outStack_->IsEmpty(); +} + +template +void LocalStackImpl::Publish() +{ + if (!inStack_->IsEmpty()) { + PushInStackToGlobal(); + } + std::swap(inStack_, outStack_); + if (!inStack_->IsEmpty()) { + PushInStackToGlobal(); + } +} + +template +void LocalStackImpl::PushInStackToGlobal() +{ + DCHECK_CC(!inStack_->IsEmpty()); + globalStack_->Push(inStack_); + if constexpr (HAS_PUSH_TO_GLOBAL_NOTIFY) { + this->NotifyPushToGlobal(); + } + inStack_ = new InternalStack(); +} + +template +bool LocalStackImpl::PopOutStackFromGlobal() +{ + DCHECK_CC(outStack_->IsEmpty()); + InternalStack *newStack = nullptr; + globalStack_->Pop(&newStack); + if (LIKELY_CC(newStack != nullptr)) { + delete outStack_; + outStack_ = newStack; + return true; + } + return false; +} +} // namespace __work_stack_internal_impl +} // namespace common +#endif // COMMON_COMPONENTS_COMMON_WORK_STACK_INL_H diff --git a/common_components/common/work_stack.h b/common_components/common/work_stack.h new file mode 100644 index 0000000000000000000000000000000000000000..b42928753e1611bb2b6d87c9775909ab9fe28ff9 --- /dev/null +++ b/common_components/common/work_stack.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef COMMON_COMPONENTS_COMMON_WORK_STACK_H +#define COMMON_COMPONENTS_COMMON_WORK_STACK_H + +#include +#include + +#include "common_interfaces/base/common.h" +#include "common_components/log/log.h" + +namespace common { +template +class StackBase { +public: + StackBase() = default; + ~StackBase() + { + DCHECK_CC(IsEmpty()); + } + + NO_COPY_SEMANTIC_CC(StackBase); + NO_MOVE_SEMANTIC_CC(StackBase); + + bool IsEmpty() const; + + bool IsFull() const; + + void Push(T *e); + + void Pop(T **e); + + StackBase *GetNext() const; + + void SetNext(StackBase *next); + +private: + size_t top_ {0}; + StackBase *next_ {nullptr}; + T *data_[capacity]; +}; + +namespace __work_stack_internal_impl { +template +class LocalStackImpl; +} + +template +class StackList { +public: + using InternalStack = StackBase; + StackList() = default; + ~StackList() + { + DCHECK_CC(head_ == nullptr); + } + + NO_COPY_SEMANTIC_CC(StackList); + NO_MOVE_SEMANTIC_CC(StackList); + + size_t Count(); + +private: + void Push(InternalStack *stack); + + void Pop(InternalStack **stack); + + InternalStack *head_ {nullptr}; + std::mutex mutex_; + + template + friend class __work_stack_internal_impl::LocalStackImpl; +}; + +namespace __work_stack_internal_impl { + +class LocalStackBaseWithoutNotify { +protected: + LocalStackBaseWithoutNotify() = default; + ~LocalStackBaseWithoutNotify() = default; + + NO_COPY_SEMANTIC_CC(LocalStackBaseWithoutNotify); + NO_MOVE_SEMANTIC_CC(LocalStackBaseWithoutNotify); +}; + +template +class LocalStackBaseWithNotify { +protected: + explicit 
LocalStackBaseWithNotify(PushToGlobalNotify *pushToGlobalNotify) + : pushToGlobalNotify_(pushToGlobalNotify) + { + DCHECK_CC(pushToGlobalNotify_ != nullptr); + } + ~LocalStackBaseWithNotify() + { + pushToGlobalNotify_ = nullptr; + } + + NO_COPY_SEMANTIC_CC(LocalStackBaseWithNotify); + NO_MOVE_SEMANTIC_CC(LocalStackBaseWithNotify); + + void NotifyPushToGlobal() + { + DCHECK_CC(pushToGlobalNotify_ != nullptr); + (*pushToGlobalNotify_)(); + } +private: + PushToGlobalNotify *pushToGlobalNotify_ {nullptr}; +}; + +struct DummyNoPushToGlobalNotify {}; + +template +class LocalStackImpl final : public std::conditional_t, + LocalStackBaseWithoutNotify, + LocalStackBaseWithNotify> { + using InternalStack = StackBase; + using GlobalStack = StackList; +private: + static constexpr bool HAS_PUSH_TO_GLOBAL_NOTIFY = !std::is_same_v; +public: + template >> + LocalStackImpl(GlobalStack *globalStack, PushToGlobalNotify *pushToGlobalNotify) + : LocalStackBaseWithNotify(pushToGlobalNotify), globalStack_(globalStack) + { + inStack_ = new InternalStack(); + outStack_ = new InternalStack(); + } + + template >> + explicit LocalStackImpl(GlobalStack *globalStack) : globalStack_(globalStack) + { + inStack_ = new InternalStack(); + outStack_ = new InternalStack(); + } + + ~LocalStackImpl() + { + DCHECK_CC(IsEmpty()); + delete inStack_; + delete outStack_; + inStack_ = nullptr; + outStack_ = nullptr; + } + + NO_COPY_SEMANTIC_CC(LocalStackImpl); + NO_MOVE_SEMANTIC_CC(LocalStackImpl); + + void Push(T *e); + + bool Pop(T **e); + + bool IsEmpty() const; + + void Publish(); + +private: + void PushInStackToGlobal(); + + bool PopOutStackFromGlobal(); + + GlobalStack *globalStack_ {nullptr}; + InternalStack *inStack_ {nullptr}; + InternalStack *outStack_ {nullptr}; +}; +} // namespace __work_stack_internal_impl + +template +using LocalStack = __work_stack_internal_impl::LocalStackImpl; +} // namespace common +#endif // COMMON_COMPONENTS_COMMON_WORK_STACK_H diff --git 
a/common_components/base_runtime/base_runtime.cpp b/common_components/common_runtime/base_runtime.cpp similarity index 81% rename from common_components/base_runtime/base_runtime.cpp rename to common_components/common_runtime/base_runtime.cpp index a651ff4693569ee94c0a57450e78436df627ad4a..1338ddd7ddbbab4ebffa22d561ef2257382727b8 100755 --- a/common_components/base_runtime/base_runtime.cpp +++ b/common_components/common_runtime/base_runtime.cpp @@ -15,8 +15,8 @@ #include "common_interfaces/base_runtime.h" -#include "common_components/base_runtime/base_runtime_param.h" -#include "common_components/base_runtime/hooks.h" +#include "common_components/common_runtime/base_runtime_param.h" +#include "common_components/common_runtime/hooks.h" #include "common_components/common/page_pool.h" #include "common_components/heap/allocator/region_desc.h" #include "common_components/heap/collector/heuristic_gc_policy.h" @@ -111,8 +111,11 @@ void BaseRuntime::InitFromDynamic(const RuntimeParam ¶m) } param_ = param; - - PagePool::Instance().Init(param_.heapParam.heapSize * KB / COMMON_PAGE_SIZE); + size_t pagePoolSize = param_.heapParam.heapSize; +#if defined(PANDA_TARGET_32) + pagePoolSize = pagePoolSize / 128; // 128 means divided. 
+#endif + PagePool::Instance().Init(pagePoolSize * KB / COMMON_PAGE_SIZE); mutatorManager_ = NewAndInit(); heapManager_ = NewAndInit(param_); baseClassRoots_ = NewAndInit(); @@ -123,7 +126,7 @@ void BaseRuntime::InitFromDynamic(const RuntimeParam ¶m) "Heap utilization: %.2f\n\tHeap growth: %.2f\n\tAllocation rate: %.2f(MB/s)\n\tAlloction wait time: %zuns\n\t" "GC Threshold: %zu(KB)\n\tGarbage threshold: %.2f\n\tGC interval: %zums\n\tBackup GC interval: %zus\n\t" "Log level: %d\n\tThread stack size: %zu(KB)\n\tArkcommon stack size: %zu(KB)\n\t" - "Processor number: %d", param_.heapParam.heapSize, param_.heapParam.regionSize, + "Processor number: %d", pagePoolSize, param_.heapParam.regionSize, param_.heapParam.exemptionThreshold, param_.heapParam.heapUtilization, 1 + param_.heapParam.heapGrowth, param_.heapParam.allocationRate, param_.heapParam.allocationWaitTime, param_.gcParam.gcThreshold / KB, param_.gcParam.garbageThreshold, @@ -163,23 +166,38 @@ void BaseRuntime::FiniFromDynamic() void BaseRuntime::PreFork(ThreadHolder *holder) { // Need appspawn space and compress gc. 
- RequestGC(GcType::APPSPAWN); + RequestGC(GC_REASON_APPSPAWN, false, GC_TYPE_FULL); { ThreadNativeScope scope(holder); HeapManager::StopRuntimeThreads(); } } -void BaseRuntime::PostFork() +void BaseRuntime::PostFork([[maybe_unused]] bool enableWarmStartup) { HeapManager::StartRuntimeThreads(); #ifdef ENABLE_COLD_STARTUP_GC_POLICY - StartupStatusManager::OnAppStartup(); + if (!enableWarmStartup) { + StartupStatusManager::OnAppStartup(); + } #endif } +void BaseRuntime::NotifyWarmStart() +{ + if (!Heap::GetHeap().IsGcStarted() && !Heap::GetHeap().OnStartupEvent()) { + StartupStatusManager::OnAppStartup(); + } +} + +void BaseRuntime::WriteRoot(void *obj) +{ + Heap::GetBarrier().WriteRoot(reinterpret_cast(obj)); +} + void BaseRuntime::WriteBarrier(void* obj, void* field, void* ref) { + DCHECK_CC(field != nullptr); Heap::GetBarrier().WriteBarrier(reinterpret_cast(obj), *reinterpret_cast*>(field), reinterpret_cast(ref)); } @@ -201,34 +219,22 @@ void* BaseRuntime::AtomicReadBarrier(void* obj, void* field, std::memory_order o *reinterpret_cast*>(field), order)); } -void BaseRuntime::RequestGC(GcType type) +void BaseRuntime::RequestGC(GCReason reason, bool async, GCType gcType) { - switch (type) { - case GcType::SYNC: { - HeapManager::RequestGC(GC_REASON_USER, false); - break; - } - case GcType::ASYNC: { - HeapManager::RequestGC(GC_REASON_USER, true); - break; - } - case GcType::FULL: { - HeapManager::RequestGC(GC_REASON_BACKUP, false); - break; - } - case GcType::APPSPAWN: { - HeapManager::RequestGC(GC_REASON_APPSPAWN, false); - break; - } - case GcType::FULL_WITH_XREF: { - HeapManager::RequestGC(GC_REASON_XREF, false); - break; - } + if (reason < GC_REASON_BEGIN || reason > GC_REASON_END || + gcType < GC_TYPE_BEGIN || gcType > GC_TYPE_END) { + VLOG(ERROR, "Invalid gc reason or gc type, gc reason: %s, gc type: %s", + GCReasonToString(reason), GCTypeToString(gcType)); + return; } + HeapManager::RequestGC(reason, async, gcType); } void BaseRuntime::WaitForGCFinish() { 
Heap::GetHeap().WaitForGCFinish(); } +void BaseRuntime::EnterGCCriticalSection() { return Heap::GetHeap().MarkGCStart(); } +void BaseRuntime::ExitGCCriticalSection() { return Heap::GetHeap().MarkGCFinish(); } + bool BaseRuntime::ForEachObj(HeapVisitor& visitor, bool safe) { return Heap::GetHeap().ForEachObject(visitor, safe); @@ -263,4 +269,9 @@ bool BaseRuntime::CheckAndTriggerHintGC(MemoryReduceDegree degree) { return Heap::GetHeap().CheckAndTriggerHintGC(degree); } + +void BaseRuntime::NotifyHighSensitive(bool isStart) +{ + Heap::GetHeap().NotifyHighSensitive(isStart); +} } // namespace common diff --git a/common_components/base_runtime/base_runtime_param.cpp b/common_components/common_runtime/base_runtime_param.cpp similarity index 96% rename from common_components/base_runtime/base_runtime_param.cpp rename to common_components/common_runtime/base_runtime_param.cpp index e87e6473eef5e60248e00cb73525bad653172bff..51db0d92b07057e4134671362e81da84e8b5b458 100755 --- a/common_components/base_runtime/base_runtime_param.cpp +++ b/common_components/common_runtime/base_runtime_param.cpp @@ -13,7 +13,7 @@ * limitations under the License. 
*/ -#include "common_components/base_runtime/base_runtime_param.h" +#include "common_components/common_runtime/base_runtime_param.h" #include "common_components/platform/cpu.h" diff --git a/common_components/base_runtime/base_runtime_param.h b/common_components/common_runtime/base_runtime_param.h similarity index 95% rename from common_components/base_runtime/base_runtime_param.h rename to common_components/common_runtime/base_runtime_param.h index 8ac4c77950762f42f43994dd8688da6920170eea..d8b68cd2e63ab99eb45154731c09587d87691528 100755 --- a/common_components/base_runtime/base_runtime_param.h +++ b/common_components/common_runtime/base_runtime_param.h @@ -26,7 +26,11 @@ public: static RuntimeParam DefaultRuntimeParam(); static size_t InitHeapSize(); static void SetConfigHeapSize(RuntimeParam ¶m, size_t configHeapSize); +#ifdef PANDA_TARGET_32 + static constexpr size_t MAX_HEAP_POOL_SIZE = 1 * GB; +#else static constexpr size_t MAX_HEAP_POOL_SIZE = 3.6 * GB; +#endif private: BaseRuntimeParam() = delete; @@ -42,7 +46,7 @@ private: BaseRuntimeParam::InitHeapSize() ) /* KB */; \ V(heapParam, regionSize, size_t, 4, 2048, 1024 ) /* KB */; \ V(heapParam, exemptionThreshold, double, 0.0, 1.0, 0.8 ) /* % */; \ - V(heapParam, heapUtilization, double, 0.0, 1.0, 0.6 ) /* % */; \ + V(heapParam, heapUtilization, double, 0.0, 1.0, 0.95 ) /* % */; \ V(heapParam, heapGrowth, double, 0.0, INT64_MAX, 1.15 ) /* times */; \ V(heapParam, allocationRate, double, 0.0, INT64_MAX, 10240 ) /* rate */; \ V(heapParam, allocationWaitTime, uint64_t, 0, INT64_MAX, 1000 ) /* ns */; \ @@ -52,12 +56,12 @@ private: V(gcParam, garbageThreshold, double, 0.1, 1.0, 0.5 ) /* % */;; \ V(gcParam, gcThreshold, size_t, 0, INT64_MAX, \ BaseRuntimeParam::InitHeapSize() * KB ) /* byte */;\ - V(gcParam, gcInterval, uint64_t, 0, INT64_MAX, 150000 ) /* us */; \ + V(gcParam, gcInterval, uint64_t, 0, INT64_MAX, 150000000 ) /* ns */; \ V(gcParam, backupGCInterval, uint64_t, 0, INT64_MAX, 240000 ) /* ms */; \ 
V(gcParam, maxGrowBytes, size_t, 0, INT64_MAX, 32 * MB ) /* byte */; \ V(gcParam, minGrowBytes, size_t, 0, INT64_MAX, 8 * MB ) /* byte */; \ V(gcParam, multiplier, double, 0.0, 10.0, 1.0 ) /* % */; \ - V(gcParam, ygcRateAdjustment, double, 0.0, 1.0, 0.5 ) /* % */; \ + V(gcParam, ygcRateAdjustment, double, 0.0, 1.0, 0.8 ) /* % */; \ V(gcParam, kMinConcurrentRemainingBytes, \ size_t, 0, INT64_MAX, 128 * KB ) /* byte */; \ V(gcParam, kMaxConcurrentRemainingBytes, \ @@ -79,12 +83,12 @@ private: V(gcParam, garbageThreshold, double, 0.1, 1.0, 0.5 ) /* % */; \ V(gcParam, gcThreshold, size_t, 0, INT64_MAX, \ BaseRuntimeParam::InitHeapSize() * KB ) /* byte */; \ - V(gcParam, gcInterval, uint64_t, 0, INT64_MAX, 150000 ) /* us */; \ + V(gcParam, gcInterval, uint64_t, 0, INT64_MAX, 150000000 ) /* ns */; \ V(gcParam, backupGCInterval, uint64_t, 0, INT64_MAX, 240000 ) /* ms */; \ V(gcParam, maxGrowBytes, size_t, 0, INT64_MAX, 32 * MB ) /* byte */; \ V(gcParam, minGrowBytes, size_t, 0, INT64_MAX, 8 * MB ) /* byte */; \ V(gcParam, multiplier, double, 0.0, 10.0, 1.0 ) /* % */; \ - V(gcParam, ygcRateAdjustment, double, 0.0, 1.0, 0.5 ) /* % */; \ + V(gcParam, ygcRateAdjustment, double, 0.0, 1.0, 0.8 ) /* % */; \ V(gcParam, kMinConcurrentRemainingBytes, \ size_t, 0, INT64_MAX, 128 * KB ) /* byte */; \ V(gcParam, kMaxConcurrentRemainingBytes, \ diff --git a/common_components/base_runtime/hooks.h b/common_components/common_runtime/hooks.h similarity index 79% rename from common_components/base_runtime/hooks.h rename to common_components/common_runtime/hooks.h index 1bce87bd739f90d3848bd2a8ec63622cafb30fd9..78f8d48258d0e6aad6a3e90db1c3b34876a4c5c5 100644 --- a/common_components/base_runtime/hooks.h +++ b/common_components/common_runtime/hooks.h @@ -23,19 +23,26 @@ #include "common_interfaces/thread/mutator_base.h" // Visitor that iterate all `RefField`s in a TaggedObject and add them to -// `WorkStack` Should be moved to BaseRT and panda namespace later +// `LocalMarkStack` Should be 
moved to BaseRT and panda namespace later namespace common { // Roots in BaseRuntime PUBLIC_API void VisitBaseRoots(const RefFieldVisitor &visitor); // Dynamic VM Roots scanning PUBLIC_API void VisitDynamicGlobalRoots(const RefFieldVisitor &visitor); PUBLIC_API void VisitDynamicWeakGlobalRoots(const WeakRefFieldVisitor &visitorFunc); +PUBLIC_API void VisitDynamicWeakGlobalRootsOld(const WeakRefFieldVisitor &visitorFunc); PUBLIC_API void VisitDynamicLocalRoots(const RefFieldVisitor &visitor); PUBLIC_API void VisitDynamicWeakLocalRoots(const WeakRefFieldVisitor &visitorFunc); +PUBLIC_API void VisitDynamicPreforwardRoots(const RefFieldVisitor &visitorFunc); +// Inlcude concurrent local and concurrent global roots +PUBLIC_API void VisitDynamicConcurrentRoots(const RefFieldVisitor &visitorFunc); + +PUBLIC_API void InvokeSharedNativePointerCallbacks(); // Visit roots of specific local thread. PUBLIC_API void VisitDynamicThreadRoot(const RefFieldVisitor &visitorFunc, void *vm); PUBLIC_API void VisitDynamicWeakThreadRoot(const WeakRefFieldVisitor &visitorFunc, void *vm); +PUBLIC_API void VisitDynamicThreadPreforwardRoot(const RefFieldVisitor &visitorFunc, void *vm); PUBLIC_API void AddXRefToDynamicRoots(); PUBLIC_API void RemoveXRefFromDynamicRoots(); @@ -47,11 +54,13 @@ PUBLIC_API void SynchronizeGCPhaseToJSThread(void *jsThread, GCPhase gcPhase); PUBLIC_API void FillFreeObject(void *object, size_t size); PUBLIC_API void SetBaseAddress(uintptr_t base); PUBLIC_API void JSGCCallback(void *ecmaVM); +PUBLIC_API bool IsPostForked(); // Jit interfaces PUBLIC_API void SweepThreadLocalJitFort(); PUBLIC_API bool IsMachineCodeObject(uintptr_t obj); PUBLIC_API void JitFortUnProt(size_t size, void* base); +PUBLIC_API void MarkThreadLocalJitFortInstalled(void *thread, void *machineCode); // Used for init/fini BaseRuntime from static PUBLIC_API void CheckAndInitBaseRuntime(const RuntimeParam ¶m); diff --git a/common_components/common_runtime/src/base/log_file.cpp 
b/common_components/common_runtime/src/base/log_file.cpp deleted file mode 100644 index a64eca43557c46d5dbf7834f971149bbb596fb3d..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/base/log_file.cpp +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "common_components/common_runtime/src/base/log_file.h" - -#include - -#include "common_components/base/sys_call.h" -#include "securec.h" - -namespace common { -LogFile::LogFileItem LogFile::logFile_[LOG_TYPE_NUMBER]; - -const char* LOG_TYPE_NAMES[LOG_TYPE_NUMBER] = { - "report", - - "debug", - - "alloc", "region", "fragment", - - "barrier", "ebarrier", "tbarrier", "pbarrier", "fbarrier", - - "gcphase", "enum", "trace", "preforward", "copy", "fix", "finalize", - - "unwind", "exception", "signal", - - "arkthread", -#ifdef ARKCOMMON_SANITIZER_SUPPORT - "sanitizer", -#endif -}; - -Level LogFile::logLevel_ = InitLogLevel(); - -void LogFile::Init() -{ - SetFlags(); -#ifndef NDEBUG - OpenLogFiles(); -#endif -} - -void LogFile::Fini() { CloseLogFiles(); } - -void LogFile::SetFlags() -{ -#ifndef NDEBUG - logFile_[REPORT].enableLog = ARK_ENABLED_LOG(ARK_REPORT); - logFile_[ALLOC].enableLog = ARK_ENABLED_LOG(ARK_LOG_ALLOC); - logFile_[REGION].enableLog = ARK_ENABLED_LOG(ARK_LOG_REGION); - logFile_[FRAGMENT].enableLog = ARK_ENABLED_LOG(ARK_LOG_FRAGMENT); - logFile_[DEBUGY].enableLog = 
ARK_ENABLED_LOG(ARK_LOG_DEBUG); - logFile_[BARRIER].enableLog = ARK_ENABLED_LOG(ARK_LOG_BARRIER); - logFile_[EBARRIER].enableLog = ARK_ENABLED_LOG(ARK_LOG_EBARRIER); - logFile_[TBARRIER].enableLog = ARK_ENABLED_LOG(ARK_LOG_TBARRIER); - logFile_[PBARRIER].enableLog = ARK_ENABLED_LOG(ARK_LOG_PBARRIER); - logFile_[FBARRIER].enableLog = ARK_ENABLED_LOG(ARK_LOG_FBARRIER); - logFile_[GCPHASE].enableLog = ARK_ENABLED_LOG(ARK_LOG_COMMONPHASE); - logFile_[ENUM].enableLog = ARK_ENABLED_LOG(ARK_LOG_ENUM); - logFile_[TRACE].enableLog = ARK_ENABLED_LOG(ARK_LOG_TRACE); - logFile_[PREFORWARD].enableLog = ARK_ENABLED_LOG(ARK_LOG_PREFORWARD); - logFile_[COPY].enableLog = ARK_ENABLED_LOG(ARK_LOG_COPY); - logFile_[FIX].enableLog = ARK_ENABLED_LOG(ARK_LOG_FIX); - logFile_[FINALIZE].enableLog = ARK_ENABLED_LOG(ARK_LOG_FINALIZE); - logFile_[UNWIND].enableLog = ARK_ENABLED_LOG(ARK_LOG_UNWIND); - logFile_[EXCEPTION].enableLog = ARK_ENABLED_LOG(ARK_LOG_EXCEPTION); - logFile_[SIGNAL].enableLog = ARK_ENABLED_LOG(ARK_LOG_SIGNAL); - logFile_[ARKTHREAD].enableLog = ARK_ENABLED_LOG(ARK_LOG_THREAD); -#ifdef ARKCOMMON_SANITIZER_SUPPORT - logFile_[SANITIZER].enableLog = ARK_ENABLED_LOG(ARK_LOG_SANITIZER); -#endif -#endif -} - -#ifndef NDEBUG -void LogFile::OpenLogFiles() -{ - CString pid = CString(GetPid()); - CString dateDigit = ""; - CString dirName = "."; - - for (int i = 0; i < LOG_TYPE_NUMBER; ++i) { - if (logFile_[i].enableLog) { - if (GetEnv("ARK_LOG_STDOUT", 0) == 1) { - logFile_[i].file = fopen("/dev/stdout", "w"); - if (logFile_[i].file == nullptr) { - LOG_COMMON(ERROR) << "LogFile::OpenLogFiles(): fail to set file /dev/stdout"; - continue; - } - } else { - CString fileName = dirName + "/" + dateDigit + "_" + pid + "." + LOG_TYPE_NAMES[i] + ".txt"; - LOG_COMMON(INFO) << "create log file " << fileName.Str(); - logFile_[i].file = fopen(fileName.Str(), "a+"); // Assignment closes the old file. 
- if (logFile_[i].file == nullptr) { - LOG_COMMON(ERROR) << "LogFile::OpenLogFiles(): fail to open the file"; - continue; - } - } - } - } -} -#endif - -void LogFile::CloseLogFiles() -{ - for (int i = 0; i < LOG_TYPE_NUMBER; ++i) { - if (logFile_[i].enableLog) { - logFile_[i].enableLog = false; - fclose(logFile_[i].file); - } - } -} - -static bool MaybeRotate(size_t curPos, size_t maxSize, FILE* file) -{ - if (curPos < maxSize) { - return false; - } - (void)fflush(file); - (void)ftruncate(fileno(file), ftell(file)); - rewind(file); - return true; -} - - -static void WriteLogImpl(bool addPrefix, LogType type, const char* format, va_list& args) -{ - char buf[LOG_BUFFER_SIZE]; - if (!LogFile::LogIsEnabled(type)) { - return; - } - int index = 0; - if (addPrefix) { - index = sprintf_s(buf, sizeof(buf), "%s %d ", TimeUtil::GetTimestamp().Str(), GetTid()); - if (index == -1) { - LOG_COMMON(ERROR) << "WriteLogImpl sprintf_s failed. msg: " << strerror(errno); - return; - } - } - - int ret = vsprintf_s(buf + index, sizeof(buf) - index, format, args); - if (ret == -1) { - LOG_COMMON(ERROR) << "WriteLogImpl vsprintf_s failed. msg: " << strerror(errno); - return; - } - index += ret; - - LogFile::LogFileLock(type); -#ifdef PANDA_TARGET_OHOS - auto env = CString(std::getenv("ARK_REPORT")); - if (env.Str() != nullptr) { -#endif - FILE* file = LogFile::GetFile(type); - if (file == nullptr) { - LOG_COMMON(ERROR) << "WriteLog failed. ARK_REPORT is not a valid path. Please check again."; - LogFile::LogFileUnLock(type); - return; - } - int err = fprintf(file, "%s\n", buf); - if ((err - 1) != index) { // 1 = '\n' - LOG_COMMON(ERROR) << "WriteLogImpl fprintf failed. 
msg: %s\n", strerror(errno); - LogFile::LogFileUnLock(type); - return; - } -#ifdef PANDA_TARGET_OHOS - fflush(file); - } -#else -#ifdef NDEBUG - size_t curPos = LogFile::GetCurPosLocation(type); - LogFile::SetCurPosLocation(type, curPos + index); - if (MaybeRotate(curPos + index, LogFile::GetMaxFileSize(type), file)) { - LogFile::SetCurPosLocation(type, 0); - } -#endif - fflush(file); -#endif - LogFile::LogFileUnLock(type); -} - -void WriteLog(bool addPrefix, LogType type, const char* format, ...) noexcept -{ - va_list args; - va_start(args, format); - WriteLogImpl(addPrefix, type, format, args); - va_end(args); -} - -// Orders of magnitudes. Note: The upperbound of uint64_t is 16E (16 * (1024 ^ 6)) -const char* g_orderOfMagnitude[] = { "", "K", "M", "G", "T", "P", "E" }; - -// Orders of magnitudes. Note: The upperbound of uint64_t is 16E (16 * (1024 ^ 6)) -const char* g_orderOfMagnitudeFromNano[] = { "n", "u", "m", nullptr }; - -// number of digits in a pretty format segment (100,000,000 each has three digits) -constexpr int NUM_DIGITS_PER_SEGMENT = 3; - -CString Pretty(uint64_t number) noexcept -{ - CString orig = CString(number); - int pos = static_cast(orig.Length()) - NUM_DIGITS_PER_SEGMENT; - while (pos > 0) { - orig.Insert(pos, ","); - pos -= NUM_DIGITS_PER_SEGMENT; - } - return orig; -} - -// Useful for informatic units, such as KiB, MiB, GiB, ... -CString PrettyOrderInfo(uint64_t number, const char* unit) -{ - size_t order = 0; - const uint64_t factor = 1024; - - while (number > factor) { - number /= factor; - order += 1; - } - - const char* prefix = g_orderOfMagnitude[order]; - const char* infix = order > 0 ? 
"i" : ""; // 1KiB = 1024B, but there is no "1iB" - - return CString(number) + prefix + infix + unit; -} - -// Useful for scientific units where number is in nanos: ns, us, ms, s -CString PrettyOrderMathNano(uint64_t number, const char* unit) -{ - size_t order = 0; - const uint64_t factor = 1000; // show in us if under 10ms - - while (number > factor && g_orderOfMagnitudeFromNano[order] != nullptr) { - number /= factor; - order += 1; - } - - const char* prefix = g_orderOfMagnitudeFromNano[order]; - if (prefix == nullptr) { - prefix = ""; - } - - return CString(number) + prefix + unit; -} - -#ifndef NDEBUG -long GetEnv(const char* envName, long defaultValue) -{ - const char* ev = getenv(envName); - if (ev != nullptr) { - char* endptr = nullptr; - long rv = std::strtol(ev, &endptr, 0); // support dec, oct and hex - if (*endptr == '\0') { - return rv; - } - } - - return defaultValue; -} -#endif - -Level InitLogLevel() -{ - auto env = CString(std::getenv("ARK_LOG_LEVEL")); - if (env.Str() == nullptr) { - return Level::ERROR; - } - - CString logLevel = env.RemoveBlankSpace(); - if (logLevel.Length() != 1) { - LOG_COMMON(ERROR) << "Unsupported in ARK_LOG_LEVEL length. Valid length must be 1." - " Valid ARK_LOG_LEVEL must be in ['v', 'd', 'i', 'w', 'e', 'f' 's']."; - return Level::ERROR; - } - - switch (logLevel.Str()[0]) { - case 'v': - return Level::VERBOSE; - case 'd': - return Level::DEBUG; - case 'i': - return Level::INFO; - case 'w': - return Level::WARN; - case 'e': - return Level::ERROR; - case 'f': - return Level::FATAL; - case 's': - return Level::FATAL_WITHOUT_ABORT; - default: - LOG_COMMON(ERROR) << "Unsupported in ARK_LOG_LEVEL. 
Valid ARK_LOG_LEVEL must be in" - "['v', 'd', 'i', 'w', 'e', 'f' 's'].\n"; - } - return Level::ERROR; -} -} // namespace common diff --git a/common_components/common_runtime/src/base/log_file.h b/common_components/common_runtime/src/base/log_file.h deleted file mode 100755 index f396c5e1080fcef68f7c5fb5d4ab695a16a0c329..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/base/log_file.h +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ARK_COMMON_LOG_FILE_H -#define ARK_COMMON_LOG_FILE_H - -#include - -#include "common_components/base/time_utils.h" - - -namespace common { -enum LogType { - // for overall brief report - REPORT = 0, - - // for debug purpose - DEBUGY, - - // for allocator - ALLOC, - REGION, - FRAGMENT, - - // for barriers - BARRIER, // idle phase - EBARRIER, // enum phase - TBARRIER, // trace phase - PBARRIER, // preforward phase - FBARRIER, // copy phase - - // for gc - GCPHASE, - ENUM, - TRACE, - PREFORWARD, - COPY, - FIX, - FINALIZE, - - UNWIND, - EXCEPTION, - SIGNAL, - - ARKTHREAD, - -#ifdef ARKCOMMON_SANITIZER_SUPPORT - SANITIZER, -#endif - - LOG_TYPE_NUMBER -}; - -#ifndef DEFAULT_ARK_REPORT -#define DEFAULT_ARK_REPORT 0 -#endif // DEFAULT_ARK_REPORT - -#ifndef DEFAULT_ARK_LOG_ALLOC -#define DEFAULT_ARK_LOG_ALLOC 0 -#endif // DEFAULT_ARK_LOG_ALLOC - -#ifndef DEFAULT_ARK_LOG_REGION -#define DEFAULT_ARK_LOG_REGION 0 -#endif // DEFAULT_ARK_LOG_REGION - -#ifndef DEFAULT_ARK_LOG_FRAGMENT -#define DEFAULT_ARK_LOG_FRAGMENT 0 -#endif // DEFAULT_ARK_LOG_FRAGMENT - -#ifndef DEFAULT_ARK_LOG_DEBUG -#define DEFAULT_ARK_LOG_DEBUG 0 -#endif // DEFAULT_ARK_LOG_DEBUG - -#ifndef DEFAULT_ARK_LOG_BARRIER -#define DEFAULT_ARK_LOG_BARRIER 0 -#endif // DEFAULT_ARK_LOG_BARRIER - -#ifndef DEFAULT_ARK_LOG_EBARRIER -#define DEFAULT_ARK_LOG_EBARRIER 0 -#endif // DEFAULT_ARK_LOG_EBARRIER - -#ifndef DEFAULT_ARK_LOG_TBARRIER -#define DEFAULT_ARK_LOG_TBARRIER 0 -#endif // DEFAULT_ARK_LOG_TBARRIER - -#ifndef DEFAULT_ARK_LOG_PBARRIER -#define DEFAULT_ARK_LOG_PBARRIER 0 -#endif // DEFAULT_ARK_LOG_PBARRIER - -#ifndef DEFAULT_ARK_LOG_FBARRIER -#define DEFAULT_ARK_LOG_FBARRIER 0 -#endif // DEFAULT_ARK_LOG_FBARRIER - -#ifndef DEFAULT_ARK_LOG_COMMONPHASE -#define DEFAULT_ARK_LOG_COMMONPHASE 0 -#endif // DEFAULT_ARK_LOG_COMMONPHASE - -#ifndef DEFAULT_ARK_LOG_ENUM -#define DEFAULT_ARK_LOG_ENUM 0 -#endif // DEFAULT_ARK_LOG_ENUM - -#ifndef DEFAULT_ARK_LOG_TRACE -#define DEFAULT_ARK_LOG_TRACE 0 -#endif // 
DEFAULT_ARK_LOG_TRACE - -#ifndef DEFAULT_ARK_LOG_PREFORWARD -#define DEFAULT_ARK_LOG_PREFORWARD 0 -#endif // DEFAULT_ARK_LOG_PREFORWARD - -#ifndef DEFAULT_ARK_LOG_COPY -#define DEFAULT_ARK_LOG_COPY 0 -#endif // DEFAULT_ARK_LOG_COPY - -#ifndef DEFAULT_ARK_LOG_FIX -#define DEFAULT_ARK_LOG_FIX 0 -#endif // DEFAULT_ARK_LOG_FIX - -#ifndef DEFAULT_ARK_LOG_FINALIZE -#define DEFAULT_ARK_LOG_FINALIZE 0 -#endif // DEFAULT_ARK_LOG_FINALIZE - -#ifndef DEFAULT_ARK_LOG_UNWIND -#define DEFAULT_ARK_LOG_UNWIND 0 -#endif // DEFAULT_ARK_LOG_UNWIND - -#ifndef DEFAULT_ARK_LOG_EXCEPTION -#define DEFAULT_ARK_LOG_EXCEPTION 0 -#endif // DEFAULT_ARK_LOG_EXCEPTION - -#ifndef DEFAULT_ARK_LOG_SIGNAL -#define DEFAULT_ARK_LOG_SIGNAL 0 -#endif // DEFAULT_ARK_LOG_SIGNAL - -#ifndef DEFAULT_ARK_LOG_THREAD -#define DEFAULT_ARK_LOG_THREAD 0 -#endif // DEFAULT_ARK_LOG_THREAD - -#ifndef DEFAULT_ARK_LOG2STDOUT -#define DEFAULT_ARK_LOG2STDOUT 0 -#endif // DEFAULT_ARK_LOG2STDOUT - -#ifdef ARKCOMMON_SANITIZER_SUPPORT -#ifndef DEFAULT_ARK_LOG_SANITIZER -#define DEFAULT_ARK_LOG_SANITIZER 0 -#endif // DEFAULT_ARK_LOG_SANITIZER -#endif - -#ifndef NDEBUG -long GetEnv(const char* envName, long defaultValue); // do not use directly - -// Use this macro to get environment variable for log. -// For example: ARK_ENABLED_LOG(ARK_REPORT) -// Will first check if an environment variable "ARK_REPORT" is present, -// and is a valid integer, and use its value. If not present or not a valid -// integer, it will fall back to the default value of the ARK_REPORT -// macro. This lets the user override configuration at run time, which is useful -// for debugging. 
-#define ARK_ENABLED_LOG(conf) (panda::GetEnv(#conf, DEFAULT_##conf) == 1) -#else -#define ARK_ENABLED_LOG(conf) (0) -#endif - -CString Pretty(uint64_t number) noexcept; -CString PrettyOrderInfo(uint64_t number, const char* unit); -CString PrettyOrderMathNano(uint64_t number, const char* unit); -Level InitLogLevel(); - -void WriteLog(bool addPrefix, LogType type, const char* format, ...) noexcept; - -#define ENABLE_LOG(type) LogFile::LogIsEnabled(type) - -#ifndef NDEBUG -#define DLOG(type, format...) \ - if (LogFile::LogIsEnabled(type)) { \ - WriteLog(true, type, format); \ - } -#define VLOG(type, format...) \ - if (LogFile::LogIsEnabled(type)) { \ - WriteLog(true, type, format); \ - } -#else -#define DLOG(type, format...) (void)(0) -#define VLOG(type, format...) \ - if (LogFile::LogIsEnabled(type)) { \ - WriteLog(true, type, format); \ - } -#endif - -constexpr size_t DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024; -constexpr size_t LOG_BUFFER_SIZE = 1024; - -class LogFile { -public: - LogFile() = default; - ~LogFile() = default; - static void Init(); - static void Fini(); - - struct LogFileItem { - bool enableLog = false; - std::mutex fileMutex; - FILE* file = nullptr; - size_t maxFileSize = DEFAULT_MAX_FILE_SIZE; - size_t curPosLocation = 0; - }; - - static FILE* GetFile(LogType type) { return logFile_[type].file; } - - static void LogFileLock(LogType type) { logFile_[type].fileMutex.lock(); } - - static void LogFileUnLock(LogType type) { logFile_[type].fileMutex.unlock(); } - - static bool LogIsEnabled(LogType type) noexcept - { -#ifdef PANDA_TARGET_OHOS - if (type == REPORT) { - return true; - } -#endif - return logFile_[type].enableLog; - } - - static void EnableLog(LogType type, bool key) { logFile_[type].enableLog = key; } - - static size_t GetMaxFileSize(LogType type) { return logFile_[type].maxFileSize; } - - static size_t GetCurPosLocation(LogType type) { return logFile_[type].curPosLocation; } - - static void SetCurPosLocation(LogType type, size_t curPos) { 
logFile_[type].curPosLocation = curPos; } - - static Level GetLogLevel() { return logLevel_; } - -private: -#ifndef NDEBUG - static void OpenLogFiles(); -#endif - static void CloseLogFiles(); - - static void SetFlags(); - static LogFileItem logFile_[LOG_TYPE_NUMBER]; - - static Level logLevel_; -}; - -#define ARK_COMMON_PHASE_TIMER(...) Timer ARK_pt_##__LINE__(__VA_ARGS__) - -class Timer { -public: - explicit Timer(const CString& pName, LogType type = REPORT) : name_(pName), logType_(type) - { - if (ENABLE_LOG(type)) { - startTime_ = TimeUtil::MicroSeconds(); - } - } - - ~Timer() - { - if (ENABLE_LOG(logType_)) { - uint64_t stopTime = TimeUtil::MicroSeconds(); - uint64_t diffTime = stopTime - startTime_; - WriteLog(true, logType_, "%s time: %sus", name_.Str(), Pretty(diffTime).Str()); - } - } - -private: - CString name_; - uint64_t startTime_ = 0; - LogType logType_; -}; -} // namespace common -#endif // ARK_COMMON_LOG_FILE_H diff --git a/common_components/common_runtime/src/cpu_profiler/cpu_profiler.cpp b/common_components/common_runtime/src/cpu_profiler/cpu_profiler.cpp deleted file mode 100755 index d638ba98c4821777328dae74829bcabbbb6e1b1c..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/cpu_profiler/cpu_profiler.cpp +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "common_components/common_runtime/src/cpu_profiler/cpu_profiler.h" - -#include "common_components/common_runtime/src/mutator/mutator_manager.h" - -namespace panda { -CpuProfiler::~CpuProfiler() -{ - TryStopSampling(); -} - -bool CpuProfiler::StartCpuProfilerForFile() -{ - if (generator_.GetIsStart()) { - LOG_COMMON(ERROR) << "Start CpuProfiler repeatedly."; - return false; - } - generator_.SetIsStart(true); - tid_ = std::thread(CpuProfiler::SamplingThread, std::ref(generator_)); - if (!tid_.joinable()) { - LOG_COMMON(ERROR) << "Failed to create sampling thread."; - return false; - } - return true; -} - -bool CpuProfiler::StopCpuProfilerForFile(const int fd) -{ - if (!generator_.GetIsStart()) { - LOG_COMMON(ERROR) << "CpuProfiler is not in profiling"; - return false; - } - bool ret = generator_.OpenFile(fd); - if (!ret) { - LOG_COMMON(ERROR) << "Open file failed"; - } - TryStopSampling(); - return ret; -} - -void CpuProfiler::TryStopSampling() -{ - if (!generator_.GetIsStart()) { - return; - } - generator_.SetIsStart(false); - if (tid_.joinable()) { - tid_.join(); - } -} - -void CpuProfiler::SamplingThread(SamplesRecord& generator) -{ - generator.InitProfileInfo(); - uint32_t interval = generator.GetSamplingInterval(); - uint64_t startTime = SamplesRecord::GetMicrosecondsTimeStamp(); - generator.SetThreadStartTime(startTime); - uint64_t endTime = startTime; - while (generator.GetIsStart()) { - startTime = SamplesRecord::GetMicrosecondsTimeStamp(); - int64_t ts = static_cast(interval) - static_cast(startTime - endTime); - endTime = startTime; - if (ts > 0) { - usleep(ts); - endTime = SamplesRecord::GetMicrosecondsTimeStamp(); - } - DoSampleStack(); - generator.ParseSampleData(endTime); - generator.DoSingleTask(endTime); - } - generator.RunTaskLoop(); - generator.SetSampleStopTime(SamplesRecord::GetMicrosecondsTimeStamp()); - generator.DumpProfileInfo(); - generator.ReleaseProfileInfo(); -} - -void CpuProfiler::DoSampleStack() -{ - 
MutatorManager::Instance().TransitionAllMutatorsToCpuProfile(); -} -} diff --git a/common_components/common_runtime/src/cpu_profiler/samples_record.cpp b/common_components/common_runtime/src/cpu_profiler/samples_record.cpp deleted file mode 100755 index aeafcb491dd779589cd49d7eb73f55910fab96e0..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/cpu_profiler/samples_record.cpp +++ /dev/null @@ -1,449 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "common_components/common_runtime/src/cpu_profiler/samples_record.h" -#include -#include "common_components/common_runtime/src/base/time_utils.h" - -namespace panda { -const int USEC_PER_SEC = 1000 * 1000; -const int NSEC_PER_USEC = 1000; -uint64_t SamplesRecord::GetMicrosecondsTimeStamp() -{ - struct timespec time; - clock_gettime(CLOCK_MONOTONIC, &time); - return time.tv_sec * USEC_PER_SEC + time.tv_nsec / NSEC_PER_USEC; -} - -SamplesRecord::~SamplesRecord() -{ - ReleaseProfileInfo(); -} - -void SamplesRecord::InitProfileInfo() -{ - profileInfo_ = new ProfileInfo(); - LOGF_CHECK(profileInfo_ != nullptr) << "new profileInfo fail"; - NodeInit(); -} - -void SamplesRecord::ReleaseProfileInfo() -{ - if (profileInfo_ != nullptr) { - delete profileInfo_; - profileInfo_ = nullptr; - } -} - -void SamplesRecord::NodeInit() -{ - CpuProfileNode methodNode; - - methodNode.id = ROOT_NODE_ID; - methodNode.parentId = UNKNOWN_NODE_ID; - methodNode.codeEntry.functionName = "(root)"; - profileInfo_->nodes[profileInfo_->nodeCount++] = methodNode; - profileInfo_->nodeSet.insert(methodNode); - - methodNode.id = PROGRAM_NODE_ID; - methodNode.parentId = ROOT_NODE_ID; - methodNode.codeEntry.functionName = "(program)"; - profileInfo_->nodes[profileInfo_->nodeCount++] = methodNode; - profileInfo_->nodes[ROOT_NODE_ID - 1].children.emplace_back(methodNode.id); - profileInfo_->nodeSet.insert(methodNode); - - methodNode.id = IDLE_NODE_ID; - methodNode.parentId = ROOT_NODE_ID; - methodNode.codeEntry.functionName = "(idle)"; - profileInfo_->nodes[profileInfo_->nodeCount++] = methodNode; - profileInfo_->nodes[ROOT_NODE_ID - 1].children.emplace_back(methodNode.id); - profileInfo_->nodeSet.insert(methodNode); -} - -uint64_t SamplesRecord::UpdateScriptIdMap(CString& url) -{ - auto iter = scriptIdMap_.find(url); - if (iter == scriptIdMap_.end()) { - scriptIdMap_.emplace(url, scriptIdMap_.size() + 1); - return static_cast(scriptIdMap_.size()); - } else { - return iter->second; - } -} 
- -bool SamplesRecord::UpdateNodeMap(ProfileInfo* info, CpuProfileNode& methodNode) -{ - auto& nodSet = info->nodeSet; - auto result = nodSet.find(methodNode); - if (result == nodSet.end()) { - info->previousId = methodNode.id = nodSet.size() + 1; - nodSet.insert(methodNode); - return true; - } else { - info->previousId = methodNode.id = result->id; - return false; - } -} - -void SamplesRecord::AddNodes(ProfileInfo* info, CpuProfileNode& methodNode) -{ - info->nodes[info->nodeCount++] = methodNode; - if (methodNode.parentId > 0 && methodNode.parentId <= info->nodeCount) { - info->nodes[methodNode.parentId - 1].children.emplace_back(methodNode.id); - } -} - -int SamplesRecord::GetSampleNodeId(uint64_t previousId, uint64_t topFrameNodeId) -{ - return previousId == 0 ? ROOT_NODE_ID : topFrameNodeId; -} - -uint64_t SamplesRecord::GetPreviousTimeStamp(uint64_t previousTimeStamp, uint64_t startTime) -{ - return previousTimeStamp == 0 ? startTime : previousTimeStamp; -} - -void SamplesRecord::DeleteAbnormalSample(ProfileInfo* info, int timeDelta) -{ - if (timeDelta > static_cast(timeDeltaThreshold_)) { - uint32_t size = info->samples.size(); - if (size > 0) { - info->samples[size - 1] = PROGRAM_NODE_ID; - } - info->previousState = FrameType::UNKNOWN; - } -} - -void SamplesRecord::SetPreviousState(ProfileInfo* info, FrameType frameType) -{ - info->previousState = frameType; -} - -void SamplesRecord::IncreaseNodeHitCount(ProfileInfo* info, int sampleNodeId) -{ - info->nodes[sampleNodeId - 1].hitCount++; -} - -void SamplesRecord::AddSampleNodeId(ProfileInfo* info, int sampleNodeId) -{ - info->samples.emplace_back(sampleNodeId); -} - -void SamplesRecord::AddTimeDelta(ProfileInfo* info, int timeDelta) -{ - info->timeDeltas.emplace_back(timeDelta); -} - -void SamplesRecord::SetPreviousTimeStamp(ProfileInfo* info, uint64_t timeStamp) -{ - info->previousTimeStamp = timeStamp; -} - -ProfileInfo* SamplesRecord::GetProfileInfo(uint64_t mutatorId) -{ - if (profileInfo_->mutatorId != 
mutatorId) { - profileInfo_->mutatorId = mutatorId; - } - return profileInfo_; -} - -void SamplesRecord::AddSample(SampleTask& task) -{ - ProfileInfo* info = GetProfileInfo(task.mutatorId); - if (info->nodeCount >= MAX_NODE_COUNT) { - LOG_COMMON(WARN) << "CpuProfileNode counts over 20000."; - return; - } - std::vector codeInfos = GetCodeInfos(task); - int frameSize = codeInfos.size(); - CpuProfileNode methodNode; - methodNode.id = 1; - for (; frameSize >= 1; frameSize--) { - methodNode.codeEntry = codeInfos[frameSize - 1]; - methodNode.parentId = methodNode.id; - if (UpdateNodeMap(info, methodNode)) { - AddNodes(info, methodNode); - } - } - int sampleNodeId = GetSampleNodeId(info->previousId, methodNode.id); - uint64_t previousTimeStamp = GetPreviousTimeStamp(info->previousTimeStamp, info->startTime); - int timeDelta = static_cast(task.timeStamp - previousTimeStamp); - - DeleteAbnormalSample(info, timeDelta); - - StatisticStateTime(info, timeDelta); - - SetPreviousState(info, codeInfos.front().frameType); - - IncreaseNodeHitCount(info, sampleNodeId); - - AddSampleNodeId(info, sampleNodeId); - - AddTimeDelta(info, timeDelta); - - SetPreviousTimeStamp(info, task.timeStamp); -} - -void SamplesRecord::AddEmptySample(SampleTask& task) -{ - ProfileInfo* info = GetProfileInfo(task.mutatorId); - uint64_t previousTimeStamp = GetPreviousTimeStamp(info->previousTimeStamp, info->startTime); - int timeDelta = static_cast(task.timeStamp - previousTimeStamp); - - DeleteAbnormalSample(info, timeDelta); - - StatisticStateTime(info, timeDelta); - - SetPreviousState(info, FrameType::UNKNOWN); - - IncreaseNodeHitCount(info, ROOT_NODE_ID); - - AddSampleNodeId(info, IDLE_NODE_ID); - - AddTimeDelta(info, timeDelta); - - SetPreviousTimeStamp(info, task.timeStamp); -} - -void SamplesRecord::StatisticStateTime(ProfileInfo* info, int timeDelta) -{ - FrameType state = info->previousState; - switch (state) { - case FrameType::MANAGED: { - info->managedTime += static_cast(timeDelta); - break; - 
} - default: { - info->unknownTime += static_cast(timeDelta); - } - } -} - -void SamplesRecord::StringifySampleData(ProfileInfo* info) -{ - sampleData_ = ""; - sampleData_.Append(CString::FormatString("{\"mutatorId\":%d,", info->mutatorId)); - sampleData_.Append(CString::FormatString("\"startTime\":%llu,", info->startTime)); - sampleData_.Append(CString::FormatString("\"endTime\":%llu,", info->stopTime)); - - StringifyStateTimeStatistic(info); - StringifyNodes(info); - StringifySamples(info); -} - -void SamplesRecord::StringifyStateTimeStatistic(ProfileInfo* info) -{ - sampleData_.Append(CString::FormatString("\"managedTime\":%llu,", info->managedTime)); - sampleData_.Append(CString::FormatString("\"unknownTime\":%llu,", info->unknownTime)); -} - -void SamplesRecord::StringifyNodes(ProfileInfo* info) -{ - sampleData_.Append("\"nodes\":["); - size_t nodeCount = static_cast(info->nodeCount); - - for (size_t i = 0; i < nodeCount; i++) { - struct CpuProfileNode& node = info->nodes[i]; - struct CodeInfo& codeEntry = node.codeEntry; - sampleData_.Append(CString::FormatString("{\"id\":%d,", node.id)); - sampleData_.Append("\"callFrame\":{\"functionName\":\""); - sampleData_.Append(codeEntry.functionName); - sampleData_.Append(CString::FormatString("\",\"scriptId\":\"%d\",", codeEntry.scriptId)); - sampleData_.Append("\"url\":\""); - sampleData_.Append(codeEntry.url); - sampleData_.Append(CString::FormatString("\",\"lineNumber\":%d},", codeEntry.lineNumber)); - sampleData_.Append(CString::FormatString("\"hitCount\":%d,\"children\":[", node.hitCount)); - - std::vector children = node.children; - size_t childrenCount = children.size(); - for (size_t j = 0; j < childrenCount; j++) { - sampleData_.Append(CString::FormatString("%d,", children[j])); - } - if (childrenCount > 0) { - sampleData_ = sampleData_.SubStr(0, sampleData_.Length() - 1); - } - sampleData_ += "]},"; - } - sampleData_ = sampleData_.SubStr(0, sampleData_.Length() - 1); - sampleData_ += "],"; -} - -void 
SamplesRecord::StringifySamples(ProfileInfo* info) -{ - std::vector& samples = info->samples; - std::vector& timeDeltas = info->timeDeltas; - - size_t samplesCount = samples.size(); - if (samplesCount == 0) { - sampleData_.Append("\"samples\":[],\"timeDeltas\":[]}"); - return; - } - CString samplesIdStr = ""; - CString timeDeltasStr = ""; - for (size_t i = 0; i < samplesCount; i++) { - samplesIdStr.Append(CString::FormatString("%d,", samples[i])); - timeDeltasStr.Append(CString::FormatString("%d,", timeDeltas[i])); - } - - samplesIdStr = samplesIdStr.SubStr(0, samplesIdStr.Length() - 1); - timeDeltasStr = timeDeltasStr.SubStr(0, timeDeltasStr.Length() - 1); - - sampleData_.Append("\"samples\":["); - sampleData_.Append(samplesIdStr); - sampleData_.Append("],\"timeDeltas\":["); - sampleData_.Append(timeDeltasStr); - sampleData_.Append("]}"); -} - -void SamplesRecord::DumpProfileInfo() -{ - StringifySampleData(profileInfo_); - WriteFile(); -} - -bool SamplesRecord::OpenFile(int fd) -{ - if (fd == -1) { - return false; - } - - if (ftruncate(fd, 0) == -1) { - return false; - } - fileDesc_ = fd; - return true; -} - -void SamplesRecord::WriteFile() -{ - int err = write(fileDesc_, sampleData_.Str(), sampleData_.Length()); - if (err == -1) { - LOG_COMMON(ERROR) << "Write file failed. 
msg: " << strerror(errno); - } -} - -void SamplesRecord::RunTaskLoop() -{ - while (!taskQueue_.empty()) { - auto task = taskQueue_.front(); - taskQueue_.pop_front(); - if (task.frameCnt == 0) { - AddEmptySample(task); - } else { - AddSample(task); - } - } -} - -void SamplesRecord::DoSingleTask(uint64_t previousTimeStemp) -{ - if (IsTimeout(previousTimeStemp)) { - return; - } - if (taskQueue_.empty()) { - return; - } - auto task = taskQueue_.front(); - if (!task.finishParsed) { - return; - } - taskQueue_.pop_front(); - if (task.frameCnt == 0) { - AddEmptySample(task); - } else { - AddSample(task); - } -} - -void SamplesRecord::ParseSampleData(uint64_t previousTimeStemp) -{ - if (taskQueue_.empty()) { - return; - } - for (auto& task : taskQueue_) { - if (task.finishParsed) { - continue; - } - for (int i = task.checkPoint; i < task.frameCnt; ++i) { - GetDemangleName(task.funcDescRefs[i]); - GetUrl(task.funcDescRefs[i]); - if (IsTimeout(previousTimeStemp)) { - task.checkPoint = i + 1; - return; - } - } - task.finishParsed = true; - } -} - -bool SamplesRecord::IsTimeout(uint64_t previousTimeStemp) -{ - uint64_t currentTimeStamp = SamplesRecord::GetMicrosecondsTimeStamp(); - int64_t ts = static_cast(interval_) - - static_cast(currentTimeStamp - previousTimeStemp); - if (ts < 0) { - return true; - } - return false; -} - -void SamplesRecord::Post(uint64_t mutatorId, std::vector& FuncDescRefs, - std::vector& FrameTypes, std::vector& LineNumbers) -{ - uint64_t timeStamp = SamplesRecord::GetMicrosecondsTimeStamp(); - SampleTask task(timeStamp, mutatorId, FuncDescRefs, FrameTypes, LineNumbers); - taskQueue_.push_back(task); -} - -std::vector SamplesRecord::GetCodeInfos(SampleTask& task) -{ - std::vector codeInfos; - for (int i = 0; i < task.frameCnt; ++i) { - CodeInfo codeInfo; - codeInfo.lineNumber = task.lineNumbers[i]; - codeInfo.frameType = task.frameTypes[i]; - codeInfo.funcIdentifier = task.funcDescRefs[i]; - codeInfo.functionName = 
GetDemangleName(codeInfo.funcIdentifier); - codeInfo.url = GetUrl(codeInfo.funcIdentifier); - codeInfo.scriptId = UpdateScriptIdMap(codeInfo.url); - codeInfos.emplace_back(codeInfo); - } - return codeInfos; -} - -CString SamplesRecord::GetUrl(uint64_t funcIdentifier) -{ - if (identifierUrlMap_.find(funcIdentifier) != identifierUrlMap_.end()) { - return identifierUrlMap_[funcIdentifier]; - } - return ParseUrl(funcIdentifier); -} - -CString SamplesRecord::ParseUrl(uint64_t funcIdentifier) { return CString(); } - -CString SamplesRecord::GetDemangleName(uint64_t funcIdentifier) -{ - if (identifierFuncnameMap_.find(funcIdentifier) != identifierFuncnameMap_.end()) { - return identifierFuncnameMap_[funcIdentifier]; - } - return ParseDemangleName(funcIdentifier); -} - -CString SamplesRecord::ParseDemangleName(uint64_t funcIdentifier) -{ - LOG_COMMON(FATAL) << "Unresolved fatal"; - UNREACHABLE_CC(); -} -} diff --git a/common_components/common_runtime/src/cpu_profiler/samples_record.h b/common_components/common_runtime/src/cpu_profiler/samples_record.h deleted file mode 100755 index 8c71d5d6d041b10075b4ca9f0572402606351689..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/cpu_profiler/samples_record.h +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef ARK_COMMON_SAMPLES_RECORD_H -#define ARK_COMMON_SAMPLES_RECORD_H - -#include -#include -#include -#include -#include - -#include "common_components/common_runtime/src/base/c_string.h" - -namespace panda { -constexpr int MAX_NODE_COUNT = 20000; // 20000:the maximum size of the array -constexpr int UNKNOWN_NODE_ID = 0; // 0: the (root) node parent id -constexpr int ROOT_NODE_ID = 1; // 1: the (root) node id -constexpr int PROGRAM_NODE_ID = 2; // 2: the (program) node id -constexpr int IDLE_NODE_ID = 3; // 3: the (idel) node id - -struct CodeInfo { - uint64_t funcIdentifier = 0; - uint64_t scriptId = 0; - uint32_t lineNumber = 0; - CString functionName = ""; - CString url = ""; -}; - -struct CpuProfileNode { - uint64_t id = 0; - uint64_t parentId = 0; - uint64_t hitCount = 0; - struct CodeInfo codeEntry; - std::vector children; - - bool operator < (const CpuProfileNode& node) const - { - return parentId < node.parentId || (parentId == node.parentId && codeEntry < node.codeEntry); - } -}; - -struct ProfileInfo { - uint64_t mutatorId = 0; - uint64_t startTime = 0; - uint64_t stopTime = 0; - uint64_t nodeCount = 0; - - CpuProfileNode nodes[MAX_NODE_COUNT]; - std::vector samples; - std::vector timeDeltas; - - std::set nodeSet; - uint64_t previousTimeStamp = 0; - uint64_t previousId = 0; - - // state time statistic - uint64_t managedTime = 0; - uint64_t unknownTime = 0; -}; - -class SampleTask { -public: - uint64_t timeStamp; - uint64_t mutatorId; - std::vector funcDescRefs; - std::vector lineNumbers; - uint64_t frameCnt; - bool finishParsed {false}; - int checkPoint {0}; -}; - -class SamplesRecord { -public: - SamplesRecord() {} - ~SamplesRecord(); - void InitProfileInfo(); - void ReleaseProfileInfo(); - void SetIsStart(bool started) { isStart_.store(started); } - bool GetIsStart() const { return isStart_.load(); } - void SetThreadStartTime(uint64_t threadStartTime) { profileInfo_->startTime = threadStartTime; } - void SetSampleStopTime(uint64_t 
threadStopTime) { profileInfo_->stopTime = threadStopTime; } - void AddSample(SampleTask& task); - void AddEmptySample(SampleTask& task); - void StringifySampleData(ProfileInfo* info); - void DumpProfileInfo(); - void RunTaskLoop(); - void DoSingleTask(uint64_t previousTimeStemp); - void ParseSampleData(uint64_t previousTimeStemp); - std::vector BuildCodeInfos(SampleTask* task); - int GetSamplingInterval() { return interval_; } - bool OpenFile(int fd); - static uint64_t GetMicrosecondsTimeStamp(); - -private: - void StatisticStateTime(ProfileInfo* info, int timeDelta); - void NodeInit(); - void StringifyStateTimeStatistic(ProfileInfo* info); - void StringifyNodes(ProfileInfo* info); - void StringifySamples(ProfileInfo* info); - uint64_t UpdateScriptIdMap(CString& url); - bool UpdateNodeMap(ProfileInfo* info, CpuProfileNode& methodNode); - void AddNodes(ProfileInfo* info, CpuProfileNode& methodNode); - int GetSampleNodeId(uint64_t previousId, uint64_t topFrameNodeId); - uint64_t GetPreviousTimeStamp(uint64_t previousTimeStamp, uint64_t startTime); - void DeleteAbnormalSample(ProfileInfo* info, int timeDelta); - void IncreaseNodeHitCount(ProfileInfo* info, int sampleNodeId); - void AddSampleNodeId(ProfileInfo* info, int sampleNodeId); - void AddTimeDelta(ProfileInfo* info, int timeDelta); - void SetPreviousTimeStamp(ProfileInfo* info, uint64_t timeStamp); - std::vector GetCodeInfos(SampleTask& task); - CString GetUrl(uint64_t funcIdentifier); - CString ParseUrl(uint64_t funcIdentifier); - CString GetDemangleName(uint64_t funcIdentifier); - CString ParseDemangleName(uint64_t funcIdentifier); - void WriteFile(); - bool IsTimeout(uint64_t previousTimeStemp); - ProfileInfo* GetProfileInfo(uint64_t mutatorId); - - int fileDesc_{ -1 }; - uint32_t timeDeltaThreshold_{ 2000 }; // 2000 : default timeDeltaThreshold 2000us - std::atomic_bool isStart_{ false }; - CString sampleData_{ "" }; - std::list taskQueue_; - ProfileInfo* profileInfo_{ nullptr }; - std::map scriptIdMap_{ { 
"", 0 } }; - int interval_{ 500 }; // 500 : default interval 500us - std::map identifierFuncnameMap_; - std::map identifierUrlMap_; -}; -} // namespace panda -#endif diff --git a/common_components/common_runtime/src/inspector/alloc_data.cpp b/common_components/common_runtime/src/inspector/alloc_data.cpp deleted file mode 100755 index b2242d15ab03761db9b019ff61e6d3dff0dacc55..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/inspector/alloc_data.cpp +++ /dev/null @@ -1,318 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "common_components/common_runtime/src/inspector/alloc_data.h" -#include -#include -#include -#include -#include "common_components/common_runtime/src/common/stack_type.h" -#include "common_components/common_runtime/src/unwind_stack/print_stack_info.h" -#include "common_components/common_runtime/src/unwind_stack/stack_info.h" -#include "common_components/common_runtime/src/unwind_stack/stack_metadata_helper.h" -#include "common_components/common_runtime/src/inspector/file_stream.h" - -namespace panda { -static ArkAllocData* g_allocInfo; -ArkAllocData* ArkAllocData::GetArkAllocData() -{ - if (g_allocInfo == nullptr) { - g_allocInfo = new ArkAllocData(); - } - return g_allocInfo; -} - -void ArkAllocData::SetArkAllocData() -{ - g_allocInfo -> InitAllocParam(); -} - -TraceNodeField* ArkAllocData::FindNode(const FrameAddress* ptr, const char* str) -{ - if (str == nullptr) { - return nullptr; - }; - size_t key = FindKey(ptr, str); - auto iter = traceNodeMap_.find(key); - if (iter != traceNodeMap_.end()) { - return iter->second; - } else { - return nullptr; - } -} - -int32_t ArkAllocData::FindKey(const FrameAddress* ptr, const char* str) -{ - // The identifier of a stack frame depends on the FA of the frame and the name of the frame. 
- std::size_t hash1 = std::hash{}(ptr); - std::size_t hash2 = std::hash{}(str); - return hash1 ^ hash2; -} - -void ArkAllocData::DeleteArkAllocData() -{ - std::unique_lock lock(sharedMtx_); - SetRecording(false); - HeapProfilerStream* stream = &panda::HeapProfilerStream::GetInstance(); - stream->SetContext(ALLOCATION); - g_allocInfo->SerializeArkAllocData(); - for (auto sample : g_allocInfo->samples_) { - delete sample; - sample = nullptr; - } - samples_.clear(); - for (auto traceInfo : g_allocInfo->traceFunctionInfo_) { - delete traceInfo; - traceInfo = nullptr; - } - traceFunctionInfo_.clear(); - g_allocInfo->DeleteAllNode(g_allocInfo->traceNodeHead_); - delete g_allocInfo->writer_; -} - -void ArkAllocData::DeleteAllNode(TraceNodeField* node) -{ - for (size_t i = 0; i < node->children.size(); i++) { - DeleteAllNode(node->children[i]); - } - delete node; -} - -void ArkAllocData::SerializeArkAllocData() -{ - writer_->WriteString("{\\\"head\\\":"); - // 1. dump callFrame - SerializeCallFrames(); - writer_->WriteString(","); - // 2. 
dump samples - SerializeSamples(); - writer_->WriteChar('}'); - writer_->End(); -} - -void ArkAllocData::SerializeCallFrames() { SerializeEachFrame(traceNodeHead_); } - -void ArkAllocData::SerializeFunctionInfo(int32_t idx) -{ - writer_->WriteString("{\\\"functionName\\\":\\\""); - writer_->WriteString(traceFunctionInfo_[idx]->functionName); - writer_->WriteString("\\\",\\\"scriptName\\\":\\\""); - writer_->WriteString(traceFunctionInfo_[idx]->scriptName); - writer_->WriteString("\\\",\\\"lineNumber\\\":"); - writer_->WriteNumber(traceFunctionInfo_[idx]->line); - writer_->WriteString(",\\\"columnNumber\\\":"); - writer_->WriteNumber(traceFunctionInfo_[idx]->column); - writer_->WriteChar('}'); -} - -void ArkAllocData::SerializeEachFrame(TraceNodeField* node) -{ - writer_->WriteString("{\\\"callFrame\\\":"); - SerializeFunctionInfo(node->functionInfoIndex); - writer_->WriteChar(','); - writer_->WriteString("\\\"selfSize\\\":"); - writer_->WriteNumber(node->selfSize); - writer_->WriteString(",\\\"id\\\":"); - writer_->WriteNumber(node->id); - writer_->WriteString(",\\\"children\\\":["); - if (node->children.size() != 0) { - for (size_t i = 0; i < node->children.size(); i++) { - SerializeEachFrame(node->children[i]); - if (i < node->children.size() - 1) { - writer_->WriteChar(','); - } - } - } - writer_->WriteChar(']'); - writer_->WriteChar('}'); -} - -void ArkAllocData::SerializeSamples() -{ - writer_->WriteString("\\\"samples\\\":["); - // for each sample - for (size_t i = 0; i < samples_.size(); i++) { - if (i != 0) { - writer->WriteChar(','); - } - writer->WriteString("{\\\"size\\\":"); - writer->WriteNumber(samples_[i]->size); - writer->WriteChar(','); - writer->WriteString("\\\"nodeId\\\":"); - writer->WriteNumber(samples_[i]->nodeId); - writer->WriteChar(','); - writer->WriteString("\\\"ordinal\\\":"); - writer->WriteNumber(samples_[i]->orinal); - writer->WriteChar('}'); - } - writer_->WriteChar(']'); -} -void ArkAllocData::InitRoot() -{ - TraceNodeField* 
traceNode = new TraceNodeField(); - if (traceNode == nullptr) { - LOG_COMMON(ERROR) << "init traceNode failed"; - return; - } - traceNode->id = 0; - traceNode->functionInfoIndex = 0; - traceNode->selfSize = 0; - traceNodeHead_ = traceNode; - TraceFunctionInfo* traceFunction = new TraceFunctionInfo(); - if (traceFunction == nullptr) { - LOG_COMMON(ERROR) << "init traceFunction failed"; - return; - } - traceFunction->functionName = "{root}"; - traceFunction->scriptName = ""; - traceFunction->line = -1; - ArkAllocData::GetArkAllocData()->traceFunctionInfo_.push_back(traceFunction); -} - -void ArkAllocData::InitAllocParam() -{ - sampSize_ = 1 * 1024; // default 1 * 1024 b - InitRoot(); - HeapProfilerStream* stream = &panda::HeapProfilerStream::GetInstance(); - writer_ = new StreamWriter(stream); -} - -void ArkAllocData::SerializeStats() -{ - HeapProfilerStream* stream = &panda::HeapProfilerStream::GetInstance(); - stream->SetContext(STATSUPDATE); - writer_->WriteString("["); - writer_->WriteNumber(traceNodeID_); - writer_->WriteChar(','); - writer_->WriteNumber(0); - writer_->WriteChar(','); - ssize_t allocatedSize = panda::Heap::GetHeap().GetAllocatedSize(); - writer_->WriteNumber(allocatedSize); - writer_->WriteString("]"); - writer_->End(); -} - -void ArkAllocData::RecordAllocNodes(const TypeInfo* klass, uint32_t size) -{ - std::unique_lock lock(sharedMtx_); - if (!IsRecording()) { // avoid delete func was called at this time - return; - } - allocSize_ += size; - if (allocSize < sampSize_) { - return; - } else { - AllocStackInfo* allocStackInfo = new AllocStackInfo(); - allocStackInfo->ProcessStackTrace(size); - delete allocStackInfo; - allocSize_ = 0; - SerializeStats(); - } -} - -int32_t AllocStackInfo::ProcessTraceInfo(FrameInfo &frame) -{ - if (frame.GetFrameType() == FrameType::NATIVE) { - return -1; - } - TraceFunctionInfo* traceFunction = new TraceFunctionInfo(); - if (frame.GetFrameType() == FrameType::MANAGED) { - StackMetadataHelper 
stackMetadataHelper(frame); - traceFunction->scriptName = frame.GetFileName(); - traceFunction->scriptName.ReplaceAll("/", "\\"); - traceFunction->functionName = frame.GetFuncName(); - traceFunction->line = stackMetadataHelper.GetLineNumber(); - } else { - delete traceFunction; - return -1; - } - - ArkAllocData::GetArkAllocData()->traceFunctionInfo_.push_back(traceFunction); - int32_t functionInfoIndex = ArkAllocData::GetArkAllocData()->traceFunctionInfo_.size() - 1; - return functionInfoIndex; -} - -void AllocStackInfo::ProcessTraceNode(TraceNodeField* head, uint32_t allocSize) -{ - // Initialize nodes and fill nodes in head in sequence. - if (frames_.empty()) { - // This node is the root node. - head->selfSize += allocSize; - return; - } - while (!frames_.empty()) { - FrameInfo* f = frames_.top(); - frames_.pop(); - TraceNodeField* traceNode = new TraceNodeField(); - traceNode->id = ArkAllocData::GetArkAllocData()->SetNodeID(); - traceNode->functionInfoIndex = ProcessTraceInfo(*f); - if (traceNode->functionInfoIndex == -1) { - delete traceNode; - continue; - } - traceNode->selfSize = 0; - // Add the children node of the upper-level node. - head->children.push_back(traceNode); - // In this case, head becomes a child node. Used to insert a child node next time. - head = traceNode; - // Add to the map for next search. - FrameAddress* FA = f->mFrame.GetFA(); - size_t key = ArkAllocData::GetArkAllocData()->FindKey(FA, f->GetFuncName().Str()); - ArkAllocData::GetArkAllocData()->traceNodeMap_.insert(std::pair(key, traceNode)); - delete f; - f = nullptr; - } - head->selfSize += allocSize; -} -void AllocStackInfo::ProcessStackTrace(uint32_t size) -{ - UnwindContext uwContext; - // Top unwind context can only be runtime or Arkcommon context. - CheckTopUnwindContextAndInit(uwContext); - while (!uwContext.frameInfo.mFrame.IsAnchorFrame(anchorFA)) { - AnalyseAndSetFrameType(uwContext); - FrameInfo* f = new FrameInfo(uwContext.frameInfo); - // 1. 
If the node has been recorded, add the content in the stack to the end of the node. - FrameAddress* FA = f->mFrame.GetFA(); - TraceNodeField* node = ArkAllocData::GetArkAllocData()->FindNode(FA, f->GetFuncName().Str()); - if (node != nullptr) { - delete f; - ProcessTraceNode(node, size); - return; - } - frames_.push(f); - UnwindContext caller; - lastFrameType = uwContext.frameInfo.GetFrameType(); -#ifndef _WIN64 - if (uwContext.UnwindToCallerContext(caller) == false) { -#else - if (uwContext.UnwindToCallerContext(caller, uwCtxStatus_) == false) { -#endif - return; - } - uwContext = caller; - } - // 2. If the stack back is not recorded, the call chain is a new call chain and the root node is the root node. - ProcessTraceNode(ArkAllocData::GetArkAllocData()->traceNodeHead_, size); - - // 3. Record the samples at this time. - Sample* sample = new Sample(); - sample->size = size; - sample->nodeId = ArkAllocData::GetArkAllocData()->traceFunctionInfo_.size(); - auto now = std::chrono::system_clock::now(); - auto timestamp = std::chrono::duration_cast(now.time_since_epoch()).count(); - sample->orinal = static_cast(timestamp); - ArkAllocData::GetArkAllocData()->samples_.push_back(sample); -} -} diff --git a/common_components/common_runtime/src/inspector/alloc_data.h b/common_components/common_runtime/src/inspector/alloc_data.h deleted file mode 100755 index ded843e6576bedbdbcaba956221707217923f309..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/inspector/alloc_data.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef ARK_COMMON_ALLOC_DATA_H -#define ARK_COMMON_ALLOC_DATA_H -#include "common_components/common_runtime/src/unwind_stack/gc_stack_info.h" -#include "common_components/common_runtime/src/inspector/heap_data.h" -#include "common_components/common_runtime/src/inspector/heap_snapshot_json_serializer.h" -namespace panda { -struct TraceFunctionInfo { - CString functionName; - CString scriptName; - CString url = ""; - int32_t line; - int32_t column = -1; -}; - -struct TraceNodeField { - int32_t id = 0; // Unique ID - int32_t functionInfoIndex; - int32_t selfSize; - std::vector children; -}; - -struct Sample { - int32_t size; - int32_t nodeId; - int32_t orinal; -}; - -class ArkAllocData { -public: - static ArkAllocData* GetArkAllocData(); - static void SetArkAllocData(); - TraceNodeField* FindNode(const FrameAddress*, const char*); - int32_t FindKey(const FrameAddress*, const char*); - void DeleteArkAllocData(); - bool IsRecording() { return recording_.load(); }; - void SetRecording(bool isRecording) { recording_.store(isRecording, std::memory_order_release); }; - void DeleteAllNode(TraceNodeField* node); - void SerializeArkAllocData(); - void SerializeSamples(); - void SerializeCallFrames(); - void SerializeStats(); - void SerializeFunctionInfo(int32_t idx); - void SerializeEachFrame(TraceNodeField* node); - void InitAllocParam(); - void InitRoot(); - void RecordAllocNodes(const TypeInfo* klass, uint32_t size); - int32_t SetNodeID() { return ++traceNodeID_; }; - friend class AllocStackInfo; -private: - std::unordered_map traceNodeMap_; - 
TraceNodeField* traceNodeHead_; // ROOT node - std::vector samples_; - std::vector traceFunctionInfo_; - int32_t sampSize_; - int32_t allocSize_; - int32_t traceNodeID_ = 0; - StreamWriter* writer_ = nullptr; - std::atomic recording_{ false }; - std::mutex sharedMtx_; -}; - -class AllocStackInfo : public GCStackInfo { -public: - int32_t ProcessTraceInfo(FrameInfo &frame); - void ProcessTraceNode(TraceNodeField* head, uint32_t allocSize); - void ProcessStackTrace(uint32_t size); -private: - std::stack frames_; -}; - -} // namespace panda -#endif diff --git a/common_components/common_runtime/src/inspector/file_stream.cpp b/common_components/common_runtime/src/inspector/file_stream.cpp deleted file mode 100755 index be1cbd252ce0772400f75a5af66e5511ed795a22..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/inspector/file_stream.cpp +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include "common_components/common_runtime/src/base/log_file.h" -#include "common_components/common_runtime/src/inspector/file_stream.h" - -namespace panda { -void HeapProfilerStream::SetHandler(SendMsgCB sendMsgInit) -{ - if (sendMsg_ != nullptr) { - return; - } - sendMsg_ = sendMsgInit; -} -bool HeapProfilerStream::SetMessageID(const std::string &message) -{ - std::string key = "\"id\":"; - size_t startPos = message.find(key); - if (startPos == std::string::npos) { - return false; - } - startPos += key.length(); - - while (startPos < message.length() && std::isspace(message[startPos])) { - ++startPos; - } - - if (message[startPos] == '"') { - size_t endPos = message.find('"', startPos + 1); - if (endPos == std::string::npos) { - return false; - } - messageId_ = CString(message.substr(startPos + 1, endPos - startPos - 1).c_str()); - } else { - size_t endPos = message.find(',', startPos); - if (endPos == std::string::npos) { - endPos = message.find('}', startPos); - } - if (endPos == std::string::npos) { - return false; - } - messageId_ = CString(message.substr(startPos, endPos - startPos).c_str()); - } - return true; -} - -CString HeapProfilerStream::GetMessageID() -{ - return messageId_; -} - -void HeapProfilerStream::SetContext(MsgType type) -{ - if (type == DUMPHEAPSNAPSHOT || type == ALLOCATION) { - start_ = "{\"method\":\"HeapProfiler.addHeapSnapshotChunk\",\"params\":{\"chunk\":\""; - end_ = "\"},\"profiler\":\"arkcommon profiler\"}"; - return; - } else if (type == STATSUPDATE) { - start_ = "{\"method\":\"HeapProfiler.heapStatsUpdate\",\"params\":{\"statsUpdate\":"; - end_ = "},\"profiler\":\"arkcommon profiler\"}"; - return; - } else { - start_ = ""; - } - end_ = ",\"profiler\":\"arkcommon profiler\"}"; -} -int HeapProfilerStream::GetSize() -{ - static const int heapProfilerChunkSise = static_cast(20 * 1024); - return heapProfilerChunkSise; -} -bool 
HeapProfilerStream::WriteChunk(char* data, int32_t size) -{ - std::string chunk(data, size); - std::string msg = start_ + chunk + end_; - sendMsg_(msg); - return true; -} -} diff --git a/common_components/common_runtime/src/inspector/file_stream.h b/common_components/common_runtime/src/inspector/file_stream.h deleted file mode 100755 index 6a3105fc7caa1dc1f3dce5da30585550a1afaaa7..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/inspector/file_stream.h +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ARK_COMMON_FILE_STREAM_H -#define ARK_COMMON_FILE_STREAM_H - -#include -#include -#include -#include - -#include "common_components/common_runtime/src/inspector/stream.h" -#include "common_components/log/log.h" - -namespace panda { -class StreamWriter { -public: - explicit StreamWriter(Stream* stream) - : stream_(stream), chunkSize_(stream->GetSize()), chunk_(chunkSize_), current_(0) - { - } - - void WriteString(const CString &str) - { - auto len = str.Length(); - DCHECK_CC(len <= static_cast(INT_MAX)); - if (len == 0) { - return; - } - const char* cur = str.Str(); - const char* end = cur + len; - while (cur < end) { - int dstSize = chunkSize_ - current_; - int writeSize = std::min(static_cast(end - cur), dstSize); - DCHECK_CC(writeSize > 0); - if (memcpy_s(chunk_.data() + current_, dstSize, cur, writeSize) != EOK) { - LOG_COMMON(ERROR) << "memcpy_s failed"; - } - cur += writeSize; - current_ += writeSize; - MaybeWriteChunk(); - } - } - - void WriteChar(char c) - { - DCHECK_CC(c != '\0'); - DCHECK_CC(current_ < chunkSize_); - chunk_[current_++] = c; - MaybeWriteChunk(); - } - - void WriteNumber(int32_t num) - { - WriteString(CString(num)); - } - - void End() - { - if (current_ > 0) { - WriteChunk(); - } - stream_->EndOfStream(); - } - -private: - void MaybeWriteChunk() - { - DCHECK_CC(current_ <= chunkSize_); - if (current_ >= chunkSize_) { - WriteChunk(); - } - } - - void WriteChunk() - { - stream_->WriteChunk(chunk_.data(), current_); - current_ = 0; - } - - Stream* stream_ {nullptr}; - int chunkSize_ {0}; - std::vector chunk_; - int current_ {0}; -}; - -enum MsgType : uint32_t { - DUMPHEAPSNAPSHOT = 0, - ALLOCATION = 1, - HEAPUSAGE = 2, - DISABLE = 3, - END = 4, - STATSUPDATE = 5, -}; - -class HeapProfilerStream : public Stream { -public: - static HeapProfilerStream& GetInstance() - { - static HeapProfilerStream instance; - return instance; - } - using SendMsgCB = std::function; - HeapProfilerStream() : sendMsg_(nullptr) {} - void 
SetHandler(SendMsgCB sendMsgInit); - bool SetMessageID(const std::string &message); - CString GetMessageID(); - void SetContext(MsgType type); - void EndOfStream() override {} - int GetSize() override; - bool WriteChunk(char* data, int32_t size) override; - -private: - std::string start_; - std::string end_; - SendMsgCB sendMsg_; - CString messageId_; -}; -} -#endif diff --git a/common_components/common_runtime/src/inspector/heap_data.cpp b/common_components/common_runtime/src/inspector/heap_data.cpp deleted file mode 100755 index 51ef0b2e9b302a585de298e8d7153608cb101988..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/inspector/heap_data.cpp +++ /dev/null @@ -1,825 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "common_components/common_runtime/src/inspector/heap_data.h" - -#include - -#include "common_components/common_runtime/src/common/base_object.h" -#include "common_components/common_runtime/src/common/scoped_object_access.h" -#include "common_components/common_runtime/src/heap/collector/task_queue.h" -#include "common_components/common_runtime/src/heap/collector/trace_collector.h" -#include "common_components/common_runtime/src/heap/heap.h" - -namespace panda { -static bool g_oomIsTrigged = false; - -void ArkHeapData::WriteHeap() -{ - WriteFixedHeader(); - WriteString(); - WriteAllClassLoad(); - WriteAllStructClassLoad(); - WriteStackTrace(); - WriteStartThread(); - WriteHeapDump(); -} - -void ArkHeapData::ProcessHeap() -{ - ProcessRootGlobal(); - ProcessRootLocal(); - ProcessRootFinalizer(); - (void)LookupStringId("reserved"); - (void)LookupStringId("RefFields"); - (void)LookupStringId("ValueField"); - // dump object contents - auto dumpVisitor = [this](BaseObject* obj) { ProcessHeapObject(obj); }; - bool ret = Heap::GetHeap().ForEachObject(dumpVisitor, false); - LOGE_IF(UNLIKELY_CC(!ret)) << "theAllocator.ForEachObject() in DumpHeap() return false."; -} - -void ArkHeapData::DumpHeap() -{ - // step1 - open file - CString specifiedPath; - if (dumpAfterOOM && !g_oomIsTrigged) { - g_oomIsTrigged = true; - specifiedPath = ""; - auto pid = GetPid(); - CString dumpFile = CString("ark_OOM_pid") + CString(pid) + CString(".dat"); -#if defined(_WIN64) - const char* separator = "\\"; -#else - const char* separator = "/"; -#endif - if (specifiedPath.IsEmpty()) { - // dump to current path - fp = fopen(dumpFile.Str(), "wb"); - VLOG(INFO, "Heap dump log is writing into %s%s", separator, dumpFile.Str()); - } else { - // dump to specified path - dumpFile = specifiedPath + separator + dumpFile; - fp = fopen(dumpFile.Str(), "wb"); - VLOG(INFO, "Heap dump log is writing into %s", dumpFile.Str()); - } - } else { - // dump for prof - fp = fopen("item_data.dat.cache", 
"wb"); - } - - if (!fp) { - LOG_COMMON(ERROR) << "Failed to open heap dump file, stop dumping heap info, errno type is %d", errno); - return; - } - - // step2 - write file - ScopedStopTheWorld scopedStopTheWorld("dump-heap"); - ProcessHeap(); - WriteHeap(); - - // step3 - close file - int ret = fclose(fp); - if (ret) { - LOG_COMMON(ERROR) << "Fail to close file when dump heap data finished"; - } - if (!dumpAfterOOM) { - ret = rename("item_data.dat.cache", "item_data.dat"); - if (ret) { - LOG_COMMON(ERROR) << "Fail to rename file when dump heap data finished"; - } - } -} - -void ArkHeapData::ProcessHeapObject(BaseObject* obj) -{ - if (obj == nullptr) { - return; - } - - if (obj->IsRawArray()) { - MArray* mArray = reinterpret_cast(obj); - TypeInfo* componentTypeInfo = mArray->GetComponentTypeInfo(); - if (componentTypeInfo->IsPrimitiveType()) { - DumpObject dumpObject = { obj, TAG_PRIMITIVE_ARRAY_DUMP, 0, 0, - LookupStringId(obj->GetTypeInfo()->GetName() == nullptr ? - "anonymousPrimitiveArray" : - obj->GetTypeInfo()->GetName()) }; - dumpObjects.push_back(dumpObject); - } else if (componentTypeInfo->IsStructType()) { - DumpObject dumpObject = { obj, TAG_STRUCT_ARRAY_DUMP, 0, 0, - LookupStringId(obj->GetTypeInfo()->GetName() == nullptr ? - "anonymousStructArray" : - obj->GetTypeInfo()->GetName()) }; - dumpObjects.push_back(dumpObject); - ProcessStructClass(obj->GetTypeInfo()); - return; - } else if (componentTypeInfo->IsObjectType() || - componentTypeInfo->IsArrayType() || - componentTypeInfo->IsInterface()) { - DumpObject dumpObject = { obj, TAG_OBJECT_ARRAY_DUMP, 0, 0, - LookupStringId(obj->GetTypeInfo()->GetName() == nullptr ? 
- "anonymousObjectArray" : - obj->GetTypeInfo()->GetName()) }; - dumpObjects.push_back(dumpObject); - } else { - LOG_COMMON(ERROR) << "array object %p has wrong component type", mArray); - } - } else if (obj->GetTypeInfo()->IsVaildType()) { - DumpObject dumpObject = { - obj, TAG_INSTANCE_DUMP, 0, 0, - LookupStringId(obj->GetTypeInfo()->GetName() == nullptr ? "defaultLambda" : obj->GetTypeInfo()->GetName()) - }; - dumpObjects.push_back(dumpObject); - } else { - LOG_COMMON(ERROR) << "object %p has wrong component type", obj); - return; - } - ProcessRootClass(obj->GetTypeInfo()); -} - -void ArkHeapData::ProcessRootClass(TypeInfo* klass) -{ - if (dumpClassMap.find(klass) == dumpClassMap.end()) { - dumpClassMap.insert(std::pair( - klass, - LookupStringId(klass->GetName() == nullptr ? "defaultLambda" : - klass->GetName()))); // lamda obj has null name - } -} - -void ArkHeapData::ProcessStructClass(TypeInfo* klass) -{ - if (dumpStructClassMap.find(klass) == dumpStructClassMap.end()) { - dumpStructClassMap.insert(std::pair( - klass, - LookupStringId(klass->GetName() == nullptr ? 
"defaultStructLamda" : - klass->GetName()))); // lamda obj has null name - } -} -void ArkHeapData::ProcessStacktrace(RecordStackInfo* recordStackInfo) -{ - std::vector framesInStack = recordStackInfo->stacks; - if (stacktraces.find(recordStackInfo) == stacktraces.end()) { - stacktraces.insert( - std::pair(recordStackInfo, traceSerialNum++)); - CString threadIdx = CString(threadId); - LookupStringId(threadName); - for (size_t i = 0; i < framesInStack.size(); ++i) { - FrameInfo* frame = framesInStack[i]; - if (frame->GetFrameType() == FrameType::MANAGED) { - LookupStringId(frame->GetFuncName().Str()); - LookupStringId(frame->GetFileName().Str()); - } else { - Os::Loader::BinaryInfo binInfo; - (void)Os::Loader::GetBinaryInfoFromAddress(frame->mFrame.GetIP(), &binInfo); - LookupStringId(CString(binInfo.filePathName).Str()); - LookupStringId(CString(binInfo.symbolName).Str()); - } - frames.insert(std::pair(frame, frameId++)); - } - } -} - -void ArkHeapData::ProcessRootLocal() -{ - LOG_COMMON(FATAL) << "Unresolved fatal"; - UNREACHABLE_CC(); -} - -void ArkHeapData::ProcessRootGlobal() -{ - RefFieldVisitor visitor = [this](RefField<>& refField) { - BaseObject* obj = Heap::GetBarrier().ReadStaticRef(refField); - if (obj == nullptr || !Heap::IsHeapAddress(obj)) { - return; - } - DumpObject dumpObject = { - obj, TAG_ROOT_GLOBAL, 0, 0, - LookupStringId(obj->GetTypeInfo()->GetName() == nullptr ? "anonymous" : obj->GetTypeInfo()->GetName()) - }; - dumpObjects.push_back(dumpObject); - }; - Heap::GetHeap().VisitStaticRoots(visitor); -} - -void ArkHeapData::ProcessRootFinalizer() -{ - RootVisitor visitor = [this](ObjectRef& objRef) { - BaseObject* obj = objRef.object; - if (obj == nullptr || !Heap::IsHeapAddress(obj)) { - return; - } - DumpObject dumpObject = { - obj, TAG_ROOT_UNKNOWN, 0, 0, - LookupStringId(obj->GetTypeInfo()->GetName() == nullptr ? 
"anonymous" : obj->GetTypeInfo()->GetName()) - }; - dumpObjects.push_back(dumpObject); - }; - Heap::GetHeap().GetFinalizerProcessor().VisitGCRoots(visitor); -} - -void ArkHeapData::WriteHeapDump() -{ - WriteRecordHeader(TAG_HEAP_DUMP, kHeapDataTime); - WriteAllClass(); - WriteAllStructClass(); - WriteAllObjects(); - ModifyLength(); - EndRecord(); -} -/* - * Record thread info: - * RecordHeader header; - * u4 thread serial number - * ID thread object ID - * u4 stack trace serial number - * ID thread name string ID - */ -void ArkHeapData::WriteStartThread() -{ - for (auto trace = stacktraces.begin(); trace != stacktraces.end(); trace++) { - WriteRecordHeader(TAG_START_THREAD, kHeapDataTime); - AddU4(trace->first->GetStackTid()); - AddID(threadObjectId++); - AddU4(trace->second); - CString threadNameAll = trace->first->GetThreadName(); - AddID(LookupStringId(threadNameAll)); - ModifyLength(); - EndRecord(); - } -} - -void ArkHeapData::WriteAllClass() -{ - for (auto klassInfo : dumpClassMap) { - WriteClass(klassInfo.first, klassInfo.second, TAG_CLASS_DUMP); - } -} - -void ArkHeapData::WriteAllStructClass() -{ - for (auto klassInfo : dumpStructClassMap) { - WriteStructClass(klassInfo.first, klassInfo.second, TAG_CLASS_DUMP); - } -} - -void ArkHeapData::WriteAllObjects() -{ - for (auto objectInfo : dumpObjects) { - switch (objectInfo.tag) { - case TAG_ROOT_THREAD_OBJECT: - WriteThreadObjectRoot(objectInfo.obj, objectInfo.tag, objectInfo.threadId, 0); - break; - case TAG_ROOT_LOCAL: - WriteLocalRoot(objectInfo.obj, objectInfo.tag, objectInfo.threadId, objectInfo.frameNum); - break; - case TAG_ROOT_GLOBAL: - WriteGlobalRoot(objectInfo.obj, objectInfo.tag); - break; - case TAG_ROOT_UNKNOWN: - WriteUnknownRoot(objectInfo.obj, objectInfo.tag); - break; - case TAG_OBJECT_ARRAY_DUMP: - WriteObjectArray(objectInfo.obj, objectInfo.tag); - break; - case TAG_STRUCT_ARRAY_DUMP: - WriteStructArray(objectInfo.obj, objectInfo.tag); - break; - case TAG_PRIMITIVE_ARRAY_DUMP: - 
WritePrimitiveArray(objectInfo.obj, objectInfo.tag); - break; - case TAG_INSTANCE_DUMP: - WriteInstance(objectInfo.obj, objectInfo.tag); - break; - default: - break; - } - } -} -/* - * Record Global Root Info: - * u1 tag; //denoting the type of this sub-record - * ID objId; // object ID - */ -void ArkHeapData::WriteGlobalRoot(BaseObject*& obj, const u1 tag) -{ - AddU1(tag); - ArkHeapDataID objId = (reinterpret_cast(obj)); - CString name = obj->GetTypeInfo()->GetName(); - AddStringId(objId); -} - -/* - * Record Unknown Root Info: - * u1 tag; // denoting the type of this sub-record - * ID objId; // object ID - */ -void ArkHeapData::WriteUnknownRoot(BaseObject*& obj, const u1 tag) -{ - AddU1(tag); - ArkHeapDataID objId = (reinterpret_cast(obj)); - AddStringId(objId); -} - -/* - * Record Local Root Info: - * u1 tag; // denoting the type of this sub-record - * ID objId; // object ID - * u4 threadIdx; // thread serial number - * u4 frame; // frame number in stack trace (-1 for empty) - */ -void ArkHeapData::WriteLocalRoot(BaseObject*& obj, const u1 tag, const u4 tid, const u1 depth) -{ - AddU1(tag); - ArkHeapDataID objId = (reinterpret_cast(obj)); - AddStringId(objId); - AddU4(tid); - AddU4(depth); -} - -/* - * Record Thread Object Root Info: - * u1 tag; // denoting the type of this sub-record - * ID threadObjId; // thread object ID - * u4 threadIdx; // thread serial number - * u4 stackTraceIdx; // stack trace serial number - * - */ -void ArkHeapData::WriteThreadObjectRoot(BaseObject*& obj, const u1 tag, const u4 tid, const u4 stackTraceIdx) -{ - AddU1(tag); - ArkHeapDataID objId = (reinterpret_cast(obj)); - AddStringId(objId); - AddU4(tid); - AddU4(stackTraceIdx); -} - - -/* - * Record Object Array Info: - * u1 tag; // denoting the type of this sub-record - * ID arrObjId; // array object ID - * u4 num; // number of elements - * ID arrClassObjId; // array class object ID - * ID elements[num]; // elements - * - */ -void ArkHeapData::WriteObjectArray(BaseObject*& obj, 
const u1 tag) -{ - AddU1(tag); - ArkHeapDataID objId = (reinterpret_cast(obj)); - AddStringId(objId); - u4 num = 0; - std::stack VAL; - RefFieldVisitor visitor = [&VAL, &num](RefField<>& arrayContent) { - VAL.push(reinterpret_cast(arrayContent.GetTargetObject())); - num++; - }; - // take array length and content. - MArray* mArray = reinterpret_cast(obj); - MIndex arrayLengthVal = mArray->GetLength(); - RefField<>* arrayContent = reinterpret_cast*>(mArray->ConvertToCArray()); - // for each object in array. - for (MIndex i = 0; i < arrayLengthVal; ++i) { - visitor(arrayContent[i]); - } - AddU4(num); - AddStringId(reinterpret_cast(obj->GetTypeInfo())); - while (!VAL.empty()) { - u8 val = VAL.top(); - VAL.pop(); - AddU8(val); - } -} - -/* - * Record struct Array Info: - * u1 tag; // denoting the type of this sub-record - * ID arrObjId; // array object ID - * u4 componentNum; // component Num - * u4 num; // number of ref fields - * ID arrClassObjId; // array class object ID - * ID elements[num]; // elements - * - */ -void ArkHeapData::WriteStructArray(BaseObject*& obj, const u1 tag) -{ - AddU1(tag); - ArkHeapDataID objId = (reinterpret_cast(obj)); - AddStringId(objId); - u4 num = 0; - std::stack VAL; - RefFieldVisitor visitor = [&VAL, &num](RefField<>& arrayContent) { - VAL.push(reinterpret_cast(arrayContent.GetTargetObject())); - num++; - }; - // take array length and content. 
- MArray* mArray = reinterpret_cast(obj); - MIndex arrayLengthVal = mArray->GetLength(); - TypeInfo* componentTypeInfo = mArray->GetComponentTypeInfo(); - GCTib gcTib = componentTypeInfo->GetGCTib(); - HeapAddress contentAddr = reinterpret_cast(mArray) + MArray::GetContentOffset(); - for (MIndex i = 0; i < arrayLengthVal; ++i) { - gcTib.ForEachBitmapWord(contentAddr, visitor); - contentAddr += mArray->GetElementSize(); - } - AddU4(arrayLengthVal); - AddU4(num); - AddStringId(reinterpret_cast(obj->GetTypeInfo())); - while (!VAL.empty()) { - u8 val = VAL.top(); - VAL.pop(); - AddU8(val); - } -} - -/* - * Record Primitive Array Info: - * u1 tag; // denoting the type of this sub-record - * ID arrObjId; // array object - * u4 num; // number of elements - * u1 type; // element type - */ - -void ArkHeapData::WritePrimitiveArray(BaseObject*& obj, const u1 tag) -{ - AddU1(tag); - ArkHeapDataID objId = (reinterpret_cast(obj)); - AddStringId(objId); - MArray* mArray = reinterpret_cast(obj); - AddU4(mArray->GetLength()); - uint32_t ComponentSize = obj->GetTypeInfo()->GetComponentSize(); - switch (ComponentSize) { - // bool:1 bytes - case 1: - AddU1(BOOLEAN); - break; - // short:2 bytes - case 2: - AddU1(SHORT); - break; - // int:4 bytes - case 4: - AddU1(INT); - break; - // long:8 bytes - case 8: - AddU1(LONG); - break; - default: - break; - } -} - -/* - * Record Struct Class Info: - * u1 tag; // denoting the type of this sub-record - * ID classObjId; // class object ID - * u4 size; // instance size (in bytes) - */ -void ArkHeapData::WriteStructClass(TypeInfo* klass, ArkHeapDataStringId klassId, const u1 tag) -{ - AddU1(tag); - AddStringId(reinterpret_cast(klass)); - TypeInfo* componentKlass = klass->GetComponentTypeInfo(); - // No alignment required - u4 size = componentKlass->GetInstanceSize(); - AddU4(size); -} - -/* - * Record Class Info: - * u1 tag; // denoting the type of this sub-record - * ID classObjId; // class object ID - * u4 size; // instance size (in bytes) - */ 
-void ArkHeapData::WriteClass(TypeInfo* klass, ArkHeapDataStringId klassId, const u1 tag) -{ - AddU1(tag); - AddStringId(reinterpret_cast(klass)); - // 8-byte alignment - if (!klass->IsObjectType()) { - AddU4(0); - return; - } - u4 size = AlignUp((klass->GetInstanceSize() + sizeof(TypeInfo*)), alignment); - // 8 bytes for each field - AddU4(size); -} - -/* - * Record Instance Info: - * u1 tag; // denoting the type of this sub-record - * ID objId; // object ID - * ID classObjId; // class object ID - * u4 num; // number of ref fields - * VAL entry[]; // ref contents in instance field values (this class, followed by super class, etc) - */ -void ArkHeapData::WriteInstance(BaseObject*& obj, const u1 tag) -{ - AddU1(tag); - ArkHeapDataID objId = (reinterpret_cast(obj)); - AddStringId(objId); - AddStringId(reinterpret_cast(obj->GetTypeInfo())); - u4 num = 0; - std::stack VAL; - RefFieldVisitor visitor = [&VAL, &num](RefField<>& fieldAddr) { - VAL.push(reinterpret_cast(fieldAddr.GetTargetObject())); - num++; - }; - TypeInfo* currentClass = obj->GetTypeInfo(); - if (obj->HasRefField()) { - GCTib gcTib = currentClass->GetGCTib(); - HeapAddress objAddr = reinterpret_cast(obj) + sizeof(TypeInfo*); - gcTib.ForEachBitmapWord(objAddr, visitor); - } - AddU4(num); - while (!VAL.empty()) { - u8 val = VAL.top(); - VAL.pop(); - AddU8(val); - } -} - -/* - * Record String Info: - * RecordHeader header; - * ID id; // ID for this string - * u1 str[]; // UTF8 characters for string (NOT NULL terminated) - */ -void ArkHeapData::WriteString() -{ - for (auto string : strings) { - WriteRecordHeader(TAG_STRING_IN_UTF8, kHeapDataTime); - ArkHeapDataStringId id = string.second; - AddStringId(id); - const CString str = string.first; - AddU1List(reinterpret_cast(str.Str()), str.Length()); - ModifyLength(); - EndRecord(); - } -} - -/* - * Record Stack Frame Info: - * RecordHeader header; - * ID frameId; // stack frame ID - * ID methodNameId; // method name string ID - * ID srcFileNameId; // source 
file name string ID - * u4 line; // line number(>0: line number, 0: no line information available, -1: unknown - * location) - */ -void ArkHeapData::WriteStackFrame(FrameInfo& frame, uint32_t frameIdx) -{ - if (frameIdx > 0 && frame.GetFrameType() == FrameType::NATIVE) { - return; - } - if (frame.GetFrameType() == FrameType::MANAGED) { - StackMetadataHelper stackMetadataHelper(frame); - methodName = frame.GetFuncName(); - fileName = frame.GetFileName(); - lineNumber = stackMetadataHelper.GetLineNumber(); - } else { - Os::Loader::BinaryInfo binInfo; - (void)Os::Loader::GetBinaryInfoFromAddress(frame.mFrame.GetIP(), &binInfo); - fileName = CString(binInfo.filePathName); - methodName = CString(binInfo.symbolName); - } - WriteRecordHeader(TAG_STACK_FRAME, kHeapDataTime); - AddStringId(frames[&frame]); - AddStringId(LookupStringId(methodName.Str())); - AddStringId(LookupStringId(fileName.Str())); - AddU4(reinterpret_cast(lineNumber)); - ModifyLength(); - EndRecord(); -} - -/* - * Record Stack Trace Info: - * RecordHeader header; - * u4 stackTraceIdx; // stack trace serial number - * u4 threadIdx; // thread serial number - * u4 frameNum; // number of frames - * ID frames[]; // series of stack frame ID's - */ -void ArkHeapData::WriteStackTrace() -{ - for (auto trace = stacktraces.begin(); trace != stacktraces.end(); trace++) { - auto env = std::getenv("DumpStackDepth"); - // 10: Default Limit Max Dump Depth as 10 frames - size_t size = CString::ParseNumFromEnv(env) == 0 ? 10 : CString::ParseNumFromEnv(env); - size_t depth = trace->first->stacks.size() > size ? 
size : trace->first->stacks.size(); - std::vector stack = trace->first->stacks; - for (size_t i = 0; i < depth; ++i) { - WriteStackFrame(*stack[i], i); - } - WriteRecordHeader(TAG_STACK_TRACE, kHeapDataTime); - AddU4(trace->second); - AddU4(trace->first->GetStackTid()); - AddU4(depth); - for (size_t i = 0; i < depth; ++i) { - AddStringId(frames[stack[i]]); - } - ModifyLength(); - EndRecord(); - } -} - -/* - * Record Record Header Info: - * u1 tag; // denoting the type of the record - * u4 time; // number of microseconds since the time stamp in the header - * u4 length; // number of bytes that follow this u4 field and belong to this record - */ -void ArkHeapData::WriteRecordHeader(const u1 tag, const u4 time) -{ - AddU1(tag); - // DEADDEADEADDEAD: placeholder, the actual length is filled in Func modifyLength. - const u8 tmpLens = 0xDEADDEADEADDEAD; - AddU8(tmpLens); -} - -void ArkHeapData::AddU1(const u1 value) { AddU1List(&value, 1); } - -void ArkHeapData::AddU2(const u2 value) { AddU2List(&value, 1); } - -void ArkHeapData::AddU4(const u4 value) { AddU4List(&value, 1); } - -void ArkHeapData::AddU8(const u8 value) { AddU8List(&value, 1); } - -void ArkHeapData::AddID(const u8 value) { AddU8List(&value, 1); } - -void ArkHeapData::AddU1List(const u1* value, uint8_t count) -{ - HandleAddU1(value, count); - length += count; -} - -void ArkHeapData::AddU2List(const u2* value, uint8_t count) -{ - HandleAddU2(value, count); - length += count * sizeof(u2); - ; -} - -void ArkHeapData::AddU4List(const u4* value, uint8_t count) -{ - HandleAddU4(value, count); - length += count * sizeof(u4); -} -void ArkHeapData::AddU8List(const u8* value, uint8_t count) -{ - HandleAddU8(value, count); - length += count * sizeof(u8); -} - -enum ByteOffset { - FIRST_BYTE = 0 * 8, - SECOND_BYTE = 1 * 8, - THIRD_BYTE = 2 * 8, - FOURTH_BYTE = 3 * 8, - FIFTH_BYTE = 4 * 8, - SIXTH_BYTE = 5 * 8, - SEVENTH_BYTE = 6 * 8, - EIGHTH_BYTE = 7 * 8 -}; - -void ArkHeapData::HandleAddU1(const u1* value, uint8_t 
count) { buffer.insert(buffer.end(), value, value + count); } - -void ArkHeapData::HandleAddU2(const u2* value, uint8_t count) -{ - for (int i = 0; i < count; i++) { - buffer.push_back(static_cast((*value >> SECOND_BYTE) & 0xFF)); - buffer.push_back(static_cast((*value >> FIRST_BYTE) & 0xFF)); - value++; - } -} - -void ArkHeapData::HandleAddU4(const u4* value, uint8_t count) -{ - for (int i = 0; i < count; i++) { - buffer.push_back(static_cast((*value >> FOURTH_BYTE) & 0xFF)); - buffer.push_back(static_cast((*value >> THIRD_BYTE) & 0xFF)); - buffer.push_back(static_cast((*value >> SECOND_BYTE) & 0xFF)); - buffer.push_back(static_cast((*value >> FIRST_BYTE) & 0xFF)); - value++; - } -} - -void ArkHeapData::HandleAddU8(const u8* value, uint8_t count) -{ - // 0: offset for 1st byte - // 8: offset for 2nd byte - // 16: offset for 3st byte - // 24: offset for 4st byte - // 32: offset for 5st byte - // 40: offset for 6st byte - // 48: offset for 7st byte - // 56: offset for 8st byte - for (int i = 0; i < count; i++) { - buffer.push_back(static_cast((*value >> EIGHTH_BYTE) & 0xFF)); - buffer.push_back(static_cast((*value >> SEVENTH_BYTE) & 0xFF)); - buffer.push_back(static_cast((*value >> SIXTH_BYTE) & 0xFF)); - buffer.push_back(static_cast((*value >> FIFTH_BYTE) & 0xFF)); - buffer.push_back(static_cast((*value >> FOURTH_BYTE) & 0xFF)); - buffer.push_back(static_cast((*value >> THIRD_BYTE) & 0xFF)); - buffer.push_back(static_cast((*value >> SECOND_BYTE) & 0xFF)); - buffer.push_back(static_cast((*value >> FIRST_BYTE) & 0xFF)); - value++; - } -} - -void ArkHeapData::AddStringId(ArkHeapData::ArkHeapDataStringId value) { AddID(static_cast(value)); } - -void ArkHeapData::EndRecord() -{ - const char* ptr = reinterpret_cast(buffer.data()); - if (fwrite(ptr, length, 1, fp) < 1) { - LOG_COMMON(ERROR) << "Failed to write heap dump file."; - } - length = 0; - std::vector().swap(buffer); -} - -void ArkHeapData::WriteFixedHeader() -{ - const char ident[] = "ARKCOMMON PROFILE 1.0.1"; - 
AddU1List(reinterpret_cast(ident), sizeof(ident)); - const u4 idSize = sizeof(ArkHeapDataID); - AddU4(idSize); - struct timeval timeNow {}; - gettimeofday(&timeNow, nullptr); - const uint64_t msecsTime = (timeNow.tv_sec * 1000) + (timeNow.tv_usec / 1000); - const uint32_t timeHigh = static_cast(msecsTime >> 32); - const uint32_t timeLow = static_cast(msecsTime & 0xFFFFFFFF); - AddU4(timeHigh); - AddU4(timeLow); - EndRecord(); -} -void ArkHeapData::WriteAllClassLoad() -{ - for (auto klassInfo : dumpClassMap) { - WriteClassLoad(klassInfo.first, klassInfo.second, TAG_CLASS_LOAD); - } -} - -void ArkHeapData::WriteAllStructClassLoad() -{ - for (auto klassInfo : dumpStructClassMap) { - WriteClassLoad(klassInfo.first, klassInfo.second, TAG_CLASS_LOAD); - } -} - -/* - * Record Class Load Info: - * ID class object ID - * ID class name string ID - */ -void ArkHeapData::WriteClassLoad(TypeInfo* klass, ArkHeapDataStringId klassId, const u1 tag) -{ - WriteRecordHeader(tag, kHeapDataTime); - AddID(reinterpret_cast(klass)); - AddStringId(klassId); - ModifyLength(); - EndRecord(); -} - -void ArkHeapData::ModifyLength() -{ - // 9: Subtract the length of the record header - constexpr uint8_t recordHeaderLength = 9; - uint64_t value = length - recordHeaderLength; - // 1,2,3,4,5,6,7,8: Stores 64 bits for length - buffer[1] = (static_cast((value >> EIGHTH_BYTE) & 0xFF)); // 1 : index of buffer - buffer[2] = (static_cast((value >> SEVENTH_BYTE) & 0xFF)); // 2 : index of buffer - buffer[3] = (static_cast((value >> SIXTH_BYTE) & 0xFF)); // 3 : index of buffer - buffer[4] = (static_cast((value >> FIFTH_BYTE) & 0xFF)); // 4 : index of buffer - buffer[5] = (static_cast((value >> FOURTH_BYTE) & 0xFF)); // 5 : index of buffer - buffer[6] = (static_cast((value >> THIRD_BYTE) & 0xFF)); // 6 : index of buffer - buffer[7] = (static_cast((value >> SECOND_BYTE) & 0xFF)); // 7 : index of buffer - buffer[8] = (static_cast((value >> FIRST_BYTE) & 0xFF)); // 8 : index of buffer -} - 
-ArkHeapData::ArkHeapDataStringId ArkHeapData::LookupStringId(const CString& string) -{ - auto it = strings.find(string); - if (it != strings.end()) { - return it->second; - } - ArkHeapData::ArkHeapDataStringId id = stringId++; - strings.insert(std::pair(string, id)); - return id; -} -} // namespace panda diff --git a/common_components/common_runtime/src/inspector/heap_snapshot_json_serializer.cpp b/common_components/common_runtime/src/inspector/heap_snapshot_json_serializer.cpp deleted file mode 100755 index 70b7aa6b0f0b8794ea755b4c27696a10437eb209..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/inspector/heap_snapshot_json_serializer.cpp +++ /dev/null @@ -1,627 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include -#include -#include -#include -#include -#include "common_components/common_runtime/src/mutator/mutator_manager.h" -#include "common_components/common_runtime/src/unwind_stack/stack_metadata_helper.h" -#include "common_components/common_runtime/src/object_model/m_array.inline.h" -#include "common_components/common_runtime/src/inspector/file_stream.h" -#include "common_components/common_runtime/src/inspector/heap_data.h" -#include "common_components/common_runtime/src/inspector/heap_snapshot_json_serializer.h" - -namespace panda { - -bool ArkHeapDataForIDE::Serialize() -{ - ScopedStopTheWorld scopedStopTheWorld("serialize-heap-data"); - ProcessHeap(); - HeapProfilerStream* stream = &panda::HeapProfilerStream::GetInstance(); - stream->SetContext(DUMPHEAPSNAPSHOT); - writer_ = new StreamWriter(stream); - writer_->WriteChar('{'); - SerializeFixedHeader(); - SerializeString(); - SerializeAllClassLoad(); - SerializeAllStructClassLoad(); - SerializeStackTrace(); - SerializeStartThread(); - SerializeHeapDump(); - writer_->WriteChar('}'); - writer_->End(); - stream->SetContext(END); - writer_->WriteString("{\"id\":"); - writer_->WriteString(stream->GetMessageID()); - writer_->WriteString(",\"result\":{}"); - writer_->End(); - delete writer_; - return true; -} - -void ArkHeapDataForIDE::SerializeFixedHeader() -{ - const u4 idSize = sizeof(u4); - struct timeval timeNow {}; - gettimeofday(&timeNow, nullptr); - const uint64_t msecsTime = (timeNow.tv_sec * 1000) + (timeNow.tv_usec / 1000); - const uint32_t timeHigh = static_cast(msecsTime >> 32); - const uint32_t timeLow = static_cast(msecsTime & 0xFFFFFFFF); - writer_->WriteString("\\\"HEADER\\\":["); - writer_->WriteNumber(idSize); - writer_->WriteChar(','); - writer_->WriteNumber(timeHigh); - writer_->WriteChar(','); - writer_->WriteNumber(timeLow); - writer_->WriteChar(']'); - writer_->WriteChar(','); -} - -void ArkHeapDataForIDE::SerializeString() -{ - writer_->WriteString("\\\"STRING\\\":["); - bool 
isFirstElement = true; - for (auto string : strings) { - if (!isFirstElement) { - writer_->WriteChar(','); - } else { - isFirstElement = false; - } - writer_->WriteChar('['); - ArkHeapDataStringId id = string.second; - writer_->WriteNumber(GetId(id)); - writer_->WriteChar(','); - const CString str = string.first; - writer_->WriteString("\\\""); - writer_->WriteString(str); - writer_->WriteString("\\\""); - writer_->WriteChar(']'); - } - writer_->WriteChar(']'); - writer_->WriteChar(','); -} - -void ArkHeapDataForIDE::SerializeAllClassLoad() -{ - writer_->WriteString("\\\"CLASSLOAD\\\":["); - bool isFirstElement = true; - for (auto klassInfo : dumpClassMap) { - if (!isFirstElement) { - writer_->WriteChar(','); - } else { - isFirstElement = false; - } - SerializeClassLoad(klassInfo.first, klassInfo.second, TAG_CLASS_LOAD); - } - writer_->WriteChar(']'); - writer_->WriteChar(','); -} - -void ArkHeapDataForIDE::SerializeAllClass() -{ - writer_->WriteString("\\\"CLASS\\\":["); - bool isFirstElement = true; - for (auto klassInfo : dumpClassMap) { - if (!isFirstElement) { - writer_->WriteChar(','); - } else { - isFirstElement = false; - } - SerializeClass(klassInfo.first, klassInfo.second, TAG_CLASS_DUMP); - } - writer_->WriteChar(']'); - writer_->WriteChar(','); -} - -void ArkHeapDataForIDE::SerializeAllStructClass() -{ - writer_->WriteString("\\\"STRUCTCLASS\\\":["); - bool isFirstElement = true; - for (auto klassInfo : dumpStructClassMap) { - if (!isFirstElement) { - writer_->WriteChar(','); - } else { - isFirstElement = false; - } - SerializeStructClass(klassInfo.first, klassInfo.second, TAG_CLASS_DUMP); - } - writer_->WriteChar(']'); - writer_->WriteChar(','); -} - -void ArkHeapDataForIDE::SerializeClassLoad(TypeInfo* klass, ArkHeapDataStringId klassId, const u1 tag) -{ - writer_->WriteChar('['); - writer_->WriteNumber(GetId(reinterpret_cast(klass))); - writer_->WriteChar(','); - writer_->WriteNumber(GetId(reinterpret_cast(klassId))); - writer_->WriteChar(']'); -} - 
-void ArkHeapDataForIDE::SerializeAllStructClassLoad() -{ - writer_->WriteString("\\\"STRUCTCLASSLOAD\\\":["); - bool isFirstElement = true; - for (auto klassInfo : dumpStructClassMap) { - if (!isFirstElement) { - writer_->WriteChar(','); - } else { - isFirstElement = false; - } - SerializeClassLoad(klassInfo.first, klassInfo.second, TAG_CLASS_LOAD); - } - writer_->WriteChar(']'); - writer_->WriteChar(','); -} - -void ArkHeapDataForIDE::SerializeStackFrame(FrameInfo& frame, uint32_t frameIdx) -{ - writer_->WriteChar('['); - if (frameIdx > 0 && frame.GetFrameType() == FrameType::NATIVE) { - return; - } - if (frame.GetFrameType() == FrameType::MANAGED) { - StackMetadataHelper stackMetadataHelper(frame); - methodName = frame.GetFuncName(); - fileName = frame.GetFileName(); - lineNumber = stackMetadataHelper.GetLineNumber(); - } else { - Os::Loader::BinaryInfo binInfo; - (void)Os::Loader::GetBinaryInfoFromAddress(frame.mFrame.GetIP(), &binInfo); - fileName = CString(binInfo.filePathName); - methodName = CString(binInfo.symbolName); - } - writer_->WriteNumber(frames[&frame]); - writer_->WriteChar(','); - writer_->WriteNumber(LookupStringId(methodName.Str())); - writer_->WriteChar(','); - writer_->WriteNumber(LookupStringId(fileName.Str())); - writer_->WriteChar(','); - writer_->WriteNumber(reinterpret_cast(lineNumber)); -} - - -void ArkHeapDataForIDE::SerializeStackTrace() -{ - for (auto trace = stacktraces.begin(); trace != stacktraces.end(); trace++) { - auto env = std::getenv("DumpStackDepth"); - // 10: Default Limit Max Dump Depth as 10 frames - size_t size = CString::ParseNumFromEnv(env) == 0 ? 10 : CString::ParseNumFromEnv(env); - size_t depth = trace->first->stacks.size() > size ? 
size : trace->first->stacks.size(); - std::vector stack = trace->first->stacks; - writer_->WriteString("\\\"STACKFRAME\\\":["); - for (size_t i = 0; i < depth; ++i) { - SerializeStackFrame(*stack[i], i); - } - writer_->WriteChar(']'); - writer_->WriteChar(','); - writer_->WriteString("\\\"STACKTRACE\\\":["); - writer_->WriteNumber(trace->second); - writer_->WriteChar(','); - writer_->WriteNumber(trace->first->GetStackTid()); - writer_->WriteChar(','); - writer_->WriteNumber(depth); - writer_->WriteChar(','); - writer_->WriteChar('['); - for (size_t i = 0; i < depth; ++i) { - writer_->WriteNumber(frames[stack[i]]); - } - writer_->WriteChar(']'); - } - writer_->WriteChar(']'); - writer_->WriteChar(','); -} - -void ArkHeapDataForIDE::SerializeStartThread() -{ - writer_->WriteString("\\\"STARTTHREAD\\\":["); - for (auto trace = stacktraces.begin(); trace != stacktraces.end(); trace++) { - writer_->WriteNumber(trace->first->GetStackTid()); - writer_->WriteChar(','); - writer_->WriteNumber(GetId(threadObjectId++)); - writer_->WriteChar(','); - writer_->WriteNumber(trace->second); - writer_->WriteChar(','); - CString threadNameAll = trace->first->GetThreadName(); - writer_->WriteNumber(GetId(LookupStringId(threadNameAll))); - } - writer_->WriteChar(']'); - writer_->WriteChar(','); -} - -void ArkHeapDataForIDE::SerializeHeapDump() -{ - SerializeAllClass(); - SerializeAllStructClass(); - SerializeAllObjects(); -} - -void ArkHeapDataForIDE::SerializeAllObjects() -{ - writer_->WriteString("\\\"OBJECTS\\\":["); - bool isFirstElement = true; - for (auto objectInfo : dumpObjects) { - if (!isFirstElement) { - writer_->WriteChar(','); - } else { - isFirstElement = false; - } - switch (objectInfo.tag) { - case TAG_ROOT_THREAD_OBJECT: - SerializeThreadObjectRoot(objectInfo.obj, objectInfo.tag, objectInfo.threadId, 0); - break; - case TAG_ROOT_LOCAL: - SerializeLocalRoot(objectInfo.obj, objectInfo.tag, objectInfo.threadId, objectInfo.frameNum); - break; - case TAG_ROOT_GLOBAL: - 
SerializeGlobalRoot(objectInfo.obj, objectInfo.tag); - break; - case TAG_ROOT_UNKNOWN: - SerializeUnknownRoot(objectInfo.obj, objectInfo.tag); - break; - case TAG_OBJECT_ARRAY_DUMP: - SerializeObjectArray(objectInfo.obj, objectInfo.tag); - break; - case TAG_STRUCT_ARRAY_DUMP: - SerializeStructArray(objectInfo.obj, objectInfo.tag); - break; - case TAG_PRIMITIVE_ARRAY_DUMP: - SerializePrimitiveArray(objectInfo.obj, objectInfo.tag); - break; - case TAG_INSTANCE_DUMP: - SerializeInstance(objectInfo.obj, objectInfo.tag); - break; - default: - break; - } - } - writer_->WriteChar(']'); -} - -/* - * Record Global Root Info: - * u1 tag; //denoting the type of this sub-record - * ID objId; // object ID - */ -void ArkHeapDataForIDE::SerializeGlobalRoot(BaseObject*& obj, const u1 tag) -{ - writer_->WriteChar('['); - writer_->WriteNumber(tag); - writer_->WriteChar(','); - ArkHeapDataID objAddr = (reinterpret_cast(obj)); - u4 id = GetId(objAddr); - CString name = obj->GetTypeInfo()->GetName(); - writer_->WriteNumber(id); - writer_->WriteChar(']'); -} - -/* - * Record Unknown Root Info: - * u1 tag; // denoting the type of this sub-record - * ID objId; // object ID - */ -void ArkHeapDataForIDE::SerializeUnknownRoot(BaseObject*& obj, const u1 tag) -{ - writer_->WriteChar('['); - writer_->WriteNumber(tag); - writer_->WriteChar(','); - ArkHeapDataID objAddr = (reinterpret_cast(obj)); - u4 id = GetId(objAddr); - writer_->WriteNumber(id); - writer_->WriteChar(']'); -} - -/* - * Record Local Root Info: - * u1 tag; // denoting the type of this sub-record - * ID objId; // object ID - * u4 threadIdx; // thread serial number - * u4 frame; // frame number in stack trace (-1 for empty) - */ -void ArkHeapDataForIDE::SerializeLocalRoot(BaseObject*& obj, const u1 tag, const u4 tid, const u1 depth) -{ - writer_->WriteChar('['); - writer_->WriteNumber(tag); - writer_->WriteChar(','); - ArkHeapDataID objAddr = (reinterpret_cast(obj)); - u4 id = GetId(objAddr); - writer_->WriteNumber(id); - 
writer_->WriteChar(','); - writer_->WriteNumber(tid); - writer_->WriteChar(','); - writer_->WriteNumber(depth); - writer_->WriteChar(']'); -} - -/* - * Record Thread Object Root Info: - * u1 tag; // denoting the type of this sub-record - * ID threadObjId; // thread object ID - * u4 threadIdx; // thread serial number - * u4 stackTraceIdx; // stack trace serial number - * - */ -void ArkHeapDataForIDE::SerializeThreadObjectRoot(BaseObject*& obj, const u1 tag, const u4 tid, const u4 stackTraceIdx) -{ - writer_->WriteChar('['); - writer_->WriteNumber(tag); - writer_->WriteChar(','); - ArkHeapDataID objAddr = (reinterpret_cast(obj)); - u4 id = GetId(objAddr); - writer_->WriteNumber(id); - writer_->WriteChar(','); - writer_->WriteNumber(tid); - writer_->WriteChar(','); - writer_->WriteNumber(stackTraceIdx); - writer_->WriteChar(']'); -} - -/* - * Record Class Info: - * u1 tag; // denoting the type of this sub-record - * ID classObjId; // class object ID - * u4 size; // instance size (in bytes) - */ -void ArkHeapDataForIDE::SerializeClass(TypeInfo* klass, ArkHeapDataStringId klassId, const u1 tag) -{ - writer_->WriteChar('['); - writer_->WriteNumber(tag); - writer_->WriteChar(','); - writer_->WriteNumber(GetId(reinterpret_cast(klass))); - writer_->WriteChar(','); - // 8-byte alignment - if (!klass->IsObjectType()) { - writer_->WriteNumber(0); - writer_->WriteChar(']'); - return; - } - u4 size = AlignUp((klass->GetInstanceSize() + sizeof(TypeInfo*)), alignment); - // 8 bytes for each field - writer_->WriteNumber(size); - writer_->WriteChar(']'); -} - -/* - * Record Object Array Info: - * u1 tag; // denoting the type of this sub-record - * ID arrObjId; // array object ID - * u4 num; // number of elements - * ID arrClassObjId; // array class object ID - * ID elements[num]; // elements - * - */ -void ArkHeapDataForIDE::SerializeObjectArray(BaseObject*& obj, const u1 tag) -{ - writer_->WriteChar('['); - writer_->WriteNumber(tag); - writer_->WriteChar(','); - ArkHeapDataID 
objAddr = (reinterpret_cast(obj)); - u4 id = GetId(objAddr); - writer_->WriteNumber(id); - writer_->WriteChar(','); - u4 num = 0; - std::stack VAL; - RefFieldVisitor visitor = [&VAL, &num, this](RefField<>& arrayContent) { - VAL.push(GetId(reinterpret_cast(arrayContent.GetTargetObject()))); - num++; - }; - // take array length and content. - MArray* mArray = reinterpret_cast(obj); - MIndex arrayLengthVal = mArray->GetLength(); - RefField<>* arrayContent = reinterpret_cast*>(mArray->ConvertToCArray()); - // for each object in array. - for (MIndex i = 0; i < arrayLengthVal; ++i) { - visitor(arrayContent[i]); - } - writer_->WriteNumber(num); - writer_->WriteChar(','); - writer_->WriteNumber(GetId(reinterpret_cast(obj->GetTypeInfo()))); - writer_->WriteChar(','); - writer_->WriteChar('['); - bool isFirstElement = true; - while (!VAL.empty()) { - if (!isFirstElement) { - writer_->WriteChar(','); - } else { - isFirstElement = false; - } - u4 val = VAL.top(); - VAL.pop(); - writer_->WriteNumber(val); - } - writer_->WriteChar(']'); - writer_->WriteChar(']'); -} - -/* - * Record struct Array Info: - * u1 tag; // denoting the type of this sub-record - * ID arrObjId; // array object ID - * u4 componentNum; // component Num - * u4 num; // number of ref fields - * ID arrClassObjId; // array class object ID - * ID elements[num]; // elements - * - */ - -void ArkHeapDataForIDE::SerializeStructArray(BaseObject*& obj, const u1 tag) -{ - writer_->WriteChar('['); - writer_->WriteNumber(tag); - writer_->WriteChar(','); - ArkHeapDataID objAddr = (reinterpret_cast(obj)); - u4 id = GetId(objAddr); - writer_->WriteNumber(id); - writer_->WriteChar(','); - u4 num = 0; - std::stack VAL; - RefFieldVisitor visitor = [&VAL, &num, this](RefField<>& arrayContent) { - VAL.push(GetId(reinterpret_cast(arrayContent.GetTargetObject()))); - num++; - }; - // take array length and content. 
- MArray* mArray = reinterpret_cast(obj); - MIndex arrayLengthVal = mArray->GetLength(); - TypeInfo* componentTypeInfo = mArray->GetComponentTypeInfo(); - GCTib gcTib = componentTypeInfo->GetGCTib(); - HeapAddress contentAddr = reinterpret_cast(mArray) + MArray::GetContentOffset(); - for (MIndex i = 0; i < arrayLengthVal; ++i) { - gcTib.ForEachBitmapWord(contentAddr, visitor); - contentAddr += mArray->GetElementSize(); - } - writer_->WriteNumber(arrayLengthVal); - writer_->WriteChar(','); - writer_->WriteNumber(num); - writer_->WriteChar(','); - writer_->WriteNumber(GetId(reinterpret_cast(obj->GetTypeInfo()))); - writer_->WriteChar(','); - writer_->WriteChar('['); - bool isFirstElement = true; - while (!VAL.empty()) { - if (!isFirstElement) { - writer_->WriteChar(','); - } else { - isFirstElement = false; - } - u4 val = VAL.top(); - VAL.pop(); - writer_->WriteNumber(val); - } - writer_->WriteChar(']'); - writer_->WriteChar(']'); -} - -/* - * Record Primitive Array Info: - * u1 tag; // denoting the type of this sub-record - * ID arrObjId; // array object - * u4 num; // number of elements - * u1 type; // element type - */ - -void ArkHeapDataForIDE::SerializePrimitiveArray(BaseObject*& obj, const u1 tag) -{ - writer_->WriteChar('['); - writer_->WriteNumber(tag); - writer_->WriteChar(','); - ArkHeapDataID objAddr = (reinterpret_cast(obj)); - u4 id = GetId(objAddr); - writer_->WriteNumber(id); - writer_->WriteChar(','); - MArray* mArray = reinterpret_cast(obj); - writer_->WriteNumber(mArray->GetLength()); - writer_->WriteChar(','); - uint32_t ComponentSize = obj->GetTypeInfo()->GetComponentSize(); - switch (ComponentSize) { - // bool:1 bytes - case 1: - writer_->WriteNumber(BOOLEAN); - break; - // short:2 bytes - case 2: - writer_->WriteNumber(SHORT); - break; - // int:4 bytes - case 4: - writer_->WriteNumber(INT); - break; - // long:8 bytes - case 8: - writer_->WriteNumber(LONG); - break; - default: - break; - } - writer_->WriteChar(']'); -} - -/* - * Record Struct 
Class Info: - * u1 tag; // denoting the type of this sub-record - * ID classObjId; // class object ID - * u4 size; // instance size (in bytes) - */ -void ArkHeapDataForIDE::SerializeStructClass(TypeInfo* klass, ArkHeapDataStringId klassId, const u1 tag) -{ - writer_->WriteChar('['); - writer_->WriteNumber(tag); - writer_->WriteChar(','); - writer_->WriteNumber(GetId(reinterpret_cast(klass))); - TypeInfo* componentKlass = klass->GetComponentTypeInfo(); - // No alignment required - u4 size = componentKlass->GetInstanceSize(); - writer_->WriteChar(','); - writer_->WriteNumber(size); - writer_->WriteChar(']'); -} - -/* - * Record Instance Info: - * u1 tag; // denoting the type of this sub-record - * ID objId; // object ID - * ID classObjId; // class object ID - * u4 num; // number of ref fields - * VAL entry[]; // ref contents in instance field values (this class, followed by super class, etc) - */ -void ArkHeapDataForIDE::SerializeInstance(BaseObject*& obj, const u1 tag) -{ - writer_->WriteChar('['); - writer_->WriteNumber(tag); - writer_->WriteChar(','); - u4 id = GetId(reinterpret_cast(obj)); - writer_->WriteNumber(id); - writer_->WriteChar(','); - writer_->WriteNumber(GetId(reinterpret_cast(obj->GetTypeInfo()))); - writer_->WriteChar(','); - u4 num = 0; - std::stack VAL; - RefFieldVisitor visitor = [&VAL, &num, this](RefField<>& fieldAddr) { - VAL.push(GetId(reinterpret_cast(fieldAddr.GetTargetObject()))); - num++; - }; - TypeInfo* currentClass = obj->GetTypeInfo(); - if (obj->HasRefField()) { - GCTib gcTib = currentClass->GetGCTib(); - HeapAddress objAddr = reinterpret_cast(obj) + sizeof(TypeInfo*); - gcTib.ForEachBitmapWord(objAddr, visitor); - } - writer_->WriteNumber(num); - writer_->WriteChar(','); - writer_->WriteChar('['); - bool isFirstElement = true; - while (!VAL.empty()) { - if (!isFirstElement) { - writer_->WriteChar(','); - } else { - isFirstElement = false; - } - u4 val = VAL.top(); - VAL.pop(); - writer_->WriteNumber(val); - } - 
writer_->WriteChar(']'); - writer_->WriteChar(']'); -} - -ArkHeapData::u4 ArkHeapDataForIDE::GetId(ArkHeapDataStringId klassId) -{ - if (stringIdxMap_.find(klassId) == stringIdxMap_.end()) { - stringIdxMap_[klassId] = ++stringIdx; - } - return stringIdxMap_[klassId]; -} - -} diff --git a/common_components/common_runtime/src/inspector/heap_snapshot_json_serializer.h b/common_components/common_runtime/src/inspector/heap_snapshot_json_serializer.h deleted file mode 100755 index 774615954a2edb69c09fa987eb59b541b5230043..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/inspector/heap_snapshot_json_serializer.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ARK_HEAP_SNAPSHOT_JSON_SERIALIZER_H -#define ARK_HEAP_SNAPSHOT_JSON_SERIALIZER_H -#include -#include -#include -#include "common_components/common_runtime/src/inspector/file_stream.h" -#include "common_components/common_runtime/src/base/c_string.h" -#include "common_components/common_runtime/src/inspector/heap_data.h" -#include "securec.h" - -namespace panda { -class ArkHeapDataForIDE : public ArkHeapData { -public: - using HeapDataIDForIDE = u4; - bool Serialize(); - void SerializeFixedHeader(); - void SerializeString(); - void SerializeRecordHeader(const u1 tag, const u4 time); - void SerializeAllClassLoad(); - void SerializeAllStructClassLoad(); - void SerializeAllClass(); - void SerializeAllStructClass(); - void SerializeStackTrace(); - void SerializeStartThread(); - void SerializeHeapDump(); - void SerializeClassLoad(TypeInfo* klass, ArkHeapDataStringId klassId, const u1 tag); - void SerializeAllObjects(); - void SerializeGlobalRoot(BaseObject*& obj, const u1 tag); - void SerializeUnknownRoot(BaseObject*& obj, const u1 tag); - void SerializeLocalRoot(BaseObject*& obj, const u1 tag, const u4 tid, const u1 depth); - void SerializeThreadObjectRoot(BaseObject*& obj, const u1 tag, const u4 tid, const u4 stackTraceIdx); - void SerializeObjectArray(BaseObject*& obj, const u1 tag); - void SerializeStructArray(BaseObject*& obj, const u1 tag); - void SerializePrimitiveArray(BaseObject*& obj, const u1 tag); - void SerializeInstance(BaseObject*& obj, const u1 tag); - void SerializeClass(TypeInfo* klass, ArkHeapDataStringId klassId, const u1 tag); - void SerializeStructClass(TypeInfo* klass, ArkHeapDataStringId klassId, const u1 tag); - u4 GetId(ArkHeapDataStringId klassId); -private: - StreamWriter* writer_ = nullptr; - std::unordered_map stringIdxMap_; - u4 stringIdx_ = 0; -}; -} -#endif diff --git a/common_components/common_runtime/src/inspector/profiler_agent_impl.cpp b/common_components/common_runtime/src/inspector/profiler_agent_impl.cpp deleted file 
mode 100755 index 2ef17939577c8851d12f9344357c5ea8713f94aa..0000000000000000000000000000000000000000 --- a/common_components/common_runtime/src/inspector/profiler_agent_impl.cpp +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "common_components/common_runtime/src/heap/heap.h" -#include "common_components/common_runtime/src/heap/collector/task_queue.h" -#include "common_components/common_runtime/src/heap/collector/collector_resources.h" -#include "common_components/common_runtime/src/heap/collector/gc_request.h" -#include "common_components/common_runtime/src/inspector/file_stream.h" -#include "common_components/common_runtime/src/inspector/alloc_data.h" -#include "common_components/common_runtime/src/heap/allocator/region_info.h" -#include "common_components/common_runtime/src/heap/allocator/alloc_buffer.h" -#include "common_components/common_runtime/src/inspector/profiler_agent_impl.h" -namespace panda { -int EnableAllocRecord(bool enable) -{ - panda::ArkAllocData::GetArkAllocData()->SetRecording(enable); - if (enable) { - panda::ArkAllocData::SetArkAllocData(); - } else { - panda::ArkAllocData::GetArkAllocData()->DeleteArkAllocData(); - } - return 0; -} -std::string ParseDomain(const std::string &message) -{ - std::string key = "\"id\":"; - size_t startPos = message.find(key); - if (startPos == std::string::npos) { - return ""; - } - startPos += key.length(); - 
- while (startPos < message.length() && std::isspace(message[startPos])) { - ++startPos; - } - - if (message[startPos] == '"') { - size_t endPos = message.find('"', startPos + 1); - if (endPos == std::string::npos) { - return ""; - } - return message.substr(startPos + 1, endPos - startPos - 1); - } else { - size_t endPos = message.find(',', startPos); - if (endPos == std::string::npos) { - endPos = message.find('}', startPos); - } - if (endPos == std::string::npos) { - return ""; - } - return message.substr(startPos, endPos - startPos); - } -} - -void SetEnd(const std::string &message, panda::MsgType type) -{ - panda::HeapProfilerStream* stream = &panda::HeapProfilerStream::GetInstance(); - stream->SetContext(type); - panda::StreamWriter* writer = new panda::StreamWriter(stream); - writer->WriteString("{\"id\":"); - writer->WriteString(panda::HeapProfilerStream::GetInstance().GetMessageID()); - writer->WriteString(",\"result\":{}"); - writer->End(); - delete writer; -} - -void DumpHeapSnapshot(SendMsgCB sendMsg) -{ - panda::Heap::GetHeap().GetCollectorResources().RequestHeapDump( - panda::GCTask::TaskType::GC_TASK_DUMP_HEAP_IDE); -} - -void StartTrackingHeapObjects(const std::string &message, SendMsgCB sendMsg) -{ - SetEnd(message, panda::MsgType::END); - EnableAllocRecord(true); -} - -void StopTrackingHeapObjects(const std::string &message, SendMsgCB sendMsg) -{ - EnableAllocRecord(false); - SetEnd(message, panda::MsgType::END); -} - -void CollectGarbage(const std::string &message, SendMsgCB sendMsg) -{ - panda::Heap::GetHeap().GetCollectorResources().RequestGC(panda::GC_REASON_HEU, false); - SetEnd(message, panda::MsgType::END); -} - -void DisableCollect(const std::string &message, SendMsgCB sendMsg) -{ - SetEnd(message, panda::MsgType::DISABLE); -} - -void GetHeapUsage(const std::string &message, SendMsgCB sendMsg) -{ - panda::HeapProfilerStream* stream = &panda::HeapProfilerStream::GetInstance(); - stream->SetContext(panda::MsgType::HEAPUSAGE); - 
panda::StreamWriter* writer = new panda::StreamWriter(stream); - writer->WriteString("{\"id\":"); - writer->WriteString(panda::HeapProfilerStream::GetInstance().GetMessageID()); - writer->WriteString(",\"result\":{\"usedSize\":"); - ssize_t allocatedSize = panda::Heap::GetHeap().GetAllocatedSize(); - writer->WriteNumber(allocatedSize); - writer->WriteString(",\"totalSize\":"); - ssize_t totalSize = panda::Heap::GetHeap().GetMaxCapacity(); - writer->WriteNumber(totalSize); - writer->WriteString("}"); - writer->End(); - delete writer; -} - -void ProfilerAgentImpl(const std::string &message, SendMsgCB sendMsg) -{ - panda::HeapProfilerStream::GetInstance().SetHandler(sendMsg); - panda::HeapProfilerStream::GetInstance().SetMessageID(message); - if (message.find("takeHeapSnapshot", 0) != std::string::npos) { - DumpHeapSnapshot(sendMsg); - } else if (message.find("startTrackingHeapObjects", 0) != std::string::npos) { - StartTrackingHeapObjects(message, sendMsg); - } else if (message.find("stopTrackingHeapObjects", 0) != std::string::npos) { - StopTrackingHeapObjects(message, sendMsg); - } else if (message.find("disable", 0) != std::string::npos) { - DisableCollect(message, sendMsg); - } else if (message.find("collectGarbage", 0) != std::string::npos) { - CollectGarbage(message, sendMsg); - } else if (message.find("getHeapUsage", 0) != std::string::npos) { - GetHeapUsage(message, sendMsg); - } else { - LOG_COMMON(ERROR) << "invaild request\n"; - } -} -} \ No newline at end of file diff --git a/common_components/base_runtime/tests/BUILD.gn b/common_components/common_runtime/tests/BUILD.gn similarity index 100% rename from common_components/base_runtime/tests/BUILD.gn rename to common_components/common_runtime/tests/BUILD.gn diff --git a/common_components/base_runtime/tests/base_runtime_test.cpp b/common_components/common_runtime/tests/base_runtime_test.cpp similarity index 83% rename from common_components/base_runtime/tests/base_runtime_test.cpp rename to 
common_components/common_runtime/tests/base_runtime_test.cpp index b01820ddcc54bd0d4ca4384f4eea218e24d9794f..033020630bd103a769be2fc6f04edd80800821a5 100644 --- a/common_components/base_runtime/tests/base_runtime_test.cpp +++ b/common_components/common_runtime/tests/base_runtime_test.cpp @@ -36,7 +36,7 @@ HWTEST_F_L0(BaseRuntimeTest, RequestGC_Test1) { BaseRuntime* runtime = BaseRuntime::GetInstance(); ASSERT_TRUE(runtime != nullptr); - runtime->RequestGC(static_cast(-1)); + runtime->RequestGC(static_cast(-1), false, static_cast(-1)); BaseObject obj; RefField field(reinterpret_cast(&obj)); @@ -92,19 +92,25 @@ HWTEST_F_L0(BaseRuntimeTest, RequestGC_Sync_CallsHeapManager) { auto* runtime = BaseRuntime::GetInstance(); ASSERT_NE(runtime, nullptr); runtime->Init(); - const GcType allTypes[] = { - GcType::SYNC, - GcType::ASYNC, - GcType::FULL, - GcType::APPSPAWN + struct TestCase { + GCReason reason; + bool async; + GCType gcType; + }; + + const std::vector testCases = { + {GC_REASON_USER, false, GC_TYPE_FULL}, + {GC_REASON_USER, true, GC_TYPE_FULL}, + {GC_REASON_BACKUP, false, GC_TYPE_FULL}, + {GC_REASON_APPSPAWN, false, GC_TYPE_FULL} }; - for (GcType type : allTypes) { + for (TestCase tc : testCases) { testing::internal::CaptureStderr(); - EXPECT_NO_FATAL_FAILURE(runtime->RequestGC(type)); + EXPECT_NO_FATAL_FAILURE(runtime->RequestGC(tc.reason, tc.async, tc.gcType)); std::string output = testing::internal::GetCapturedStderr(); - EXPECT_TRUE(output.empty()) << "GC Type " << static_cast(type) + EXPECT_TRUE(output.empty()) << "GC reason " << static_cast(tc.reason) << " produced unexpected stderr output."; } diff --git a/common_components/heap/allocator/alloc_buffer.h b/common_components/heap/allocator/alloc_buffer.h index bfe35c47c4c4662fedc49ba66c82905a53c3a52f..3d8a8f9af1f69ff411871e02ea19b4e46d145ab9 100755 --- a/common_components/heap/allocator/alloc_buffer.h +++ b/common_components/heap/allocator/alloc_buffer.h @@ -18,10 +18,18 @@ #include +#include 
"common_components/mutator/thread_local.h" #include "common_components/heap/allocator/region_list.h" #include "common_components/common/mark_work_stack.h" namespace common { + +enum class AllocBufferType: uint8_t { + YOUNG = 0, // for young space + OLD, // for old space + TO // for to space, valid only dring GC copy/fix phase and will become old-space later +}; + // thread-local data structure class AllocationBuffer { public: @@ -30,21 +38,56 @@ public: void Init(); static AllocationBuffer* GetOrCreateAllocBuffer(); static AllocationBuffer* GetAllocBuffer(); - HeapAddress ToSpaceAllocate(size_t size, AllocType allocType); + HeapAddress ToSpaceAllocate(size_t size); HeapAddress Allocate(size_t size, AllocType allocType); - RegionDesc* GetRegion() { return tlRegion_; } + + template + RegionDesc* GetRegion() + { + if constexpr (type == AllocBufferType::YOUNG) { + return tlRegion_; + } else if constexpr (type == AllocBufferType::OLD) { + return tlOldRegion_; + } else if constexpr (type == AllocBufferType::TO) { + return tlToRegion_; + } + } + + template + void SetRegion(RegionDesc* newRegion) + { + if constexpr (type == AllocBufferType::YOUNG) { + tlRegion_ = newRegion; + } else if constexpr (type == AllocBufferType::OLD) { + tlOldRegion_ = newRegion; + } else if constexpr (type == AllocBufferType::TO) { + tlToRegion_ = newRegion; + } + } + RegionDesc* GetPreparedRegion() { return preparedRegion_.load(std::memory_order_acquire); } - void SetRegion(RegionDesc* newRegion) { tlRegion_ = newRegion; } + + template inline void ClearRegion() { - if (tlRegion_ == RegionDesc::NullRegion()) { - return; + if constexpr (type == AllocBufferType::YOUNG) { + tlRegion_ = RegionDesc::NullRegion(); + } else if constexpr (type == AllocBufferType::OLD) { + tlOldRegion_ = RegionDesc::NullRegion(); + } else if constexpr (type == AllocBufferType::TO) { + tlToRegion_ = RegionDesc::NullRegion(); } - DLOG(REGION, "AllocBuffer clear tlRegion %p@%#zx+%zu", - tlRegion_, 
tlRegion_->GetRegionStart(), tlRegion_->GetRegionAllocatedSize()); - tlRegion_ = RegionDesc::NullRegion(); } + + inline void ClearRegions() + { + ClearRegion(); + ClearRegion(); + ClearRegion(); + } + void ClearThreadLocalRegion(); + void Unregister(); bool SetPreparedRegion(RegionDesc* newPreparedRegion) { @@ -72,11 +115,43 @@ public: stackRoots_.clear(); } + template + HeapAddress FastAllocateInTlab(size_t size) + { + if constexpr (allocType == AllocBufferType::YOUNG) { + if (LIKELY_CC(tlRegion_ != RegionDesc::NullRegion())) { + return tlRegion_->Alloc(size); + } + } else if constexpr (allocType == AllocBufferType::OLD) { + if (LIKELY_CC(tlOldRegion_ != RegionDesc::NullRegion())) { + return tlOldRegion_->Alloc(size); + } + } + return 0; + } + + // Allocation buffer is thread local, but held in multiple mutators per thread. + // RefCount records how many mutators holds this allocbuffer. + void IncreaseRefCount() + { + refCount_++; + } + + bool DecreaseRefCount() + { + return --refCount_ <= 0; + } + static constexpr size_t GetTLRegionOffset() { return offsetof(AllocationBuffer, tlRegion_); } + static constexpr size_t GetTLOldRegionOffset() + { + return offsetof(AllocationBuffer, tlOldRegion_); + } + private: // slow path HeapAddress TryAllocateOnce(size_t totalSize, AllocType allocType); @@ -85,13 +160,16 @@ private: // tlRegion in AllocBuffer is a shortcut for fast allocation. // we should handle failure in RegionManager - RegionDesc* tlRegion_ = RegionDesc::NullRegion(); + RegionDesc* tlRegion_ = RegionDesc::NullRegion(); // managed by young-space + RegionDesc* tlOldRegion_ = RegionDesc::NullRegion(); // managed by old-space + // only used in ToSpaceAllocate for GC copy + RegionDesc* tlToRegion_ = RegionDesc::NullRegion(); // managed by to-space std::atomic preparedRegion_ = { nullptr }; // allocate objects which are exposed to runtime thus can not be moved. // allocation context is responsible to notify collector when these objects are safe to be collected. 
RegionList tlRawPointerRegions_; - + int64_t refCount_ { 0 }; // Record stack roots in concurrent enum phase, waiting for GC to merge these roots std::list stackRoots_; diff --git a/common_components/heap/allocator/alloc_buffer_manager.h b/common_components/heap/allocator/alloc_buffer_manager.h index 3f8b45fcbb10bc7ce0510314f24ae69f22fb837e..e1be55ee318465ea3838f631846dd0a31e2fb831 100755 --- a/common_components/heap/allocator/alloc_buffer_manager.h +++ b/common_components/heap/allocator/alloc_buffer_manager.h @@ -49,6 +49,13 @@ public: allocBufferLock_.Unlock(); } + void UnregisterAllocBuffer(AllocationBuffer& buffer) + { + allocBufferLock_.Lock(); + allocBuffers_.erase(&buffer); + allocBufferLock_.Unlock(); + } + template void VisitAllocBuffers(const AllocBufferVisitor& visitor) { diff --git a/common_components/heap/allocator/allocator.cpp b/common_components/heap/allocator/allocator.cpp index 6eb7667f306b1ae166b71ea05fa130e70f29bab2..9791d8a1049970b3ddca4c9b3dc164443d4d1c9f 100755 --- a/common_components/heap/allocator/allocator.cpp +++ b/common_components/heap/allocator/allocator.cpp @@ -19,7 +19,7 @@ #include "common_components/base/immortal_wrapper.h" #include "common_components/common/base_object.h" -#include "common_components/heap/allocator/region_space.h" +#include "common_components/heap/allocator/regional_heap.h" #include "common_components/mutator/thread_local.h" namespace common { @@ -80,8 +80,8 @@ PagePool& PagePool::Instance() noexcept Allocator* Allocator::CreateAllocator() { - RegionSpace* heapSpace = new (std::nothrow) RegionSpace(); - LOGF_CHECK(heapSpace != nullptr) << "New RegionSpace failed"; + RegionalHeap* heapSpace = new (std::nothrow) RegionalHeap(); + LOGF_CHECK(heapSpace != nullptr) << "New RegionalHeap failed"; return heapSpace; } } // namespace common diff --git a/common_components/heap/allocator/allocator.h b/common_components/heap/allocator/allocator.h index 
2ea7ddb6f2890627660b3a33593e482a32059e6a..841203ea95189886cbe6b57555fbbf032a2afed2 100755 --- a/common_components/heap/allocator/allocator.h +++ b/common_components/heap/allocator/allocator.h @@ -44,11 +44,18 @@ public: // allocated bytes for large objects are included. virtual size_t GetAllocatedBytes() const = 0; + virtual size_t GetSurvivedSize() const = 0; + inline void RegisterAllocBuffer(AllocationBuffer& buffer) const { allocBufferManager_->RegisterAllocBuffer(buffer); } + inline void UnregisterAllocBuffer(AllocationBuffer& buffer) const + { + allocBufferManager_->UnregisterAllocBuffer(buffer); + } + virtual ~Allocator() {} Allocator(); diff --git a/common_components/heap/allocator/fix_heap.cpp b/common_components/heap/allocator/fix_heap.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ff769ce1634825b4010584bed40f3ef85fffe0e8 --- /dev/null +++ b/common_components/heap/allocator/fix_heap.cpp @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/heap/allocator/fix_heap.h" + +#include "common_components/heap/ark_collector/ark_collector.h" +#include "common_runtime/hooks.h" + +namespace common { + +void FixHeapWorker::CollectFixHeapTasks(FixHeapTaskList &taskList, RegionList &list, FixRegionType type) +{ + list.VisitAllRegions([&taskList, type](RegionDesc *region) { taskList.emplace_back(region, type); }); +} + +void FixHeapWorker::FixOldRegion(RegionDesc *region) +{ + auto visitFunc = [this, ®ion](BaseObject *object) { + DLOG(FIX, "fix: old obj %p<%p>(%zu)", object, object->GetTypeInfo(), object->GetSize()); + collector_->FixObjectRefFields(object); + }; + region->VisitRememberSet(visitFunc); +} + +void FixHeapWorker::FixRecentOldRegion(RegionDesc *region) +{ + auto visitFunc = [this, ®ion](BaseObject *object) { + DLOG(FIX, "fix: old obj %p<%p>(%zu)", object, object->GetTypeInfo(), object->GetSize()); + collector_->FixObjectRefFields(object); + }; + region->VisitRememberSetBeforeCopy(visitFunc); +} + +void FixHeapWorker::FixToRegion(RegionDesc *region) +{ + region->VisitAllObjects([this](BaseObject *object) { collector_->FixObjectRefFields(object); }); +} + +template +void FixHeapWorker::FixRegion(RegionDesc *region) +{ + size_t cellCount = 0; + if constexpr (type == FixHeapWorker::COLLECT_MONOSIZE_NONMOVABLE) { + cellCount = region->GetRegionCellCount(); + } + + region->VisitAllObjects([this, region, cellCount](BaseObject *object) { + if (collector_->IsSurvivedObject(object)) { + collector_->FixObjectRefFields(object); + } else { + if constexpr (type == FixHeapWorker::FILL_FREE) { + FillFreeObject(object, RegionalHeap::GetAllocSize(*object)); + } else if constexpr (type == FixHeapWorker::COLLECT_MONOSIZE_NONMOVABLE) { + result_.monoSizeNonMovableGarbages.emplace_back(region, object, cellCount); + } else if constexpr (type == FixHeapWorker::COLLECT_POLYSIZE_NONMOVABLE) { + result_.polySizeNonMovableGarbages.emplace_back(object, RegionalHeap::GetAllocSize(*object)); + } 
else if constexpr (type == FixHeapWorker::IGNORED) { + /* Ignore */ + } + DLOG(FIX, "fix: skip dead obj %p<%p>(%zu)", object, object->GetTypeInfo(), object->GetSize()); + } + }); +} + +template +void FixHeapWorker::FixRecentRegion(RegionDesc *region) +{ + size_t cellCount = 0; + if constexpr (type == FixHeapWorker::COLLECT_MONOSIZE_NONMOVABLE) { + cellCount = region->GetRegionCellCount(); + } + + region->VisitAllObjectsBeforeCopy([this, region, cellCount](BaseObject *object) { + if (region->IsNewObjectSinceMarking(object) || collector_->IsSurvivedObject(object)) { + collector_->FixObjectRefFields(object); + } else { // handle dead objects in tl-regions for concurrent gc. + if constexpr (type == FixHeapWorker::FILL_FREE) { + FillFreeObject(object, RegionalHeap::GetAllocSize(*object)); + } else if constexpr (type == FixHeapWorker::COLLECT_MONOSIZE_NONMOVABLE) { + result_.monoSizeNonMovableGarbages.emplace_back(region, object, cellCount); + } else if constexpr (type == FixHeapWorker::COLLECT_POLYSIZE_NONMOVABLE) { + result_.polySizeNonMovableGarbages.emplace_back(object, RegionalHeap::GetAllocSize(*object)); + } else if constexpr (type == FixHeapWorker::IGNORED) { + /* Ignore */ + } + DLOG(FIX, "skip dead obj %p<%p>(%zu)", object, object->GetTypeInfo(), object->GetSize()); + } + }); +} + +bool FixHeapWorker::Run([[maybe_unused]] uint32_t threadIndex) +{ + ThreadLocal::SetThreadType(ThreadType::GC_THREAD); + auto *task = getNextTask_(); + while (task != nullptr) { + DispatchRegionFixTask(task); + task = getNextTask_(); + } + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + monitor_.NotifyFinishOne(); + return true; +} + +void FixHeapWorker::DispatchRegionFixTask(FixHeapTask *task) +{ + result_.numProcessedRegions += 1; + RegionDesc *region = task->region; + switch (task->type) { + case FIX_OLD_REGION: + FixOldRegion(region); + break; + case FIX_RECENT_OLD_REGION: + FixRecentOldRegion(region); + break; + case FIX_RECENT_REGION: + if 
(region->IsMonoSizeNonMovableRegion()) { + FixRecentRegion(region); + } else if (region->IsPolySizeNonMovableRegion()) { + FixRecentRegion(region); + } else if (region->IsLargeRegion()) { + FixRecentRegion(region); + } else { + FixRecentRegion(region); + } + break; + case FIX_REGION: + if (region->IsMonoSizeNonMovableRegion()) { + FixRegion(region); + } else if (region->IsPolySizeNonMovableRegion()) { + FixRegion(region); + } else if (region->IsLargeRegion()) { + FixRegion(region); + } else { + FixRegion(region); + } + break; + case FIX_TO_REGION: + FixToRegion(region); + break; + default: + UNREACHABLE(); + } +} + +std::stack> PostFixHeapWorker::emptyRegionsToCollect {}; + +void PostFixHeapWorker::PostClearTask() +{ + for (auto [region, object, cellCount] : result_.monoSizeNonMovableGarbages) { + region->CollectNonMovableGarbage(object, cellCount); + } + for (auto [object, size] : result_.polySizeNonMovableGarbages) { + FillFreeObject(object, size); + } + DLOG(FIX, "Fix heap worker processed %d Regions, %d monoSizeNonMovableGarbages, %d polySizeNonMovableGarbages", + result_.numProcessedRegions, result_.monoSizeNonMovableGarbages.size(), + result_.polySizeNonMovableGarbages.size()); +} + +bool PostFixHeapWorker::Run([[maybe_unused]] uint32_t threadIndex) +{ + ThreadLocal::SetThreadType(ThreadType::GC_THREAD); + PostClearTask(); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + monitor_.NotifyFinishOne(); + return true; +} + +void PostFixHeapWorker::AddEmptyRegionToCollectDuringPostFix(RegionList *list, RegionDesc *region) +{ + PostFixHeapWorker::emptyRegionsToCollect.emplace(list, region); +} + +void PostFixHeapWorker::CollectEmptyRegions() +{ + RegionalHeap &theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + RegionManager ®ionManager = theAllocator.GetRegionManager(); + GCStats &stats = Heap::GetHeap().GetCollector().GetGCStats(); + size_t garbageSize = 0; + + while (!PostFixHeapWorker::emptyRegionsToCollect.empty()) { + auto [list, del] 
= PostFixHeapWorker::emptyRegionsToCollect.top(); + PostFixHeapWorker::emptyRegionsToCollect.pop(); + + list->DeleteRegion(del); + garbageSize += regionManager.CollectRegion(del); + } + stats.nonMovableGarbageSize += garbageSize; +} + +}; // namespace common diff --git a/common_components/heap/allocator/fix_heap.h b/common_components/heap/allocator/fix_heap.h new file mode 100644 index 0000000000000000000000000000000000000000..5275f73e77aa0ffba7d7d0f18e0d7e9b5ba2af3a --- /dev/null +++ b/common_components/heap/allocator/fix_heap.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef COMMON_COMPONENTS_HEAP_ALLOCATOR_FIX_HEAP_H +#define COMMON_COMPONENTS_HEAP_ALLOCATOR_FIX_HEAP_H + +#include +#include +#include +#include "common_components/taskpool/task.h" +namespace common { + +class ArkCollector; +class MarkingCollector; +class RegionDesc; +class RegionList; +class BaseObject; + +/** + * Enum representing different types of heap region fixing tasks + */ +enum FixRegionType { + FIX_OLD_REGION, // Fix all rset objects + FIX_RECENT_OLD_REGION, // Fix all rset objects before copyline + FIX_RECENT_REGION, // Fix all objects before copyline + FIX_REGION, // Fix all survived objects + FIX_TO_REGION, // Fix objects in to region +}; + +/** + * Task unit for parallel heap fixing operations + */ +struct FixHeapTask final { + RegionDesc *region; + FixRegionType type; + + FixHeapTask(RegionDesc *region, FixRegionType type) noexcept : region(region), type(type) {} + + // Explicitly delete copy + FixHeapTask(const FixHeapTask &) = delete; + FixHeapTask &operator=(const FixHeapTask &) = delete; + + // Default move operations + FixHeapTask(FixHeapTask &&) = default; + FixHeapTask &operator=(FixHeapTask &&) = default; +}; + +using FixHeapTaskList = std::vector; + +/** + * Worker class for parallel heap fixing operations + */ +class FixHeapWorker : public common::Task { +public: + /** + * Result structure containing the collected garbages and stats of heap fixing operations + */ + struct Result { + std::vector> monoSizeNonMovableGarbages; + std::vector> polySizeNonMovableGarbages; + size_t numProcessedRegions = 0; + }; + + FixHeapWorker(ArkCollector *collector, TaskPackMonitor &monitor, Result &result, + std::function &next) noexcept + : Task(0), collector_(collector), monitor_(monitor), result_(result), getNextTask_(next) + { + } + + /** + * Dispatches a region fixing task based on its type + */ + void DispatchRegionFixTask(FixHeapTask *task); + + bool Run([[maybe_unused]] uint32_t threadIndex) override; + + /** + * Collect fix heap tasks from a 
region list + */ + static void CollectFixHeapTasks(FixHeapTaskList &taskList, RegionList ®ionList, FixRegionType type); + +private: + /** + * Enum defining how to handle dead objects in a region + */ + enum DeadObjectHandlerType { + FILL_FREE, // Fill in free object immediately + COLLECT_MONOSIZE_NONMOVABLE, // Collect mono size non-movable objects (to be added to freelist) + COLLECT_POLYSIZE_NONMOVABLE, // Collect non-movable objects (to be filled free later) + IGNORED, // Ignore dead objects + }; + + void FixOldRegion(RegionDesc *region); + void FixRecentOldRegion(RegionDesc *region); + void FixToRegion(RegionDesc *region); + + template + void FixRegion(RegionDesc *region); + + template + void FixRecentRegion(RegionDesc *region); + + ArkCollector *collector_; + TaskPackMonitor &monitor_; + Result &result_; + std::function getNextTask_; +}; + +/** + * Worker class for collecting garbages units after heap fixing operations + */ +class PostFixHeapWorker : public common::Task { +public: + PostFixHeapWorker(FixHeapWorker::Result &result, TaskPackMonitor &monitor) noexcept + + : Task(0), monitor_(monitor), result_(result) + { + } + + /** + * Performs post-processing cleanup tasks + */ + void PostClearTask(); + + // During fix phase we also collect the entire empty regions into garbage list from non-movable region. 
+ // However, we can only do it during post-fix because those region can contains metadata for getObjectSize + // Hence we cache empty regions in those two stack and duirng post fix we collect the region as garbage, + static std::stack> emptyRegionsToCollect; + static void AddEmptyRegionToCollectDuringPostFix(RegionList *list, RegionDesc *region); + static void CollectEmptyRegions(); + + bool Run([[maybe_unused]] uint32_t threadIndex) override; + +private: + TaskPackMonitor &monitor_; + FixHeapWorker::Result &result_; +}; + +}; // namespace common +#endif diff --git a/common_components/heap/allocator/memory_map.cpp b/common_components/heap/allocator/memory_map.cpp index 90420c26bf0ee0c8985de206ee929811910418d1..c9df91cfbc0d5ff3e96831e3a58ac03215bab291 100755 --- a/common_components/heap/allocator/memory_map.cpp +++ b/common_components/heap/allocator/memory_map.cpp @@ -23,7 +23,7 @@ #endif #include "common_components/platform/os.h" -#include "common_components/base_runtime/hooks.h" +#include "common_components/common_runtime/hooks.h" #include "common_components/base/sys_call.h" #include "common_components/log/log.h" @@ -81,7 +81,6 @@ MemoryMap* MemoryMap::MapMemory(size_t reqSize, size_t initSize, const Option& o MemoryMap* MemoryMap::MapMemoryAlignInner4G(uint64_t reqSize, uint64_t initSize, const Option& opt) { static constexpr uint64_t MAX_SUPPORT_CAPACITY = 4ULL * GB; - LOGF_CHECK(reqSize <= MAX_SUPPORT_CAPACITY) << "Max support capacity 4G"; void* mappedAddr = nullptr; reqSize = AllocUtilRndUp(reqSize, ALLOC_UTIL_PAGE_SIZE); diff --git a/common_components/heap/allocator/region_desc.h b/common_components/heap/allocator/region_desc.h index 380efeceb0b1ec20f2fb3b7f29da65070bc33b88..8fd719125431d84e18cae6514e6aeba73583be49 100755 --- a/common_components/heap/allocator/region_desc.h +++ b/common_components/heap/allocator/region_desc.h @@ -91,21 +91,24 @@ public: COPYED, }; - static const size_t UNIT_SIZE; // same as system page size + // default common region unit 
size. + static constexpr size_t UNIT_SIZE = 256 * KB; - // regarding a object as a large object when the size is greater than 8 units. - static const size_t LARGE_OBJECT_DEFAULT_THRESHOLD; + // threshold for object to unique a region + static constexpr size_t LARGE_OBJECT_DEFAULT_THRESHOLD = UNIT_SIZE * 2 / 3; // release a large object when the size is greater than 4096KB. static constexpr size_t LARGE_OBJECT_RELEASE_THRESHOLD = 4096 * KB; + static constexpr size_t DEFAULT_REGION_UNIT_MASK = RegionDesc::UNIT_SIZE - 1; + RegionDesc() { metadata.allocPtr = reinterpret_cast(nullptr); - metadata.traceLine = std::numeric_limits::max(); + metadata.markingLine = std::numeric_limits::max(); metadata.forwardLine = std::numeric_limits::max(); - metadata.fixLine = std::numeric_limits::max(); metadata.freeSlot = nullptr; + metadata.regionBase = reinterpret_cast(nullptr); metadata.regionStart = reinterpret_cast(nullptr); metadata.regionEnd = reinterpret_cast(nullptr); metadata.regionRSet = nullptr; @@ -119,9 +122,8 @@ public: void SetReadOnly() { constexpr int pageProtRead = 1; - DLOG(REPORT, "try to set readonly to %p, size is %ld", GetRegionStart(), GetRegionEnd() - GetRegionStart()); - if (PageProtect(reinterpret_cast(GetRegionStart()), - GetRegionEnd() - GetRegionStart(), pageProtRead) != 0) { + DLOG(REPORT, "try to set readonly to %p, size is %ld", GetRegionBase(), GetRegionBaseSize()); + if (PageProtect(reinterpret_cast(GetRegionBase()), GetRegionBaseSize(), pageProtRead) != 0) { DLOG(REPORT, "set read only fail"); } } @@ -129,131 +131,74 @@ public: void ClearReadOnly() { constexpr int pageProtReadWrite = 3; - DLOG(REPORT, "try to set read & write to %p, size is %ld", GetRegionStart(), GetRegionEnd() - GetRegionStart()); - if (PageProtect(reinterpret_cast(GetRegionStart()), - GetRegionEnd() - GetRegionStart(), pageProtReadWrite) != 0) { + DLOG(REPORT, "try to set read & write to %p, size is %ld", GetRegionBase(), GetRegionBaseSize()); + if 
(PageProtect(reinterpret_cast(GetRegionBase()), GetRegionBaseSize(), pageProtReadWrite) != 0) { DLOG(REPORT, "clear read only fail"); } } - RegionLiveDesc* GetLiveInfo() - { - RegionLiveDesc* liveInfo = __atomic_load_n(&metadata.liveInfo, std::memory_order_acquire); - if (reinterpret_cast(liveInfo) == RegionLiveDesc::TEMPORARY_PTR) { - return nullptr; - } - return liveInfo; - } - NO_INLINE RegionLiveDesc* AllocLiveInfo(RegionLiveDesc* liveInfo) - { - RegionLiveDesc* newValue = reinterpret_cast(RegionLiveDesc::TEMPORARY_PTR); - if (__atomic_compare_exchange_n(&metadata.liveInfo, &liveInfo, newValue, false, std::memory_order_seq_cst, - std::memory_order_relaxed)) { - RegionLiveDesc* allocatedLiveInfo = HeapBitmapManager::GetHeapBitmapManager().AllocateRegionLiveDesc(); - allocatedLiveInfo->relatedRegion = this; - __atomic_store_n(&metadata.liveInfo, allocatedLiveInfo, std::memory_order_release); - DLOG(REGION, "region %p@%#zx alloc liveinfo %p", this, GetRegionStart(), metadata.liveInfo); - return allocatedLiveInfo; - } - return nullptr; - } - ALWAYS_INLINE RegionLiveDesc* GetOrAllocLiveInfo() - { - do { - RegionLiveDesc* liveInfo = __atomic_load_n(&metadata.liveInfo, std::memory_order_acquire); - if (UNLIKELY_CC(reinterpret_cast(liveInfo) == RegionLiveDesc::TEMPORARY_PTR)) { - continue; - } - if (LIKELY_CC(liveInfo != nullptr)) { - return liveInfo; - } - liveInfo = AllocLiveInfo(liveInfo); - if (liveInfo != nullptr) { - return liveInfo; - } - } while (true); - return nullptr; - } - - RegionBitmap* GetMarkBitmap() + RegionBitmap *GetMarkBitmap() { - RegionLiveDesc* liveInfo = GetLiveInfo(); - if (liveInfo == nullptr) { - return nullptr; - } - RegionBitmap* bitmap = __atomic_load_n(&liveInfo->markBitmap, std::memory_order_acquire); + RegionBitmap *bitmap = __atomic_load_n(&metadata.liveInfo_.markBitmap_, std::memory_order_acquire); if (reinterpret_cast(bitmap) == RegionLiveDesc::TEMPORARY_PTR) { return nullptr; } return bitmap; } - NO_INLINE RegionBitmap* 
AllocMarkBitmap(RegionLiveDesc* liveInfo, RegionBitmap* bitmap) - { - RegionBitmap* newValue = reinterpret_cast(RegionLiveDesc::TEMPORARY_PTR); - if (__atomic_compare_exchange_n(&liveInfo->markBitmap, &bitmap, newValue, false, std::memory_order_seq_cst, - std::memory_order_relaxed)) { - RegionBitmap* allocated = - HeapBitmapManager::GetHeapBitmapManager().AllocateRegionBitmap(GetRegionSize()); - __atomic_store_n(&liveInfo->markBitmap, allocated, std::memory_order_release); - DLOG(REGION, "region %p@%#zx liveinfo %p alloc markbitmap %p", - this, GetRegionStart(), metadata.liveInfo, metadata.liveInfo->markBitmap); - return allocated; - } - return nullptr; - } ALWAYS_INLINE RegionBitmap* GetOrAllocMarkBitmap() { - RegionLiveDesc* liveInfo = GetOrAllocLiveInfo(); do { - RegionBitmap* bitmap = __atomic_load_n(&liveInfo->markBitmap, std::memory_order_acquire); + RegionBitmap *bitmap = __atomic_load_n(&metadata.liveInfo_.markBitmap_, std::memory_order_acquire); if (UNLIKELY_CC(reinterpret_cast(bitmap) == RegionLiveDesc::TEMPORARY_PTR)) { continue; } if (LIKELY_CC(bitmap != nullptr)) { return bitmap; } - bitmap = AllocMarkBitmap(liveInfo, bitmap); - if (bitmap != nullptr) { - return bitmap; + RegionBitmap *newValue = reinterpret_cast(RegionLiveDesc::TEMPORARY_PTR); + if (__atomic_compare_exchange_n(&metadata.liveInfo_.markBitmap_, &bitmap, newValue, false, + std::memory_order_seq_cst, std::memory_order_relaxed)) { + RegionBitmap *allocated = + HeapBitmapManager::GetHeapBitmapManager().AllocateRegionBitmap(GetRegionBaseSize()); + __atomic_store_n(&metadata.liveInfo_.markBitmap_, allocated, std::memory_order_release); + DLOG(REGION, "region %p(base=%#zx)@%#zx liveinfo %p alloc markbitmap %p", + this, GetRegionBase(), GetRegionStart(), &metadata.liveInfo_, metadata.liveInfo_.markBitmap_); + return allocated; } } while (true); return nullptr; } - RegionBitmap* GetResurrectBitmap() + RegionBitmap *GetResurrectBitmap() { - RegionLiveDesc* liveInfo = GetLiveInfo(); - if (liveInfo 
== nullptr) { - return nullptr; - } - RegionBitmap* bitmap = __atomic_load_n(&liveInfo->resurrectBitmap, std::memory_order_acquire); + RegionBitmap *bitmap = __atomic_load_n(&metadata.liveInfo_.resurrectBitmap_, std::memory_order_acquire); if (reinterpret_cast(bitmap) == RegionLiveDesc::TEMPORARY_PTR) { return nullptr; } return bitmap; } - RegionBitmap* GetOrAllocResurrectBitmap() + RegionBitmap *GetOrAllocResurrectBitmap() { - RegionLiveDesc* liveInfo = GetOrAllocLiveInfo(); do { - RegionBitmap* bitmap = __atomic_load_n(&liveInfo->resurrectBitmap, std::memory_order_acquire); + RegionBitmap *bitmap = __atomic_load_n(&metadata.liveInfo_.resurrectBitmap_, std::memory_order_acquire); if (UNLIKELY_CC(reinterpret_cast(bitmap) == RegionLiveDesc::TEMPORARY_PTR)) { continue; } if (LIKELY_CC(bitmap != nullptr)) { return bitmap; } - RegionBitmap* newValue = reinterpret_cast(RegionLiveDesc::TEMPORARY_PTR); - if (__atomic_compare_exchange_n(&liveInfo->resurrectBitmap, &bitmap, newValue, false, + RegionBitmap *newValue = reinterpret_cast(RegionLiveDesc::TEMPORARY_PTR); + if (__atomic_compare_exchange_n(&metadata.liveInfo_.resurrectBitmap_, &bitmap, newValue, false, std::memory_order_seq_cst, std::memory_order_relaxed)) { - RegionBitmap* allocated = - HeapBitmapManager::GetHeapBitmapManager().AllocateRegionBitmap(GetRegionSize()); - __atomic_store_n(&liveInfo->resurrectBitmap, allocated, std::memory_order_release); - DLOG(REGION, "region %p@%#zx liveinfo %p alloc resurrectbitmap %p", - this, GetRegionStart(), metadata.liveInfo, metadata.liveInfo->resurrectBitmap); + RegionBitmap *allocated = + HeapBitmapManager::GetHeapBitmapManager().AllocateRegionBitmap(GetRegionBaseSize()); + __atomic_store_n(&metadata.liveInfo_.resurrectBitmap_, allocated, std::memory_order_release); + DLOG(REGION, "region %p(base=%#zx)@%#zx liveinfo %p alloc resurrectbitmap %p", + this, GetRegionBase(), GetRegionStart(), &metadata.liveInfo_, + metadata.liveInfo_.resurrectBitmap_); return allocated; } } 
while (true); @@ -263,11 +208,7 @@ public: RegionBitmap* GetEnqueueBitmap() { - RegionLiveDesc* liveInfo = GetLiveInfo(); - if (liveInfo == nullptr) { - return nullptr; - } - RegionBitmap* bitmap = __atomic_load_n(&liveInfo->enqueueBitmap, std::memory_order_acquire); + RegionBitmap *bitmap = __atomic_load_n(&metadata.liveInfo_.enqueueBitmap_, std::memory_order_acquire); if (reinterpret_cast(bitmap) == RegionLiveDesc::TEMPORARY_PTR) { return nullptr; } @@ -276,23 +217,22 @@ public: RegionBitmap* GetOrAllocEnqueueBitmap() { - RegionLiveDesc* liveInfo = GetOrAllocLiveInfo(); do { - RegionBitmap* bitmap = __atomic_load_n(&liveInfo->enqueueBitmap, std::memory_order_acquire); + RegionBitmap *bitmap = __atomic_load_n(&metadata.liveInfo_.enqueueBitmap_, std::memory_order_acquire); if (UNLIKELY_CC(reinterpret_cast(bitmap) == RegionLiveDesc::TEMPORARY_PTR)) { continue; } if (LIKELY_CC(bitmap != nullptr)) { return bitmap; } - RegionBitmap* newValue = reinterpret_cast(RegionLiveDesc::TEMPORARY_PTR); - if (__atomic_compare_exchange_n(&liveInfo->enqueueBitmap, &bitmap, newValue, false, + RegionBitmap* newValue = reinterpret_cast(RegionLiveDesc::TEMPORARY_PTR); + if (__atomic_compare_exchange_n(&metadata.liveInfo_.enqueueBitmap_, &bitmap, newValue, false, std::memory_order_seq_cst, std::memory_order_relaxed)) { - RegionBitmap* allocated = - HeapBitmapManager::GetHeapBitmapManager().AllocateRegionBitmap(GetRegionSize()); - __atomic_store_n(&liveInfo->enqueueBitmap, allocated, std::memory_order_release); - DLOG(REGION, "region %p@%#zx liveinfo %p alloc enqueuebitmap %p", - this, GetRegionStart(), metadata.liveInfo, metadata.liveInfo->enqueueBitmap); + RegionBitmap *allocated = + HeapBitmapManager::GetHeapBitmapManager().AllocateRegionBitmap(GetRegionBaseSize()); + __atomic_store_n(&metadata.liveInfo_.enqueueBitmap_, allocated, std::memory_order_release); + DLOG(REGION, "region %p(base=%#zx)@%#zx liveinfo %p alloc enqueuebitmap %p", + this, GetRegionBase(), GetRegionStart(), 
&metadata.liveInfo_, metadata.liveInfo_.enqueueBitmap_); return allocated; } } while (true); @@ -320,7 +260,6 @@ public: if (IsLargeRegion()) { return MarkObjectForLargeRegion(obj); } - // top1 issue size_t offset = GetAddressOffset(reinterpret_cast(obj)); bool marked = GetOrAllocMarkBitmap()->MarkBits(offset); DCHECK_CC(IsMarkedObject(obj)); @@ -396,13 +335,6 @@ public: return enqueBitmap->IsMarked(offset); } - void CheckAndMarkObject(const BaseObject* obj) - { - if (!IsMarkedObject(obj)) { - MarkObject(obj); - } - } - RegionRSet* GetRSet() { return metadata.regionRSet; @@ -416,19 +348,13 @@ public: bool MarkRSetCardTable(BaseObject* obj) { size_t offset = GetAddressOffset(reinterpret_cast(obj)); - return metadata.regionRSet->MarkCardTable(offset); - } - - bool IsInRSet(BaseObject* obj) - { - size_t offset = GetAddressOffset(reinterpret_cast(obj)); - return metadata.regionRSet->IsMarkedCard(offset); + return GetRSet()->MarkCardTable(offset); } ALWAYS_INLINE_CC size_t GetAddressOffset(HeapAddress address) { - DCHECK_CC(GetRegionStart() <= address); - return (address - metadata.regionStart); + DCHECK_CC(GetRegionBaseFast() <= address); + return (address - GetRegionBaseFast()); } enum class UnitRole : uint8_t { @@ -445,20 +371,24 @@ public: enum class RegionType : uint8_t { FREE_REGION, + GARBAGE_REGION, + + // ************************boundary of dead region and alive region************************** THREAD_LOCAL_REGION, RECENT_FULL_REGION, FROM_REGION, - LONE_FROM_REGION, EXEMPTED_FROM_REGION, + LONE_FROM_REGION, TO_REGION, OLD_REGION, + THREAD_LOCAL_OLD_REGION, - // pinned object will not be forwarded by concurrent copying gc. - FULL_PINNED_REGION, - RECENT_PINNED_REGION, - FIXED_PINNED_REGION, - FULL_FIXED_PINNED_REGION, + // non movable object will not be forwarded by concurrent copying gc. 
+ RECENT_POLYSIZE_NONMOVABLE_REGION, + FULL_POLYSIZE_NONMOVABLE_REGION, + MONOSIZE_NONMOVABLE_REGION, + FULL_MONOSIZE_NONMOVABLE_REGION, // region for raw-pointer objects which are exposed to runtime thus can not be moved by any gc. // raw-pointer region becomes pinned region when none of its member objects are used as raw pointer. @@ -470,11 +400,51 @@ public: RECENT_LARGE_REGION, LARGE_REGION, - GARBAGE_REGION, READ_ONLY_REGION, APPSPAWN_REGION, + + END_OF_REGION_TYPE, + + ALIVE_REGION_FIRST = THREAD_LOCAL_REGION, }; + static bool IsAliveRegionType(RegionType type) + { + return static_cast(type) >= static_cast(RegionType::ALIVE_REGION_FIRST); + } + + static bool IsInRecentSpace(RegionType type) + { + return type == RegionType::THREAD_LOCAL_REGION || type == RegionType::RECENT_FULL_REGION; + } + + static bool IsInYoungSpaceForWB(RegionType type) + { + return type == RegionType::THREAD_LOCAL_REGION || type == RegionType::RECENT_FULL_REGION || + type == RegionType::FROM_REGION; + } + + static bool IsInYoungSpace(RegionType type) + { + return type == RegionType::THREAD_LOCAL_REGION || type == RegionType::RECENT_FULL_REGION || + type == RegionType::FROM_REGION || type == RegionType::EXEMPTED_FROM_REGION; + } + + static bool IsInFromSpace(RegionType type) + { + return type == RegionType::FROM_REGION || type == RegionType::EXEMPTED_FROM_REGION; + } + + static bool IsInToSpace(RegionType type) + { + return type == RegionType::TO_REGION; + } + + static bool IsInOldSpace(RegionType type) + { + return type == RegionType::OLD_REGION; + } + static void Initialize(size_t nUnit, uintptr_t regionInfoAddr, uintptr_t heapAddress) { UnitInfo::totalUnitCount = nUnit; @@ -486,24 +456,33 @@ public: { UnitInfo* unit = RegionDesc::UnitInfo::GetUnitInfo(idx); DCHECK_CC((reinterpret_cast(unit) % 8) == 0); // 8: Align with 8 - if (static_cast(unit->GetMetadata().unitRole) == UnitRole::SUBORDINATE_UNIT) { - return unit->GetMetadata().ownerRegion; - } else { - return reinterpret_cast(unit); 
- } + DCHECK_CC(static_cast(unit->GetMetadata().unitRole) != UnitRole::SUBORDINATE_UNIT); + return reinterpret_cast(unit); } static RegionDesc* GetRegionDescAt(uintptr_t allocAddr) { + ASSERT_LOGF(Heap::IsHeapAddress(allocAddr), "Cannot get region info of a non-heap object"); UnitInfo* unit = reinterpret_cast(UnitInfo::heapStartAddress - (((allocAddr - UnitInfo::heapStartAddress) / UNIT_SIZE) + 1) * sizeof(RegionDesc)); DCHECK_CC((reinterpret_cast(unit) % 8) == 0); // 8: Align with 8 - if (static_cast(unit->GetMetadata().unitRole) == UnitRole::SUBORDINATE_UNIT) { - return unit->GetMetadata().ownerRegion; - } else { - return reinterpret_cast(unit); - } + DCHECK_CC(static_cast(unit->GetMetadata().unitRole) != UnitRole::SUBORDINATE_UNIT); + return reinterpret_cast(unit); + } + + // This could only used for surely alive region, such as from interpreter, + // because ONLY alive region have `InlinedRegionMetaData` + static RegionDesc* GetAliveRegionDescAt(uintptr_t allocAddr) + { + // only alive region have `InlinedRegionMetaData`. 
+ DCHECK_CC(IsAliveRegionType(GetRegionDescAt(allocAddr)->GetRegionType())); + InlinedRegionMetaData *metaData = InlinedRegionMetaData::GetInlinedRegionMetaData(allocAddr); + UnitInfo *unit = reinterpret_cast(metaData->regionDesc_); + DCHECK_CC(reinterpret_cast(unit) == GetRegionDescAt(allocAddr)); + DCHECK_CC((reinterpret_cast(unit) % 8) == 0); // 8: Align with 8 + DCHECK_CC(static_cast(unit->GetMetadata().unitRole) != UnitRole::SUBORDINATE_UNIT); + return reinterpret_cast(unit); } static void InitFreeRegion(size_t unitIdx, size_t nUnit) @@ -512,6 +491,13 @@ public: region->InitRegionDesc(nUnit, UnitRole::FREE_UNITS); } + static RegionDesc* ResetRegion(size_t unitIdx, size_t nUnit, RegionDesc::UnitRole uclass) + { + RegionDesc* region = reinterpret_cast(RegionDesc::UnitInfo::GetUnitInfo(unitIdx)); + region->ResetRegion(nUnit, uclass); + return region; + } + static RegionDesc* InitRegion(size_t unitIdx, size_t nUnit, RegionDesc::UnitRole uclass) { RegionDesc* region = reinterpret_cast(RegionDesc::UnitInfo::GetUnitInfo(unitIdx)); @@ -563,21 +549,19 @@ public: #endif } - BaseObject* GetFirstObject() const { return reinterpret_cast(GetRegionStart()); } - - bool IsEmpty() const + size_t GetRegionSize() const { - ASSERT_LOGF(IsSmallRegion(), "wrong region type"); - return GetRegionAllocPtr() == GetRegionStart(); + DCHECK_CC(GetRegionEnd() > GetRegionStart()); + return GetRegionEnd() - GetRegionStart(); } - size_t GetRegionSize() const + size_t GetRegionBaseSize() const { - DCHECK_CC(metadata.regionEnd > GetRegionStart()); - return metadata.regionEnd - GetRegionStart(); + DCHECK_CC(GetRegionEnd() > GetRegionBase()); + return GetRegionEnd() - GetRegionBase(); } - size_t GetUnitCount() const { return GetRegionSize() / UNIT_SIZE; } + size_t GetUnitCount() const { return GetRegionBaseSize() / UNIT_SIZE; } size_t GetAvailableSize() const { @@ -592,18 +576,19 @@ public: const char* GetTypeName() const; #endif - void VisitAllObjectsWithFixedSize(size_t cellCount, const 
std::function&& func); void VisitAllObjects(const std::function&& func); - void VisitAllObjectsBeforeFix(const std::function&& func); - void VisitAllObjectsBeforeTrace(const std::function&& func); + void VisitAllObjectsBeforeCopy(const std::function&& func); bool VisitLiveObjectsUntilFalse(const std::function&& func); + + void VisitRememberSetBeforeMarking(const std::function& func); + void VisitRememberSetBeforeCopy(const std::function& func); void VisitRememberSet(const std::function& func); // reset so that this region can be reused for allocation void InitFreeUnits() { if (metadata.regionRSet != nullptr) { - delete metadata.regionRSet; + RegionRSet::DestroyRegionRSet(metadata.regionRSet); metadata.regionRSet = nullptr; } size_t nUnit = GetUnitCount(); @@ -613,29 +598,12 @@ public: } } - // the interface can only be used to clear live info after gc. - void CheckAndClearLiveInfo(RegionLiveDesc* liveInfo) - { - // Garbage region may be reused by other thread. For the sake of safety, we don't clean it here. - // We will clean it before the region is accessable. - if (IsGarbageRegion()) { - return; - } - // Check the value whether is expected, in order to avoid resetting a reused region. 
- if (metadata.liveInfo == liveInfo) { - metadata.liveInfo = nullptr; - metadata.liveByteCount = 0; - } - } - void ClearLiveInfo() { - if (metadata.liveInfo != nullptr) { - DCHECK_CC(metadata.liveInfo->relatedRegion == this); - metadata.liveInfo = nullptr; - DLOG(REGION, "region %p@%#zx+%zu clear liveinfo %p type %u", this, this->GetRegionStart(), - this->GetRegionSize(), metadata.liveInfo, this->GetRegionType()); - } + DCHECK_CC(metadata.liveInfo_.relatedRegion_ == this); + metadata.liveInfo_.ClearLiveInfo(); + DLOG(REGION, "region %p(base=%#zx)@%#zx+%zu clear liveinfo %p type %u", + this, GetRegionBase(), GetRegionStart(), GetRegionSize(), &metadata.liveInfo_, GetRegionType()); metadata.liveByteCount = 0; } @@ -648,6 +616,9 @@ public: { metadata.regionBits.AtomicSetValue(RegionBitOffset::BIT_OFFSET_REGION_TYPE, BITS_5, static_cast(type)); + if (IsAliveRegionType(type)) { + InlinedRegionMetaData::GetInlinedRegionMetaData(this)->SetRegionType(type); + } } void SetMarkedRegionFlag(uint8_t flag) @@ -691,11 +662,28 @@ public: BITS_5)); } + static RegionType GetAliveRegionType(uintptr_t allocAddr) + { + // only alive region have `InlinedRegionMetaData`. 
+ DCHECK_CC(IsAliveRegionType(GetRegionDescAt(allocAddr)->GetRegionType())); + InlinedRegionMetaData *metaData = InlinedRegionMetaData::GetInlinedRegionMetaData(allocAddr); + return metaData->GetRegionType(); + } + UnitRole GetUnitRole() const { return static_cast(metadata.unitRole); } size_t GetUnitIdx() const { return RegionDesc::UnitInfo::GetUnitIdx(reinterpret_cast(this)); } - HeapAddress GetRegionStart() const { return RegionDesc::GetUnitAddress(GetUnitIdx()); } + HeapAddress GetRegionBase() const { return RegionDesc::GetUnitAddress(GetUnitIdx()); } + + // This could only used to a `RegionDesc` which has been initialized + HeapAddress GetRegionBaseFast() const + { + ASSERT(metadata.regionBase == GetRegionBase()); + return metadata.regionBase; + } + + HeapAddress GetRegionStart() const { return metadata.regionStart; } HeapAddress GetRegionEnd() const { return metadata.regionEnd; } @@ -703,17 +691,16 @@ public: HeapAddress GetRegionAllocPtr() const { return metadata.allocPtr; } - HeapAddress GetTraceLine() const { return metadata.traceLine; } + HeapAddress GetMarkingLine() const { return metadata.markingLine; } HeapAddress GetCopyLine() const { return metadata.forwardLine; } - HeapAddress GetFixLine() const { return metadata.fixLine; } - void SetTraceLine() + void SetMarkingLine() { - if (metadata.traceLine == std::numeric_limits::max()) { + if (metadata.markingLine == std::numeric_limits::max()) { uintptr_t line = GetRegionAllocPtr(); - metadata.traceLine = line; - DLOG(REGION, "set region %p@%#zx+%zu trace-line %#zx type %u", this, this->GetRegionStart(), - this->GetRegionSize(), this->GetTraceLine(), this->GetRegionType()); + metadata.markingLine = line; + DLOG(REGION, "set region %p(base=%#zx)@%#zx+%zu marking-line %#zx type %u", + this, GetRegionBase(), GetRegionStart(), GetRegionSize(), GetMarkingLine(), GetRegionType()); } } @@ -722,26 +709,15 @@ public: if (metadata.forwardLine == std::numeric_limits::max()) { uintptr_t line = GetRegionAllocPtr(); 
metadata.forwardLine = line; - DLOG(REGION, "set region %p@%#zx+%zu copy-line %#zx type %u", this, this->GetRegionStart(), - this->GetRegionSize(), this->GetCopyLine(), this->GetRegionType()); + DLOG(REGION, "set region %p(base=%#zx)@%#zx+%zu copy-line %#zx type %u", + this, GetRegionBase(), GetRegionStart(), GetRegionSize(), GetCopyLine(), GetRegionType()); } } - void SetFixLine() + void ClearMarkingCopyLine() { - if (metadata.fixLine == std::numeric_limits::max()) { - uintptr_t line = GetRegionAllocPtr(); - metadata.fixLine = line; - DLOG(REGION, "set region %p@%#zx+%zu fix-line %#zx type %u", this, this->GetRegionStart(), - this->GetRegionSize(), this->GetFixLine(), this->GetRegionType()); - } - } - - void ClearTraceCopyFixLine() - { - metadata.traceLine = std::numeric_limits::max(); + metadata.markingLine = std::numeric_limits::max(); metadata.forwardLine = std::numeric_limits::max(); - metadata.fixLine = std::numeric_limits::max(); } void ClearFreeSlot() @@ -749,9 +725,9 @@ public: metadata.freeSlot = nullptr; } - bool IsNewObjectSinceTrace(const BaseObject* obj) + bool IsNewObjectSinceMarking(const BaseObject* obj) { - return GetTraceLine() <= reinterpret_cast(obj); + return GetMarkingLine() <= reinterpret_cast(obj); } bool IsNewObjectSinceForward(const BaseObject* obj) @@ -759,50 +735,39 @@ public: return GetCopyLine() <= reinterpret_cast(obj); } - bool IsNewObjectSinceFix(const BaseObject* obj) - { - return GetFixLine() <= reinterpret_cast(obj); - } - bool IsNewRegionSinceForward() const { return GetCopyLine() == GetRegionStart(); } - bool IsNewRegionSinceFix() const - { - return GetFixLine() == GetRegionStart(); - } - bool IsInRecentSpace() const { RegionType type = GetRegionType(); - return type == RegionType::THREAD_LOCAL_REGION || type == RegionType::RECENT_FULL_REGION; + return RegionDesc::IsInRecentSpace(type); } bool IsInYoungSpace() const { RegionType type = GetRegionType(); - return type == RegionType::THREAD_LOCAL_REGION || type == 
RegionType::RECENT_FULL_REGION || - type == RegionType::FROM_REGION || type == RegionType::EXEMPTED_FROM_REGION; + return RegionDesc::IsInYoungSpace(type); } bool IsInFromSpace() const { RegionType type = GetRegionType(); - return type == RegionType::FROM_REGION || type == RegionType::EXEMPTED_FROM_REGION; + return RegionDesc::IsInFromSpace(type); } bool IsInToSpace() const { RegionType type = GetRegionType(); - return type == RegionType::TO_REGION; + return RegionDesc::IsInToSpace(type); } bool IsInOldSpace() const { RegionType type = GetRegionType(); - return type == RegionType::OLD_REGION; + return RegionDesc::IsInOldSpace(type); } int32_t IncRawPointerObjectCount() @@ -833,6 +798,7 @@ public: uintptr_t Alloc(size_t size) { + DCHECK_CC(size > 0); size_t limit = GetRegionEnd(); if (metadata.allocPtr + size <= limit) { uintptr_t addr = metadata.allocPtr; @@ -865,21 +831,22 @@ public: return static_cast(metadata.unitRole) == UnitRole::LARGE_SIZED_UNITS; } - bool IsFixedRegion() const + bool IsMonoSizeNonMovableRegion() const { - return (GetRegionType() == RegionType::FIXED_PINNED_REGION) || - (GetRegionType() == RegionType::FULL_FIXED_PINNED_REGION); + return (GetRegionType() == RegionType::MONOSIZE_NONMOVABLE_REGION) || + (GetRegionType() == RegionType::FULL_MONOSIZE_NONMOVABLE_REGION); } bool IsThreadLocalRegion() const { - return GetRegionType() == RegionType::THREAD_LOCAL_REGION; + return GetRegionType() == RegionType::THREAD_LOCAL_REGION || + GetRegionType() == RegionType::THREAD_LOCAL_OLD_REGION; } - bool IsPinnedRegion() const + bool IsPolySizeNonMovableRegion() const { - return (GetRegionType() == RegionType::FULL_PINNED_REGION) || - (GetRegionType() == RegionType::RECENT_PINNED_REGION); + return (GetRegionType() == RegionType::FULL_POLYSIZE_NONMOVABLE_REGION) || + (GetRegionType() == RegionType::RECENT_POLYSIZE_NONMOVABLE_REGION); } bool IsReadOnlyRegion() const @@ -895,10 +862,10 @@ public: return 
reinterpret_cast(UnitInfo::GetUnitInfo(metadata.prevRegionIdx)); } - bool CollectPinnedGarbage(BaseObject* obj, size_t cellCount) + bool CollectNonMovableGarbage(BaseObject* obj, size_t cellCount) { std::lock_guard lg(metadata.regionMutex); - if (IsFreePinnedObject(obj)) { + if (IsFreeNonMovableObject(obj)) { return false; } size_t size = (cellCount + 1) * sizeof(uint64_t); @@ -920,7 +887,7 @@ public: return reinterpret_cast(res); } - HeapAddress AllocPinnedFromFreeList() + HeapAddress AllocNonMovableFromFreeList() { std::lock_guard lg(metadata.regionMutex); HeapAddress addr = GetFreeSlot(); @@ -937,7 +904,7 @@ public: return addr; } - bool IsFreePinnedObject(BaseObject* object) + bool IsFreeNonMovableObject(BaseObject* object) { ObjectSlot* slot = reinterpret_cast(object); return slot->isFree_; @@ -985,6 +952,7 @@ public: } bool IsToRegion() const { return GetRegionType() == RegionType::TO_REGION; } + bool IsOldRegion() const { return GetRegionType() == RegionType::OLD_REGION; } bool IsGarbageRegion() const { return GetRegionType() == RegionType::GARBAGE_REGION; } bool IsFreeRegion() const { return static_cast(metadata.unitRole) == UnitRole::FREE_UNITS; } @@ -1018,6 +986,16 @@ public: this->SetPrevRegion(nullptr); } + static constexpr size_t GetAllocPtrOffset() + { + return offsetof(UnitMetadata, allocPtr); + } + + static constexpr size_t GetRegionEndOffset() + { + return offsetof(UnitMetadata, regionEnd); + } + private: void VisitAllObjectsBefore(const std::function&& func, uintptr_t end); @@ -1053,18 +1031,57 @@ private: } }; + class RegionLiveDesc { + public: + static constexpr HeapAddress TEMPORARY_PTR = 0x1234; + + void Init(RegionDesc *region) + { + relatedRegion_ = region; + ClearLiveInfo(); + } + + void Fini() + { + relatedRegion_ = nullptr; + ClearLiveInfo(); + } + + void ClearLiveInfo() + { + markBitmap_ = nullptr; + resurrectBitmap_ = nullptr; + enqueueBitmap_ = nullptr; + } + private: + RegionDesc *relatedRegion_ {nullptr}; + RegionBitmap *markBitmap_ 
{nullptr}; + RegionBitmap *resurrectBitmap_ {nullptr}; + RegionBitmap *enqueueBitmap_ {nullptr}; + + friend class RegionDesc; + }; + struct UnitMetadata { struct { // basic data for RegionDesc // for fast allocation, always at the start. uintptr_t allocPtr; uintptr_t regionEnd; - // watermark set when gc phase transitions to pre-trace. - uintptr_t traceLine; + // watermark set when gc phase transitions to pre-marking. + uintptr_t markingLine; uintptr_t forwardLine; - uintptr_t fixLine; ObjectSlot* freeSlot; + // `regionStart` is the header of the data, and `regionBase` is the header of the total region + /** + * | *********************************Region*******************************| + * | InlinedRegionMetaData | *****************Region data******************| + * ^ ^ + * | | + * regionBase regionStart + */ uintptr_t regionStart; + uintptr_t regionBase; uint32_t nextRegionIdx; uint32_t prevRegionIdx; // support fast deletion for region list. @@ -1073,10 +1090,7 @@ private: int32_t rawPointerObjectCount; }; - union { - RegionLiveDesc* liveInfo = nullptr; - RegionDesc* ownerRegion; // if unit is SUBORDINATE_UNIT - }; + RegionLiveDesc liveInfo_ {}; RegionRSet* regionRSet = nullptr;; @@ -1164,11 +1178,6 @@ private: // These interfaces are used to make sure the writing operations of value in C++ Bit Field will be atomic. 
void SetUnitRole(UnitRole role) { metadata_.unitBits.AtomicSetValue(0, BITS_5, static_cast(role)); } - void SetRegionType(RegionType type) - { - metadata_.regionBits.AtomicSetValue(RegionBitOffset::BIT_OFFSET_REGION_TYPE, BITS_5, - static_cast(type)); - } void SetMarkedRegionFlag(uint8_t flag) { @@ -1185,12 +1194,6 @@ private: metadata_.regionBits.AtomicSetValue(RegionBitOffset::BIT_OFFSET_RESURRECTED_REGION, 1, flag); } - void InitSubordinateUnit(RegionDesc* owner) - { - SetUnitRole(UnitRole::SUBORDINATE_UNIT); - metadata_.ownerRegion = owner; - } - void ToFreeRegion() { InitFreeRegion(GetUnitIdx(this), 1); } void ClearUnit() { ClearUnits(GetUnitIdx(this), 1); } @@ -1203,56 +1206,220 @@ private: private: UnitMetadata metadata_; + + friend class RegionDesc; }; +public: + // inline copy some data at the begin of the region data, to support fast-path in barrier or smth else. + // NOTE the data consistency between data in header and that in `RegionDesc`. + // this could ONLY used in region that is ALIVE. 
+ class InlinedRegionMetaData { + public: + static InlinedRegionMetaData *GetInlinedRegionMetaData(RegionDesc *region) + { + InlinedRegionMetaData *data = GetInlinedRegionMetaData(region->GetRegionStart()); + DCHECK_CC(data->regionDesc_ == region); + return data; + } + static InlinedRegionMetaData *GetInlinedRegionMetaData(uintptr_t allocAddr) + { + return reinterpret_cast(allocAddr & ~DEFAULT_REGION_UNIT_MASK); + } + + explicit InlinedRegionMetaData(RegionDesc *regionDesc) + : regionDesc_(regionDesc), regionRSet_(regionDesc->GetRSet()), regionType_(regionDesc->GetRegionType()) + { + // Since this is a backup copy of `RegionDesc`, create rset at first to guarantee data consistency + DCHECK_CC(regionRSet_ != nullptr); + // Not insert to regionList and reset regionType yet + DCHECK_CC(regionType_ == RegionType::FREE_REGION); + DCHECK_CC(regionType_ == regionDesc_->GetRegionType()); + } + ~InlinedRegionMetaData() = default; + + void SetRegionType(RegionType type) + { + DCHECK_CC(RegionDesc::IsAliveRegionType(type)); + DCHECK_CC(type == regionDesc_->GetRegionType()); + regionType_ = type; + } + + RegionDesc *GetRegionDesc() const + { + return regionDesc_; + } + + RegionRSet *GetRegionRSet() const + { + return regionRSet_; + } + + RegionType GetRegionType() const + { + DCHECK_CC(RegionDesc::IsAliveRegionType(regionType_)); + return regionType_; + } + + bool IsInRecentSpace() const + { + RegionType type = GetRegionType(); + return RegionDesc::IsInRecentSpace(type); + } + + bool IsInYoungSpace() const + { + RegionType type = GetRegionType(); + return RegionDesc::IsInYoungSpace(type); + } + + bool IsInFromSpace() const + { + RegionType type = GetRegionType(); + return RegionDesc::IsInFromSpace(type); + } + + bool IsInToSpace() const + { + RegionType type = GetRegionType(); + return RegionDesc::IsInToSpace(type); + } + + bool IsInOldSpace() const + { + RegionType type = GetRegionType(); + return RegionDesc::IsInOldSpace(type); + } + + bool IsFromRegion() const + { + 
RegionType type = GetRegionType(); + return type == RegionType::FROM_REGION; + } + + bool IsInYoungSpaceForWB() const + { + RegionType type = GetRegionType(); + return RegionDesc::IsInYoungSpaceForWB(type); + } + + inline HeapAddress GetRegionStart() const; + + HeapAddress GetRegionBase() const + { + uintptr_t base = reinterpret_cast(this); + ASSERT(base == regionDesc_->GetRegionBaseFast()); + return static_cast(base); + } + + size_t GetAddressOffset(HeapAddress address) const + { + DCHECK_CC(GetRegionBase() <= address); + return (address - GetRegionBase()); + } + + bool MarkRSetCardTable(BaseObject *obj) + { + size_t offset = GetAddressOffset(static_cast(reinterpret_cast(obj))); + return GetRegionRSet()->MarkCardTable(offset); + } + private: + RegionDesc *regionDesc_ {nullptr}; + RegionRSet *regionRSet_ {nullptr}; + RegionType regionType_ {}; + // fixme: inline more + + friend class RegionDesc; + }; + // should keep as same as the align of BaseObject + static constexpr size_t UNIT_BEGIN_ALIGN = 8; + // default common region unit header size. + static constexpr size_t UNIT_HEADER_SIZE = AlignUp(sizeof(InlinedRegionMetaData), UNIT_BEGIN_ALIGN); + // default common region unit available size. 
+ static constexpr size_t UNIT_AVAILABLE_SIZE = UNIT_SIZE - UNIT_HEADER_SIZE; + +private: + void InitRegionDesc(size_t nUnit, UnitRole uClass) { + DCHECK_CC(uClass != UnitRole::SUBORDINATE_UNIT); + size_t base = GetRegionBase(); + metadata.regionBase = base; + metadata.regionStart = base + RegionDesc::UNIT_HEADER_SIZE; + ASSERT(metadata.regionStart % UNIT_BEGIN_ALIGN == 0); metadata.allocPtr = GetRegionStart(); - metadata.regionStart = GetRegionStart(); - metadata.regionEnd = metadata.allocPtr + nUnit * RegionDesc::UNIT_SIZE; + metadata.regionEnd = base + nUnit * RegionDesc::UNIT_SIZE; + DCHECK_CC(GetRegionStart() < GetRegionEnd()); metadata.prevRegionIdx = NULLPTR_IDX; metadata.nextRegionIdx = NULLPTR_IDX; metadata.liveByteCount = 0; - metadata.liveInfo = nullptr; metadata.freeSlot = nullptr; - if (metadata.regionRSet != nullptr) { - ClearRSet(); - } - metadata.regionRSet = nullptr; SetRegionType(RegionType::FREE_REGION); SetUnitRole(uClass); - ClearTraceCopyFixLine(); + ClearMarkingCopyLine(); SetMarkedRegionFlag(0); SetEnqueuedRegionFlag(0); SetResurrectedRegionFlag(0); __atomic_store_n(&metadata.rawPointerObjectCount, 0, __ATOMIC_SEQ_CST); #ifdef USE_HWASAN - ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast(metadata.allocPtr), + ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast(metadata.regionBase), nUnit * RegionDesc::UNIT_SIZE); - uintptr_t pAddr = metadata.allocPtr; + uintptr_t pAddr = metadata.regionBase; uintptr_t pSize = nUnit * RegionDesc::UNIT_SIZE; LOG_COMMON(DEBUG) << std::hex << "set [" << pAddr << std::hex << ", " << (pAddr + pSize) << ") unpoisoned\n"; #endif } + void ResetRegion(size_t nUnit, UnitRole uClass) + { + DCHECK_CC(metadata.regionRSet != nullptr); + ClearRSet(); + InitRegionDesc(nUnit, uClass); + InitMetaData(nUnit, uClass); + std::atomic_thread_fence(std::memory_order_seq_cst); + } + void InitRegion(size_t nUnit, UnitRole uClass) { + DCHECK_CC(uClass != UnitRole::FREE_UNITS); //fixme: remove `UnitRole::SUBORDINATE_UNIT` + DCHECK_CC(uClass != 
UnitRole::SUBORDINATE_UNIT); //fixme: remove `UnitRole::SUBORDINATE_UNIT` InitRegionDesc(nUnit, uClass); - if (metadata.regionRSet == nullptr) { - metadata.regionRSet = new RegionRSet(GetRegionSize()); - } + DCHECK_CC(metadata.regionRSet == nullptr); + metadata.regionRSet = RegionRSet::CreateRegionRSet(GetRegionBaseSize()); + InitMetaData(nUnit, uClass); + std::atomic_thread_fence(std::memory_order_seq_cst); + } + + void InitMetaData(size_t nUnit, UnitRole uClass) + { + metadata.liveInfo_.Init(this); + HeapAddress header = GetRegionBase(); + void *ptr = reinterpret_cast(static_cast(header)); + new (ptr) InlinedRegionMetaData(this); // initialize region's subordinate units. UnitInfo* unit = reinterpret_cast(this) - (nUnit - 1); for (size_t i = 0; i < nUnit - 1; i++) { - unit[i].InitSubordinateUnit(this); + DCHECK_CC(uClass == UnitRole::LARGE_SIZED_UNITS); + unit[i].metadata_.liveInfo_.Fini(); } } static constexpr uint32_t NULLPTR_IDX = UnitInfo::INVALID_IDX; UnitMetadata metadata; +public: + friend constexpr size_t GetMetaDataInRegionOffset(); + static constexpr size_t REGION_RSET_IN_INLINED_METADATA_OFFSET = MEMBER_OFFSET(InlinedRegionMetaData, regionRSet_); + static constexpr size_t REGION_TYPE_IN_INLINED_METADATA_OFFSET = MEMBER_OFFSET(InlinedRegionMetaData, regionType_); }; + +HeapAddress RegionDesc::InlinedRegionMetaData::GetRegionStart() const +{ + HeapAddress addr = static_cast(reinterpret_cast(this) + RegionDesc::UNIT_HEADER_SIZE); + DCHECK_CC(addr == regionDesc_->GetRegionStart()); + return addr; +} } // namespace common #endif // COMMON_COMPONENTS_HEAP_ALLOCATOR_REGION_INFO_H diff --git a/common_components/heap/allocator/region_list.h b/common_components/heap/allocator/region_list.h index ad4e7c609a328cb07c3f84abf20131e65806a646..7bd7a00fde3577fcf233d404cc87ec9c85452f39 100755 --- a/common_components/heap/allocator/region_list.h +++ b/common_components/heap/allocator/region_list.h @@ -17,7 +17,6 @@ #define COMMON_COMPONENTS_HEAP_ALLOCATOR_REGION_LIST_H 
#include "common_components/heap/allocator/region_desc.h" -#include "common_components/heap/allocator/slot_list.h" namespace common { class RegionList { @@ -28,6 +27,7 @@ public: void PrependRegionLocked(RegionDesc* region, RegionDesc::RegionType type); void MergeRegionList(RegionList& regionList, RegionDesc::RegionType regionType); + void MergeRegionListWithoutHead(RegionList& regionList, RegionDesc::RegionType regionType); void DeleteRegion(RegionDesc* del) { @@ -55,6 +55,7 @@ public: return false; } + void DumpRegionSummary() const; #ifndef NDEBUG void DumpRegionList(const char*); #endif @@ -105,9 +106,9 @@ public: size_t GetRegionCount() const { return regionCount_; } - size_t GetAllocatedSize(bool count = false) const + size_t GetAllocatedSize(bool usedPageSize = true) const { - if (!count) { + if (usedPageSize) { return GetUnitCount() * RegionDesc::UNIT_SIZE; } return CountAllocatedSize(); @@ -153,7 +154,7 @@ public: if (region == nullptr) { return 0; } - return region->AllocPinnedFromFreeList(); + return region->AllocNonMovableFromFreeList(); } protected: @@ -176,7 +177,7 @@ private: unitCount_ = srcList.unitCount_; } - // allocated-size of to-region list must be calculated on the fly. + // allocated-size of tl-region list must be calculated on the fly. 
size_t CountAllocatedSize() const { size_t allocCnt = 0; @@ -229,7 +230,7 @@ public: { std::lock_guard lock(listMutex_); for (RegionDesc* node = listHead_; node != nullptr; node = node->GetNextRegion()) { - node->ClearTraceCopyFixLine(); + node->ClearMarkingCopyLine(); } active_ = false; } diff --git a/common_components/heap/allocator/region_manager.cpp b/common_components/heap/allocator/region_manager.cpp index 900c5b1a77a86ec85365420ef26f81e4dbd63395..a0d9e5fb7cf1cefc1d253a4c46ab726fabc36962 100755 --- a/common_components/heap/allocator/region_manager.cpp +++ b/common_components/heap/allocator/region_manager.cpp @@ -20,17 +20,19 @@ #include #include -#include "common_components/base_runtime/hooks.h" +#include "common_components/common_runtime/hooks.h" #include "common_components/heap/allocator/region_desc.h" -#include "common_components/heap/allocator/region_space.h" +#include "common_components/heap/allocator/region_list.h" +#include "common_components/heap/allocator/regional_heap.h" #include "common_components/base/c_string.h" #include "common_components/heap/collector/collector.h" -#include "common_components/heap/collector/trace_collector.h" +#include "common_components/heap/collector/marking_collector.h" #include "common_components/common/base_object.h" #include "common_components/common/scoped_object_access.h" #include "common_components/heap/heap.h" #include "common_components/mutator/mutator.inline.h" #include "common_components/mutator/mutator_manager.h" +#include "common_components/heap/allocator/fix_heap.h" #if defined(COMMON_TSAN_SUPPORT) #include "common_components/sanitizer/sanitizer_interface.h" @@ -70,22 +72,15 @@ static size_t GetPageSize() noexcept // System default page size const size_t COMMON_PAGE_SIZE = GetPageSize(); const size_t AllocatorUtils::ALLOC_PAGE_SIZE = COMMON_PAGE_SIZE; -// region unit size: same as system page size -const size_t RegionDesc::UNIT_SIZE = COMMON_PAGE_SIZE; -// regarding a object as a large object when the size is 
greater than 32KB or one page size, -// depending on the system page size. -const size_t RegionDesc::LARGE_OBJECT_DEFAULT_THRESHOLD = COMMON_PAGE_SIZE > (32 * KB) ? - COMMON_PAGE_SIZE : 32 * KB; -// max size of per region is 128KB. -const size_t RegionManager::MAX_UNIT_COUNT_PER_REGION = (128 * KB) / COMMON_PAGE_SIZE; // size of huge page is 2048KB. -const size_t RegionManager::HUGE_PAGE = (2048 * KB) / COMMON_PAGE_SIZE; +constexpr size_t HUGE_PAGE_UNIT_NUM = (2048 * KB) / RegionDesc::UNIT_SIZE; #if defined(GCINFO_DEBUG) && GCINFO_DEBUG void RegionDesc::DumpRegionDesc(LogType type) const { - DLOG(type, "Region index: %zu, type: %s, address: 0x%zx-0x%zx, allocated(B) %zu, live(B) %zu", GetUnitIdx(), - GetTypeName(), GetRegionStart(), GetRegionEnd(), GetRegionAllocatedSize(), GetLiveByteCount()); + DLOG(type, "Region index: %zu, type: %s, address: 0x%zx(start=0x%zx)-0x%zx, allocated(B) %zu, live(B) %zu", + GetUnitIdx(), GetTypeName(), GetRegionBase(), GetRegionStart(), GetRegionEnd(), GetRegionAllocatedSize(), + GetLiveByteCount()); } const char* RegionDesc::GetTypeName() const @@ -97,9 +92,9 @@ const char* RegionDesc::GetTypeName() const "from region", "unmovable from region", "to region", - "full pinned region", - "recent pinned region", - "raw pointer pinned region", + "full non-movable region", + "recent non-movable region", + "raw pointer non-movable region", "tl raw pointer region", "large region", "recent large region", @@ -119,7 +114,7 @@ void RegionDesc::VisitAllObjectsBefore(const std::function & { uintptr_t position = GetRegionStart(); - if (IsFixedRegion()) { + if (IsMonoSizeNonMovableRegion()) { size_t size = static_cast(GetRegionCellCount() + 1) * sizeof(uint64_t); while (position < end) { BaseObject *obj = reinterpret_cast(position); @@ -127,7 +122,7 @@ void RegionDesc::VisitAllObjectsBefore(const std::function & if (position > end) { break; } - if (IsFreePinnedObject(obj)) { + if (IsFreeNonMovableObject(obj)) { continue; } func(obj); @@ -142,42 
+137,16 @@ void RegionDesc::VisitAllObjectsBefore(const std::function & while (position < end) { // GetAllocSize should before call func, because object maybe destroy in compact gc. func(reinterpret_cast(position)); - size_t size = RegionSpace::GetAllocSize(*reinterpret_cast(position)); + size_t size = RegionalHeap::GetAllocSize(*reinterpret_cast(position)); position += size; } } } -void RegionDesc::VisitAllObjectsWithFixedSize(size_t cellCount, const std::function&& func) -{ - CHECK_CC(GetRegionType() == RegionType::FIXED_PINNED_REGION || - GetRegionType() == RegionType::FULL_FIXED_PINNED_REGION); - size_t size = (cellCount + 1) * sizeof(uint64_t); - uintptr_t position = GetRegionStart(); - uintptr_t end = GetRegionEnd(); - while (position < end) { - // GetAllocSize should before call func, because object maybe destroy in compact gc. - BaseObject* obj = reinterpret_cast(position); - position += size; - if (position > end) { - break; - } - func(obj); - } -} - -void RegionDesc::VisitAllObjectsBeforeFix(const std::function&& func) -{ - uintptr_t allocPtr = GetRegionAllocPtr(); - uintptr_t phaseLine = GetFixLine(); - uintptr_t end = std::min(phaseLine, allocPtr); - VisitAllObjectsBefore(std::move(func), end); -} - -void RegionDesc::VisitAllObjectsBeforeTrace(const std::function &&func) +void RegionDesc::VisitAllObjectsBeforeCopy(const std::function&& func) { uintptr_t allocPtr = GetRegionAllocPtr(); - uintptr_t phaseLine = GetTraceLine(); + uintptr_t phaseLine = GetCopyLine(); uintptr_t end = std::min(phaseLine, allocPtr); VisitAllObjectsBefore(std::move(func), end); } @@ -189,7 +158,7 @@ bool RegionDesc::VisitLiveObjectsUntilFalse(const std::function(Heap::GetHeap().GetCollector()); + MarkingCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); if (IsLargeRegion()) { if (IsJitFortAwaitInstallFlag()) { return true; @@ -202,22 +171,48 @@ bool RegionDesc::VisitLiveObjectsUntilFalse(const std::function(position); if (collector.IsSurvivedObject(obj) && 
!func(obj)) { return false; } - position += RegionSpace::GetAllocSize(*obj); + position += RegionalHeap::GetAllocSize(*obj); } } return true; } +void RegionDesc::VisitRememberSetBeforeMarking(const std::function& func) +{ + if (IsLargeRegion() && IsJitFortAwaitInstallFlag()) { + // machine code which is not installed should skip here. + return; + } + uintptr_t end = std::min(GetMarkingLine(), GetRegionAllocPtr()); + GetRSet()->VisitAllMarkedCardBefore(func, GetRegionBaseFast(), end); +} + +void RegionDesc::VisitRememberSetBeforeCopy(const std::function& func) +{ + if (IsLargeRegion() && IsJitFortAwaitInstallFlag()) { + // machine code which is not installed should skip here. + return; + } + uintptr_t end = std::min(GetCopyLine(), GetRegionAllocPtr()); + GetRSet()->VisitAllMarkedCardBefore(func, GetRegionBaseFast(), end); +} + void RegionDesc::VisitRememberSet(const std::function& func) { - RegionRSet* rSet = GetRSet(); - if (IsLargeRegion()) { - if (rSet->IsMarkedCard(0)) { - func(reinterpret_cast(GetRegionStart())); - } + if (IsLargeRegion() && IsJitFortAwaitInstallFlag()) { + // machine code which is not installed should skip here. 
return; } - rSet->VisitAllMarkedCard(func, GetRegionStart()); + GetRSet()->VisitAllMarkedCardBefore(func, GetRegionBaseFast(), GetRegionAllocPtr()); +} + +void RegionList::MergeRegionListWithoutHead(RegionList& srcList, RegionDesc::RegionType regionType) +{ + RegionDesc *head = srcList.TakeHeadRegion(); + MergeRegionList(srcList, regionType); + if (head) { + srcList.PrependRegion(head, head->GetRegionType()); + } } void RegionList::MergeRegionList(RegionList& srcList, RegionDesc::RegionType regionType) @@ -246,24 +241,30 @@ static const char *RegionDescRegionTypeToString(RegionDesc::RegionType type) { static constexpr const char *enumStr[] = { [static_cast(RegionDesc::RegionType::FREE_REGION)] = "FREE_REGION", + [static_cast(RegionDesc::RegionType::GARBAGE_REGION)] = "GARBAGE_REGION", [static_cast(RegionDesc::RegionType::THREAD_LOCAL_REGION)] = "THREAD_LOCAL_REGION", + [static_cast(RegionDesc::RegionType::THREAD_LOCAL_OLD_REGION)] = "THREAD_LOCAL_OLD_REGION", [static_cast(RegionDesc::RegionType::RECENT_FULL_REGION)] = "RECENT_FULL_REGION", [static_cast(RegionDesc::RegionType::FROM_REGION)] = "FROM_REGION", [static_cast(RegionDesc::RegionType::LONE_FROM_REGION)] = "LONE_FROM_REGION", [static_cast(RegionDesc::RegionType::EXEMPTED_FROM_REGION)] = "EXEMPTED_FROM_REGION", [static_cast(RegionDesc::RegionType::TO_REGION)] = "TO_REGION", - [static_cast(RegionDesc::RegionType::FULL_PINNED_REGION)] = "FULL_PINNED_REGION", - [static_cast(RegionDesc::RegionType::RECENT_PINNED_REGION)] = "RECENT_PINNED_REGION", - [static_cast(RegionDesc::RegionType::FIXED_PINNED_REGION)] = "FIXED_PINNED_REGION", - [static_cast(RegionDesc::RegionType::FULL_FIXED_PINNED_REGION)] = "FULL_FIXED_PINNED_REGION", + [static_cast(RegionDesc::RegionType::OLD_REGION)] = "OLD_REGION", + [static_cast(RegionDesc::RegionType::FULL_POLYSIZE_NONMOVABLE_REGION)] = + "FULL_POLYSIZE_NONMOVABLE_REGION", + [static_cast(RegionDesc::RegionType::RECENT_POLYSIZE_NONMOVABLE_REGION)] = + 
"RECENT_POLYSIZE_NONMOVABLE_REGION", + [static_cast(RegionDesc::RegionType::MONOSIZE_NONMOVABLE_REGION)] = "MONOSIZE_NONMOVABLE_REGION", + [static_cast(RegionDesc::RegionType::FULL_MONOSIZE_NONMOVABLE_REGION)] = + "FULL_MONOSIZE_NONMOVABLE_REGION", [static_cast(RegionDesc::RegionType::RAW_POINTER_REGION)] = "RAW_POINTER_REGION", [static_cast(RegionDesc::RegionType::TL_RAW_POINTER_REGION)] = "TL_RAW_POINTER_REGION", [static_cast(RegionDesc::RegionType::RECENT_LARGE_REGION)] = "RECENT_LARGE_REGION", [static_cast(RegionDesc::RegionType::LARGE_REGION)] = "LARGE_REGION", - [static_cast(RegionDesc::RegionType::GARBAGE_REGION)] = "GARBAGE_REGION", [static_cast(RegionDesc::RegionType::READ_ONLY_REGION)] = "READ_ONLY_REGION", [static_cast(RegionDesc::RegionType::APPSPAWN_REGION)] = "APPSPAWN_REGION", }; + ASSERT_LOGF(type < RegionDesc::RegionType::END_OF_REGION_TYPE, "Invalid region type"); return enumStr[static_cast(type)]; } @@ -279,13 +280,14 @@ void RegionList::PrependRegionLocked(RegionDesc* region, RegionDesc::RegionType return; } - DLOG(REGION, "%s (%zu, %zu)+(%zu, %zu) prepend region %p@%#zx+%zu type %u->%u", listName_, - regionCount_, unitCount_, 1llu, region->GetUnitCount(), region, region->GetRegionStart(), - region->GetRegionAllocatedSize(), region->GetRegionType(), type); + DLOG(REGION, "%s (%zu, %zu)+(%zu, %zu) prepend region %p(base=%#zx)@%#zx+%zu type %u->%u", listName_, + regionCount_, unitCount_, 1llu, region->GetUnitCount(), region, region->GetRegionBase(), + region->GetRegionStart(), region->GetRegionAllocatedSize(), region->GetRegionType(), type); region->SetRegionType(type); - os::PrctlSetVMA(reinterpret_cast(region->GetRegionStart()), region->GetRegionAllocatedSize(), + size_t totalRegionSize = region->GetRegionEnd() - region->GetRegionBase(); + os::PrctlSetVMA(reinterpret_cast(region->GetRegionBase()), totalRegionSize, (std::string("ArkTS Heap CMCGC Region ") + RegionDescRegionTypeToString(type)).c_str()); region->SetPrevRegion(nullptr); @@ -310,9 
+312,9 @@ void RegionList::DeleteRegionLocked(RegionDesc* del) del->SetNextRegion(nullptr); del->SetPrevRegion(nullptr); - DLOG(REGION, "%s (%zu, %zu)-(%zu, %zu) delete region %p@%#zx+%zu type %u", listName_, + DLOG(REGION, "%s (%zu, %zu)-(%zu, %zu) delete region %p(start=%p),@%#zx+%zu type %u", listName_, regionCount_, unitCount_, 1llu, del->GetUnitCount(), - del, del->GetRegionStart(), del->GetRegionAllocatedSize(), del->GetRegionType()); + del, del->GetRegionBase(), del->GetRegionStart(), del->GetRegionAllocatedSize(), del->GetRegionType()); DecCounts(1, del->GetUnitCount()); @@ -339,14 +341,20 @@ void RegionList::DeleteRegionLocked(RegionDesc* del) } } +void RegionList::DumpRegionSummary() const +{ + VLOG(DEBUG, "\t%s %zu: %zu units (%zu B, alloc %zu)", listName_, + regionCount_, unitCount_, GetAllocatedSize(true), GetAllocatedSize(false)); +} + #ifndef NDEBUG void RegionList::DumpRegionList(const char* msg) { DLOG(REGION, "dump region list %s", msg); std::lock_guard lock(listMutex_); for (RegionDesc *region = listHead_; region != nullptr; region = region->GetNextRegion()) { - DLOG(REGION, "region %p @0x%zx+%zu units [%zu+%zu, %zu) type %u prev %p next %p", region, - region->GetRegionStart(), region->GetRegionAllocatedSize(), + DLOG(REGION, "region %p @0x%zx(start=0x%zx)+%zu units [%zu+%zu, %zu) type %u prev %p next %p", region, + region->GetRegionBase(), region->GetRegionStart(), region->GetRegionAllocatedSize(), region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), region->GetRegionType(), region->GetPrevRegion(), region->GetNextRegion()); } @@ -355,7 +363,7 @@ void RegionList::DumpRegionList(const char* msg) inline void RegionManager::TagHugePage(RegionDesc* region, size_t num) const { #if defined (__linux__) || defined(PANDA_TARGET_OHOS) - (void)madvise(reinterpret_cast(region->GetRegionStart()), num * RegionDesc::UNIT_SIZE, MADV_HUGEPAGE); + (void)madvise(reinterpret_cast(region->GetRegionBase()), num * 
RegionDesc::UNIT_SIZE, MADV_HUGEPAGE); #else (void)region; (void)num; @@ -365,7 +373,7 @@ inline void RegionManager::TagHugePage(RegionDesc* region, size_t num) const inline void RegionManager::UntagHugePage(RegionDesc* region, size_t num) const { #if defined (__linux__) || defined(PANDA_TARGET_OHOS) - (void)madvise(reinterpret_cast(region->GetRegionStart()), num * RegionDesc::UNIT_SIZE, MADV_NOHUGEPAGE); + (void)madvise(reinterpret_cast(region->GetRegionBase()), num * RegionDesc::UNIT_SIZE, MADV_NOHUGEPAGE); #else (void)region; (void)num; @@ -401,35 +409,28 @@ size_t FreeRegionManager::ReleaseGarbageRegions(size_t targetCachedSize) return releasedBytes; } -void RegionManager::SetMaxUnitCountForRegion() -{ - maxUnitCountPerRegion_ = BaseRuntime::GetInstance()->GetHeapParam().regionSize * KB / RegionDesc::UNIT_SIZE; -} - -void RegionManager::SetLargeObjectThreshold() -{ - size_t regionSize = BaseRuntime::GetInstance()->GetHeapParam().regionSize * KB; - if (regionSize < RegionDesc::LARGE_OBJECT_DEFAULT_THRESHOLD) { - largeObjectThreshold_ = regionSize; - } else { - largeObjectThreshold_ = RegionDesc::LARGE_OBJECT_DEFAULT_THRESHOLD; - } -} - void RegionManager::Initialize(size_t nRegion, uintptr_t regionInfoAddr) { size_t metadataSize = GetMetadataSize(nRegion); + size_t alignedHeapStart = RoundUp(regionInfoAddr + metadataSize, RegionDesc::UNIT_SIZE); + // align the start of region to 256KB + /*** + * |***********|<-metadataSize->|**********************| + * |**padding**|***RegionUnit***|*******Region*********| + * ^ ^ ^ + * | | | + * | reginInfoStart alignedHeapStart + * regionInfoAddr + */ + regionInfoStart_ = alignedHeapStart - metadataSize; + regionHeapStart_ = alignedHeapStart; #ifdef _WIN64 - MemoryMap::CommitMemory(reinterpret_cast(regionInfoAddr), metadataSize); + MemoryMap::CommitMemory(reinterpret_cast(regionInfoStart_), metadataSize); #endif - regionInfoStart_ = regionInfoAddr; - regionHeapStart_ = regionInfoAddr + metadataSize; regionHeapEnd_ = 
regionHeapStart_ + nRegion * RegionDesc::UNIT_SIZE; inactiveZone_ = regionHeapStart_; - SetMaxUnitCountForRegion(); - SetLargeObjectThreshold(); // propagate region heap layout - RegionDesc::Initialize(nRegion, regionInfoAddr, regionHeapStart_); + RegionDesc::Initialize(nRegion, regionInfoStart_, regionHeapStart_); freeRegionManager_.Initialize(nRegion); DLOG(REPORT, "region info @0x%zx+%zu, heap [0x%zx, 0x%zx), unit count %zu", regionInfoAddr, metadataSize, @@ -448,11 +449,11 @@ void RegionManager::ReclaimRegion(RegionDesc* region) { size_t num = region->GetUnitCount(); size_t unitIndex = region->GetUnitIdx(); - if (num >= HUGE_PAGE) { + if (num >= HUGE_PAGE_UNIT_NUM) { UntagHugePage(region, num); } - DLOG(REGION, "reclaim region %p@%#zx+%zu type %u", region, - region->GetRegionStart(), region->GetRegionAllocatedSize(), + DLOG(REGION, "reclaim region %p(base=%#zx)@%#zx+%zu type %u", region, + region->GetRegionBase(), region->GetRegionStart(), region->GetRegionAllocatedSize(), region->GetRegionType()); region->InitFreeUnits(); @@ -464,7 +465,7 @@ size_t RegionManager::ReleaseRegion(RegionDesc* region) size_t res = region->GetRegionSize(); size_t num = region->GetUnitCount(); size_t unitIndex = region->GetUnitIdx(); - if (num >= HUGE_PAGE) { + if (num >= HUGE_PAGE_UNIT_NUM) { UntagHugePage(region, num); } DLOG(REGION, "release region %p @%#zx+%zu type %u", region, region->GetRegionStart(), @@ -482,43 +483,12 @@ void RegionManager::CountLiveObject(const BaseObject* obj) region->AddLiveByteCount(obj->GetSize()); } -void RegionManager::AssembleLargeGarbageCandidates() -{ - largeRegionList_.MergeRegionList(recentLargeRegionList_, RegionDesc::RegionType::LARGE_REGION); -} - -void RegionManager::AssemblePinnedGarbageCandidates() -{ - pinnedRegionList_.MergeRegionList(recentPinnedRegionList_, RegionDesc::RegionType::FULL_PINNED_REGION); - RegionDesc* region = pinnedRegionList_.GetHeadRegion(); - for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - 
fixedPinnedRegionList_[i]->MergeRegionList(*recentFixedPinnedRegionList_[i], - RegionDesc::RegionType::FULL_FIXED_PINNED_REGION); - } -} - -void RegionManager::ClearRSet() -{ - auto clearFunc = [](RegionDesc* region) { - region->ClearRSet(); - }; - recentPinnedRegionList_.VisitAllRegions(clearFunc); - pinnedRegionList_.VisitAllRegions(clearFunc); - recentLargeRegionList_.VisitAllRegions(clearFunc); - largeRegionList_.VisitAllRegions(clearFunc); - rawPointerRegionList_.VisitAllRegions(clearFunc); - appSpawnRegionList_.VisitAllRegions(clearFunc); - for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - recentFixedPinnedRegionList_[i]->VisitAllRegions(clearFunc); - fixedPinnedRegionList_[i]->VisitAllRegions(clearFunc); - } -} - void RegionManager::ForEachObjectUnsafe(const std::function& visitor) const { for (uintptr_t regionAddr = regionHeapStart_; regionAddr < inactiveZone_;) { RegionDesc* region = RegionDesc::GetRegionDescAt(regionAddr); - regionAddr = region->GetRegionEnd(); + uintptr_t next = region->GetRegionEnd(); + regionAddr = next; if (!region->IsValidRegion() || region->IsFreeRegion() || region -> IsGarbageRegion()) { continue; } @@ -529,19 +499,13 @@ void RegionManager::ForEachObjectUnsafe(const std::function& void RegionManager::ForEachObjectSafe(const std::function& visitor) const { ScopedEnterSaferegion enterSaferegion(false); - ScopedStopTheWorld stw("visit-all-objects"); + STWParam stwParam{"visit-all-objects"}; + ScopedStopTheWorld stw(stwParam); ForEachObjectUnsafe(visitor); } -void RegionManager::ForEachAwaitingJitFortUnsafe(const std::function& visitor) const -{ - ASSERT(BaseRuntime::GetInstance()->GetMutatorManager().WorldStopped()); - for (const auto jitFort : awaitingJitFort_) { - visitor(jitFort); - } -} - -RegionDesc *RegionManager::TakeRegion(size_t num, RegionDesc::UnitRole type, bool expectPhysicalMem, bool allowGC) +RegionDesc *RegionManager::TakeRegion(size_t num, RegionDesc::UnitRole type, bool expectPhysicalMem, bool allowGC, + 
bool isCopy) { // a chance to invoke heuristic gc. if (allowGC && !Heap::GetHeap().IsGcStarted()) { @@ -565,7 +529,7 @@ RegionDesc *RegionManager::TakeRegion(size_t num, RegionDesc::UnitRole type, boo #endif RegionDesc::ClearUnits(idx, num); DLOG(REGION, "reuse garbage region %p@%#zx+%zu", head, head->GetRegionStart(), head->GetRegionSize()); - return RegionDesc::InitRegion(idx, num, type); + return RegionDesc::ResetRegion(idx, num, type); } else { DLOG(REGION, "reclaim garbage region %p@%#zx+%zu", head, head->GetRegionStart(), head->GetRegionSize()); ReclaimRegion(head); @@ -574,7 +538,7 @@ RegionDesc *RegionManager::TakeRegion(size_t num, RegionDesc::UnitRole type, boo RegionDesc* region = freeRegionManager_.TakeRegion(num, type, expectPhysicalMem); if (region != nullptr) { - if (num >= HUGE_PAGE) { + if (num >= HUGE_PAGE_UNIT_NUM) { TagHugePage(region, num); } return region; @@ -583,6 +547,20 @@ RegionDesc *RegionManager::TakeRegion(size_t num, RegionDesc::UnitRole type, boo // when free regions are not enough for allocation if (num <= GetInactiveUnitCount()) { uintptr_t addr = inactiveZone_.fetch_add(size); + +#ifndef PANDA_TARGET_32 + size_t totalHeapSize = regionHeapEnd_ - regionHeapStart_; + // 2: half space reserved for forward copy. throw oom when gc finish. 
+ if (GetActiveSize() * 2 > totalHeapSize) { + if (!isCopy) { + (void)inactiveZone_.fetch_sub(size); + return nullptr; + } else { + Heap::GetHeap().SetForceThrowOOM(true); + } + } +#endif + if (addr < regionHeapEnd_ - size) { region = RegionDesc::InitRegionAt(addr, num, type); size_t idx = region->GetUnitIdx(); @@ -593,7 +571,7 @@ RegionDesc *RegionManager::TakeRegion(size_t num, RegionDesc::UnitRole type, boo (void)idx; // eliminate compilation warning DLOG(REGION, "take inactive units [%zu+%zu, %zu) at [0x%zx, 0x%zx)", idx, num, idx + num, RegionDesc::GetUnitAddress(idx), RegionDesc::GetUnitAddress(idx + num)); - if (num >= HUGE_PAGE) { + if (num >= HUGE_PAGE_UNIT_NUM) { TagHugePage(region, num); } if (expectPhysicalMem) { @@ -608,237 +586,6 @@ RegionDesc *RegionManager::TakeRegion(size_t num, RegionDesc::UnitRole type, boo return nullptr; } -static void FixRecentRegion(TraceCollector& collector, RegionDesc* region) -{ - // use fixline to skip new region after fix - // visit object before fix line to avoid race condition with mutator - region->VisitAllObjectsBeforeFix([&collector, region](BaseObject* object) { - if (region->IsNewObjectSinceForward(object)) { - // handle dead objects in tl-regions for concurrent gc. - if (collector.IsToVersion(object)) { - collector.FixObjectRefFields(object); - object->SetForwardState(BaseStateWord::ForwardState::NORMAL); - } else { - DLOG(FIX, "fix: skip new obj %p<%p>(%zu)", object, object->GetTypeInfo(), object->GetSize()); - } - } else if (region->IsNewObjectSinceTrace(object) || collector.IsSurvivedObject(object)) { - collector.FixObjectRefFields(object); - } else { // handle dead objects in tl-regions for concurrent gc. 
- FillFreeObject(object, RegionSpace::GetAllocSize(*object)); - DLOG(FIX, "skip dead obj %p<%p>(%zu)", object, object->GetTypeInfo(), object->GetSize()); - } - }); -} - -void RegionManager::FixRecentRegionList(TraceCollector& collector, RegionList& list) -{ - list.VisitAllRegions([&collector](RegionDesc* region) { - DLOG(REGION, "fix region %p@%#zx+%zu", region, region->GetRegionStart(), region->GetLiveByteCount()); - FixRecentRegion(collector, region); - }); -} - -static void FixToRegion(TraceCollector& collector, RegionDesc* region) -{ - region->VisitAllObjects([&collector](BaseObject* object) { - collector.FixObjectRefFields(object); - }); -} - -void RegionManager::FixToRegionList(TraceCollector& collector, RegionList& list) -{ - list.VisitAllRegions([&collector](RegionDesc* region) { - DLOG(REGION, "fix region %p@%#zx+%zu", region, region->GetRegionStart(), region->GetLiveByteCount()); - FixToRegion(collector, region); - }); -} - -void RegionManager::FixFixedRegionList(TraceCollector& collector, RegionList& list, size_t cellCount, GCStats& stats) -{ - size_t garbageSize = 0; - RegionDesc* region = list.GetHeadRegion(); - while (region != nullptr) { - auto liveBytes = region->GetLiveByteCount(); - if (liveBytes == 0) { - RegionDesc* del = region; - region = region->GetNextRegion(); - list.DeleteRegion(del); - - garbageSize += CollectRegion(del); - continue; - } - region->VisitAllObjectsWithFixedSize(cellCount, - [&collector, ®ion, &cellCount, &garbageSize](BaseObject* object) { - if (collector.IsSurvivedObject(object)) { - collector.FixObjectRefFields(object); - } else { - DLOG(ALLOC, "reclaim pinned obj %p", object); - garbageSize += (cellCount + 1) * sizeof(uint64_t); - region->CollectPinnedGarbage(object, cellCount); - } - }); - region->SetRegionAllocPtr(region->GetRegionEnd()); - region = region->GetNextRegion(); - } - stats.pinnedGarbageSize += garbageSize; -} - -static void FixRegion(TraceCollector& collector, RegionDesc* region) -{ - 
region->VisitAllObjects([&collector](BaseObject* object) { - if (collector.IsSurvivedObject(object)) { - collector.FixObjectRefFields(object); - } else { - FillFreeObject(object, RegionSpace::GetAllocSize(*object)); - DLOG(FIX, "fix: skip dead obj %p<%p>(%zu)", object, object->GetTypeInfo(), object->GetSize()); - } - }); -} - -void RegionManager::FixRegionList(TraceCollector& collector, RegionList& list) -{ - list.VisitAllRegions([&collector](RegionDesc* region) { - DLOG(REGION, "fix region %p@%#zx+%zu", region, region->GetRegionStart(), region->GetLiveByteCount()); - FixRegion(collector, region); - }); -} - -static void FixOldRegion(TraceCollector& collector, RegionDesc* region) -{ - region->VisitAllObjects([&collector, ®ion](BaseObject* object) { - if (region->IsNewObjectSinceTrace(object) || collector.IsSurvivedObject(object) || region->IsInRSet(object)) { - DLOG(FIX, "fix: mature obj %p<%p>(%zu)", object, object->GetTypeInfo(), object->GetSize()); - collector.FixObjectRefFields(object); - } - }); -} - -void RegionManager::FixOldRegionList(TraceCollector& collector, RegionList& list) -{ - list.VisitAllRegions([&collector](RegionDesc* region) { - DLOG(REGION, "fix mature region %p@%#zx+%zu", region, region->GetRegionStart(), region->GetLiveByteCount()); - FixOldRegion(collector, region); - }); -} - -static void FixRecentOldRegion(TraceCollector& collector, RegionDesc* region) -{ - region->VisitAllObjectsBeforeFix([&collector, ®ion](BaseObject* object) { - if (region->IsNewObjectSinceTrace(object) || collector.IsSurvivedObject(object) || region->IsInRSet(object)) { - DLOG(FIX, "fix: mature obj %p<%p>(%zu)", object, object->GetTypeInfo(), object->GetSize()); - collector.FixObjectRefFields(object); - } - }); -} - -void RegionManager::FixRecentOldRegionList(TraceCollector& collector, RegionList& list) -{ - list.VisitAllRegions([&collector](RegionDesc* region) { - DLOG(REGION, "fix mature region %p@%#zx+%zu", region, region->GetRegionStart(), 
region->GetLiveByteCount()); - FixRecentOldRegion(collector, region); - }); -} - -void RegionManager::FixPinnedRegionList(TraceCollector& collector, RegionList& list, GCStats& stats) -{ - size_t garbageSize = 0; - RegionDesc* region = list.GetHeadRegion(); - while (region != nullptr) { - if (region->GetLiveByteCount() == 0) { - RegionDesc* del = region; - region = region->GetNextRegion(); - list.DeleteRegion(del); - - garbageSize += CollectRegion(del); - continue; - } - DLOG(REGION, "fix region %p@%#zx+%zu", region, region->GetRegionStart(), region->GetLiveByteCount()); - FixRegion(collector, region); - region = region->GetNextRegion(); - } - stats.pinnedGarbageSize += garbageSize; -} - -void RegionManager::FixAllRegionLists() -{ - TraceCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); - - // fix all objects. - if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { - FixOldRegionList(collector, largeRegionList_); - FixOldRegionList(collector, appSpawnRegionList_); - - // fix survived object but should be with line judgement. - FixRecentOldRegionList(collector, recentLargeRegionList_); - FixRecentOldRegionList(collector, recentPinnedRegionList_); - FixRecentOldRegionList(collector, rawPointerRegionList_); - FixOldRegionList(collector, pinnedRegionList_); - for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - FixRecentOldRegionList(collector, *recentFixedPinnedRegionList_[i]); - FixOldRegionList(collector, *fixedPinnedRegionList_[i]); - } - return; - } - GCStats& stats = Heap::GetHeap().GetCollector().GetGCStats(); - // fix only survived objects. - FixRegionList(collector, largeRegionList_); - FixRegionList(collector, appSpawnRegionList_); - - // fix survived object but should be with line judgement. 
- FixRecentRegionList(collector, recentLargeRegionList_); - FixRecentRegionList(collector, recentPinnedRegionList_); - FixRecentRegionList(collector, rawPointerRegionList_); - FixPinnedRegionList(collector, pinnedRegionList_, stats); - for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - FixRecentRegionList(collector, *recentFixedPinnedRegionList_[i]); - FixFixedRegionList(collector, *fixedPinnedRegionList_[i], i, stats); - } -} - -size_t RegionManager::CollectLargeGarbage() -{ - OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::CollectLargeGarbage", ""); - size_t garbageSize = 0; - TraceCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); - RegionDesc* region = largeRegionList_.GetHeadRegion(); - while (region != nullptr) { - HeapAddress addr = region->GetRegionStart(); - BaseObject *obj = reinterpret_cast(addr); - - if (region->IsJitFortAwaitInstallFlag()) { - region = region->GetNextRegion(); - continue; - } - if (!collector.IsSurvivedObject(obj) && !region->IsNewObjectSinceTrace(obj)) { - DLOG(REGION, "reclaim large region %p@0x%zx+%zu type %u", region, region->GetRegionStart(), - region->GetRegionAllocatedSize(), region->GetRegionType()); - - RegionDesc* del = region; - region = region->GetNextRegion(); - largeRegionList_.DeleteRegion(del); - if (IsMachineCodeObject(reinterpret_cast(obj))) { - JitFortUnProt(del->GetRegionSize(), reinterpret_cast(obj)); - } - if (del->GetRegionSize() > RegionDesc::LARGE_OBJECT_RELEASE_THRESHOLD) { - garbageSize += ReleaseRegion(del); - } else { - garbageSize += CollectRegion(del); - } - } else { - DLOG(REGION, "clear mark-bit for large region %p@0x%zx+%zu type %u", region, region->GetRegionStart(), - region->GetRegionAllocatedSize(), region->GetRegionType()); - region = region->GetNextRegion(); - } - } - - region = recentLargeRegionList_.GetHeadRegion(); - while (region != nullptr) { - region = region->GetNextRegion(); - } - - return garbageSize; -} - #if defined(GCINFO_DEBUG) && GCINFO_DEBUG void 
RegionManager::DumpRegionDesc() const { @@ -861,49 +608,13 @@ void RegionManager::DumpRegionStats() const size_t totalUnits = totalSize / RegionDesc::UNIT_SIZE; size_t activeSize = inactiveZone_ - regionHeapStart_; size_t activeUnits = activeSize / RegionDesc::UNIT_SIZE; - - size_t garbageRegions = garbageRegionList_.GetRegionCount(); - size_t garbageUnits = garbageRegionList_.GetUnitCount(); - size_t garbageSize = garbageUnits * RegionDesc::UNIT_SIZE; - size_t allocGarbageSize = garbageRegionList_.GetAllocatedSize(); - - size_t pinnedRegions = pinnedRegionList_.GetRegionCount(); - size_t pinnedUnits = pinnedRegionList_.GetUnitCount(); - size_t pinnedSize = pinnedUnits * RegionDesc::UNIT_SIZE; - size_t allocPinnedSize = pinnedRegionList_.GetAllocatedSize(); - - size_t recentPinnedRegions = recentPinnedRegionList_.GetRegionCount(); - size_t recentPinnedUnits = recentPinnedRegionList_.GetUnitCount(); - size_t recentPinnedSize = recentPinnedUnits * RegionDesc::UNIT_SIZE; - size_t allocRecentPinnedSize = recentPinnedRegionList_.GetAllocatedSize(); - - size_t largeRegions = largeRegionList_.GetRegionCount(); - size_t largeUnits = largeRegionList_.GetUnitCount(); - size_t largeSize = largeUnits * RegionDesc::UNIT_SIZE; - size_t allocLargeSize = largeRegionList_.GetAllocatedSize(); - - size_t recentlargeRegions = recentLargeRegionList_.GetRegionCount(); - size_t recentlargeUnits = recentLargeRegionList_.GetUnitCount(); - size_t recentLargeSize = recentlargeUnits * RegionDesc::UNIT_SIZE; - size_t allocRecentLargeSize = recentLargeRegionList_.GetAllocatedSize(); - - size_t releasedUnits = freeRegionManager_.GetReleasedUnitCount(); - size_t dirtyUnits = freeRegionManager_.GetDirtyUnitCount(); - VLOG(DEBUG, "\ttotal units: %zu (%zu B)", totalUnits, totalSize); VLOG(DEBUG, "\tactive units: %zu (%zu B)", activeUnits, activeSize); - VLOG(DEBUG, "\tgarbage regions %zu: %zu units (%zu B, alloc %zu)", - garbageRegions, garbageUnits, garbageSize, allocGarbageSize); - VLOG(DEBUG, 
"\tpinned regions %zu: %zu units (%zu B, alloc %zu)", - pinnedRegions, pinnedUnits, pinnedSize, allocPinnedSize); - VLOG(DEBUG, "\trecent pinned regions %zu: %zu units (%zu B, alloc %zu)", - recentPinnedRegions, recentPinnedUnits, recentPinnedSize, allocRecentPinnedSize); - VLOG(DEBUG, "\tlarge-object regions %zu: %zu units (%zu B, alloc %zu)", - largeRegions, largeUnits, largeSize, allocLargeSize); - VLOG(DEBUG, "\trecent large-object regions %zu: %zu units (%zu B, alloc %zu)", - recentlargeRegions, recentlargeUnits, recentLargeSize, allocRecentLargeSize); + garbageRegionList_.DumpRegionSummary(); + size_t releasedUnits = freeRegionManager_.GetReleasedUnitCount(); + size_t dirtyUnits = freeRegionManager_.GetDirtyUnitCount(); VLOG(DEBUG, "\treleased units: %zu (%zu B)", releasedUnits, releasedUnits * RegionDesc::UNIT_SIZE); VLOG(DEBUG, "\tdirty units: %zu (%zu B)", dirtyUnits, dirtyUnits * RegionDesc::UNIT_SIZE); } @@ -917,7 +628,7 @@ void RegionManager::RequestForRegion(size_t size) Heap& heap = Heap::GetHeap(); GCStats& gcstats = heap.GetCollector().GetGCStats(); - size_t allocatedBytes = GetAllocatedSize() - gcstats.liveBytesAfterGC; + size_t allocatedBytes = heap.GetAllocatedSize() - gcstats.liveBytesAfterGC; constexpr double pi = 3.14; size_t availableBytesAfterGC = heap.GetMaxCapacity() - gcstats.liveBytesAfterGC; double heuAllocRate = std::cos((pi / 2.0) * allocatedBytes / availableBytesAfterGC) * gcstats.collectionRate; @@ -939,89 +650,4 @@ void RegionManager::RequestForRegion(size_t size) std::this_thread::sleep_for(std::chrono::nanoseconds{ sleepTime }); prevRegionAllocTime_ = TimeUtil::NanoSeconds(); } - -uintptr_t RegionManager::AllocPinnedFromFreeList(size_t cellCount) -{ - GCPhase mutatorPhase = Mutator::GetMutator()->GetMutatorPhase(); - // workaround: make sure once fixline is set, newly allocated objects are after fixline - if (mutatorPhase == GC_PHASE_FIX || mutatorPhase == GC_PHASE_MARK) { - return 0; - } - - RegionList* list = 
fixedPinnedRegionList_[cellCount]; - std::lock_guard lock(list->GetListMutex()); - uintptr_t allocPtr = list->AllocFromFreeListInLock(); - // For making bitmap comform with live object count, do not mark object repeated. - if (allocPtr == 0 || mutatorPhase == GCPhase::GC_PHASE_IDLE) { - return allocPtr; - } - - // Mark new allocated pinned object. - BaseObject* object = reinterpret_cast(allocPtr); - (reinterpret_cast(&Heap::GetHeap().GetCollector()))->MarkObject(object, cellCount); - return allocPtr; -} - -uintptr_t RegionManager::AllocReadOnly(size_t size, bool allowGC) -{ - uintptr_t addr = 0; - std::mutex& regionListMutex = readOnlyRegionList_.GetListMutex(); - - std::lock_guard lock(regionListMutex); - RegionDesc* headRegion = readOnlyRegionList_.GetHeadRegion(); - if (headRegion != nullptr) { - addr = headRegion->Alloc(size); - } - if (addr == 0) { - RegionDesc* region = - TakeRegion(maxUnitCountPerRegion_, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, allowGC); - if (region == nullptr) { - return 0; - } - DLOG(REGION, "alloc read only region @0x%zx+%zu type %u", region->GetRegionStart(), - region->GetRegionAllocatedSize(), - region->GetRegionType()); - GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); - if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || - phase == GC_PHASE_POST_MARK) { - region->SetTraceLine(); - } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { - region->SetTraceLine(); - region->SetCopyLine(); - } else if (phase == GC_PHASE_FIX) { - region->SetTraceLine(); - region->SetCopyLine(); - region->SetFixLine(); - } - - // To make sure the allocedSize are consistent, it must prepend region first then alloc object. 
- readOnlyRegionList_.PrependRegionLocked(region, RegionDesc::RegionType::READ_ONLY_REGION); - addr = region->Alloc(size); - } - - DLOG(ALLOC, "alloc read only obj 0x%zx(%zu)", addr, size); - return addr; -} - -void RegionManager::VisitRememberSet(const std::function& func) -{ - auto visitFunc = [&func](RegionDesc* region) { - region->VisitAllObjectsBeforeTrace([®ion, &func](BaseObject* obj) { - if (region->IsInRSet(obj)) { - func(obj); - } - }); - }; - recentPinnedRegionList_.VisitAllRegions(visitFunc); - pinnedRegionList_.VisitAllRegions(visitFunc); - recentLargeRegionList_.VisitAllRegions(visitFunc); - largeRegionList_.VisitAllRegions(visitFunc); - appSpawnRegionList_.VisitAllRegions(visitFunc); - rawPointerRegionList_.VisitAllRegions(visitFunc); - - for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - recentFixedPinnedRegionList_[i]->VisitAllRegions(visitFunc); - fixedPinnedRegionList_[i]->VisitAllRegions(visitFunc); - } -} -} // namespace common +} \ No newline at end of file diff --git a/common_components/heap/allocator/region_manager.h b/common_components/heap/allocator/region_manager.h index 2a786da2a40e4bbeed293f2d2ac576b21695143e..3d8f044cfd24446d8a90b1eda27d881cae00a831 100755 --- a/common_components/heap/allocator/region_manager.h +++ b/common_components/heap/allocator/region_manager.h @@ -27,12 +27,12 @@ #include "common_components/heap/allocator/allocator.h" #include "common_components/heap/allocator/free_region_manager.h" #include "common_components/heap/allocator/region_list.h" +#include "common_components/heap/allocator/fix_heap.h" #include "common_components/heap/allocator/slot_list.h" +#include "common_components/common_runtime/hooks.h" namespace common { -using JitFortUnProtHookType = void (*)(size_t size, void* base); - -class TraceCollector; +class MarkingCollector; class CompactCollector; class RegionManager; class Taskpool; @@ -40,17 +40,18 @@ class Taskpool; // and thus its Alloc should be rewrite with AllocObj(objSize) class 
RegionManager { public: - constexpr static size_t FIXED_PINNED_REGION_COUNT = 128; - constexpr static size_t FIXED_PINNED_THRESHOLD = sizeof(uint64_t) * FIXED_PINNED_REGION_COUNT; /* region memory layout: - 1. region info for each region, part of heap metadata - 2. region space for allocation, i.e., the heap + 1. some paddings memory to aligned + 2. region info for each region, part of heap metadata + 3. region space for allocation, i.e., the heap --- start address is aligend to `RegionDesc::UNIT_SIZE` */ static size_t GetHeapMemorySize(size_t heapSize) { size_t regionNum = GetHeapUnitCount(heapSize); size_t metadataSize = GetMetadataSize(regionNum); - size_t totalSize = metadataSize + RoundUp(heapSize, RegionDesc::UNIT_SIZE); + // Add one more `RegionDesc::UNIT_SIZE` totalSize, because we need the region address is aligned to + // `RegionDesc::UNIT_SIZE`, this need some paddings + size_t totalSize = metadataSize + RoundUp(heapSize, RegionDesc::UNIT_SIZE) + RegionDesc::UNIT_SIZE; return totalSize; } @@ -67,37 +68,15 @@ public: return RoundUp(metadataSize, COMMON_PAGE_SIZE); } - static void FixRegionList(TraceCollector& collector, RegionList& list); - static void FixRecentRegionList(TraceCollector& collector, RegionList& list); - static void FixToRegionList(TraceCollector& collector, RegionList& list); - static void FixOldRegionList(TraceCollector& collector, RegionList& list); - static void FixRecentOldRegionList(TraceCollector& collector, RegionList& list); - void Initialize(size_t regionNum, uintptr_t regionInfoStart); RegionManager() - : freeRegionManager_(*this), garbageRegionList_("garbage regions"), - pinnedRegionList_("pinned regions"), recentPinnedRegionList_("recent pinned regions"), - rawPointerRegionList_("raw pointer pinned regions"), largeRegionList_("large regions"), - recentLargeRegionList_("recent large regions"), readOnlyRegionList_("read only region"), - largeTraceRegions_("large trace regions"), appSpawnRegionList_("appSpawn regions") - { - for 
(size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - recentFixedPinnedRegionList_[i] = new RegionList("fixed recent pinned regions"); - fixedPinnedRegionList_[i] = new RegionList("fixed pinned regions"); - } - } + : freeRegionManager_(*this), garbageRegionList_("garbage regions") {} RegionManager(const RegionManager&) = delete; RegionManager& operator=(const RegionManager&) = delete; - void FixAllRegionLists(); - void FixPinnedRegionList(TraceCollector& collector, RegionList& list, GCStats& stats); - void FixFixedRegionList(TraceCollector& collector, RegionList& list, size_t cellCount, GCStats& stats); - - using RootSet = MarkStack; - #if defined(GCINFO_DEBUG) && GCINFO_DEBUG void DumpRegionDesc() const; #endif @@ -116,193 +95,38 @@ public: return nullptr; } - ~RegionManager() - { - for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - if (recentFixedPinnedRegionList_[i] != nullptr) { - delete recentFixedPinnedRegionList_[i]; - recentFixedPinnedRegionList_[i] = nullptr; - } - if (fixedPinnedRegionList_[i] != nullptr) { - delete fixedPinnedRegionList_[i]; - fixedPinnedRegionList_[i] = nullptr; - } - } - } - // take a region with *num* units for allocation - RegionDesc* TakeRegion(size_t num, RegionDesc::UnitRole, bool expectPhysicalMem = false, bool allowgc = true); + ~RegionManager() {} - RegionDesc* TakeRegion(bool expectPhysicalMem, bool allowgc) - { - return TakeRegion(maxUnitCountPerRegion_, RegionDesc::UnitRole::SMALL_SIZED_UNITS, expectPhysicalMem, allowgc); - } - - void AddRecentPinnedRegion(RegionDesc* region) - { - recentPinnedRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_PINNED_REGION); - } - - uintptr_t AllocPinnedFromFreeList(size_t size); - - uintptr_t AllocReadOnly(size_t size, bool allowGC = true); - - uintptr_t AllocPinned(size_t size, bool allowGC = true) - { - uintptr_t addr = 0; - if (!allowGC || size > FIXED_PINNED_THRESHOLD) { - DLOG(ALLOC, "alloc pinned obj 0x%zx(%zu)", addr, size); - return AllocNextFitPinned(size); 
- } - CHECK_CC(size % sizeof(uint64_t) == 0); - size_t cellCount = size / sizeof(uint64_t) - 1; - RegionList* list = recentFixedPinnedRegionList_[cellCount]; - std::mutex& listMutex = list->GetListMutex(); - listMutex.lock(); - RegionDesc* headRegion = list->GetHeadRegion(); - if (headRegion != nullptr) { - addr = headRegion->Alloc(size); - } - if (addr == 0) { - addr = AllocPinnedFromFreeList(cellCount); - } - if (addr == 0) { - RegionDesc* region = - TakeRegion(maxUnitCountPerRegion_, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, allowGC); - if (region == nullptr) { - listMutex.unlock(); - return 0; - } - DLOG(REGION, "alloc pinned region @0x%zx+%zu type %u", region->GetRegionStart(), - region->GetRegionAllocatedSize(), - region->GetRegionType()); - ASSERT(cellCount == static_cast(static_cast(cellCount))); - region->SetRegionCellCount(static_cast(cellCount)); - GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); - if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || - phase == GC_PHASE_REMARK_SATB || phase == GC_PHASE_POST_MARK) { - region->SetTraceLine(); - } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { - region->SetTraceLine(); - region->SetCopyLine(); - } else if (phase == GC_PHASE_FIX) { - region->SetTraceLine(); - region->SetCopyLine(); - region->SetFixLine(); - } - // To make sure the allocedSize are consistent, it must prepend region first then alloc object. 
- list->PrependRegionLocked(region, RegionDesc::RegionType::FIXED_PINNED_REGION); - addr = region->Alloc(size); - } - DLOG(ALLOC, "alloc pinned obj 0x%zx(%zu)", addr, size); - listMutex.unlock(); - return addr; - } - - uintptr_t AllocNextFitPinned(size_t size, bool allowGC = true) - { - uintptr_t addr = 0; - std::mutex& regionListMutex = recentPinnedRegionList_.GetListMutex(); - - std::lock_guard lock(regionListMutex); - RegionDesc* headRegion = recentPinnedRegionList_.GetHeadRegion(); - if (headRegion != nullptr) { - addr = headRegion->Alloc(size); - } - if (addr == 0) { - RegionDesc* region = - TakeRegion(maxUnitCountPerRegion_, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, allowGC); - if (region == nullptr) { - return 0; - } - DLOG(REGION, "alloc pinned region @0x%zx+%zu type %u", region->GetRegionStart(), - region->GetRegionAllocatedSize(), - region->GetRegionType()); - GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); - if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || - phase == GC_PHASE_POST_MARK) { - region->SetTraceLine(); - } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { - region->SetTraceLine(); - region->SetCopyLine(); - } else if (phase == GC_PHASE_FIX) { - region->SetTraceLine(); - region->SetCopyLine(); - region->SetFixLine(); - } - - // To make sure the allocedSize are consistent, it must prepend region first then alloc object. 
- recentPinnedRegionList_.PrependRegionLocked(region, RegionDesc::RegionType::RECENT_PINNED_REGION); - addr = region->Alloc(size); - } - - DLOG(ALLOC, "alloc pinned obj 0x%zx(%zu)", addr, size); - return addr; - } + // take a region with *num* units for allocation + RegionDesc* TakeRegion(size_t num, RegionDesc::UnitRole, bool expectPhysicalMem = false, bool allowgc = true, + bool isCopy = false); - // note: AllocSmall() is always performed by region owned by mutator thread - // thus no need to do in RegionManager - // caller assures size is truely large (> region size) - uintptr_t AllocLarge(size_t size, bool allowGC = true) + RegionDesc* TakeRegion(bool expectPhysicalMem, bool allowgc, bool isCopy = false) { - size_t regionCount = (size + RegionDesc::UNIT_SIZE - 1) / RegionDesc::UNIT_SIZE; - RegionDesc* region = TakeRegion(regionCount, RegionDesc::UnitRole::LARGE_SIZED_UNITS, false, allowGC); - if (region == nullptr) { - return 0; - } - GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); - if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || - phase == GC_PHASE_POST_MARK) { - region->SetTraceLine(); - } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { - region->SetTraceLine(); - region->SetCopyLine(); - } else if (phase == GC_PHASE_FIX) { - region->SetTraceLine(); - region->SetCopyLine(); - region->SetFixLine(); - } - - DLOG(REGION, "alloc large region @0x%zx+%zu type %u", region->GetRegionStart(), - region->GetRegionSize(), region->GetRegionType()); - uintptr_t addr = region->Alloc(size); - recentLargeRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_LARGE_REGION); - return addr; + return TakeRegion(1, RegionDesc::UnitRole::SMALL_SIZED_UNITS, expectPhysicalMem, allowgc, isCopy); } void CountLiveObject(const BaseObject* obj); - void AssembleLargeGarbageCandidates(); - void AssemblePinnedGarbageCandidates(); - - void ReassembleAppspawnSpace(RegionList& regionList) - { - 
appSpawnRegionList_.MergeRegionList(regionList, RegionDesc::RegionType::APPSPAWN_REGION); - } - void CollectFromSpaceGarbage(RegionList& fromList) { garbageRegionList_.MergeRegionList(fromList, RegionDesc::RegionType::GARBAGE_REGION); } - void AddRawPointerRegion(RegionDesc* region) - { - rawPointerRegionList_.PrependRegion(region, RegionDesc::RegionType::RAW_POINTER_REGION); - } - size_t CollectRegion(RegionDesc* region) { DLOG(REGION, "collect region %p@%#zx+%zu type %u", region, region->GetRegionStart(), region->GetLiveByteCount(), region->GetRegionType()); - garbageRegionList_.PrependRegion(region, RegionDesc::RegionType::GARBAGE_REGION); #ifdef USE_HWASAN - ASAN_POISON_MEMORY_REGION(reinterpret_cast(region->GetRegionStart()), - region->GetRegionSize()); - const uintptr_t p_addr = region->GetRegionStart(); - const uintptr_t p_size = region->GetRegionSize(); + ASAN_POISON_MEMORY_REGION(reinterpret_cast(region->GetRegionBase()), + region->GetRegionBaseSize()); + const uintptr_t p_addr = region->GetRegionBase(); + const uintptr_t p_size = region->GetRegionBaseSize(); LOG_COMMON(DEBUG) << std::hex << "set [" << p_addr << std::hex << ", " << p_addr + p_size << ") poisoned\n"; #endif + garbageRegionList_.PrependRegion(region, RegionDesc::RegionType::GARBAGE_REGION); if (region->IsLargeRegion()) { return region->GetRegionSize(); } else { @@ -322,62 +146,16 @@ public: } } - size_t CollectLargeGarbage(); - // targetSize: size of memory which we do not release and keep it as cache for future allocation. 
size_t ReleaseGarbageRegions(size_t targetSize) { return freeRegionManager_.ReleaseGarbageRegions(targetSize); } void ForEachObjectUnsafe(const std::function& visitor) const; void ForEachObjectSafe(const std::function& visitor) const; - void ForEachAwaitingJitFortUnsafe(const std::function& visitor) const; - - size_t GetUsedRegionSize() const { return GetUsedUnitCount() * RegionDesc::UNIT_SIZE; } - - size_t GetRecentAllocatedSize() const - { - return recentLargeRegionList_.GetAllocatedSize() + recentPinnedRegionList_.GetAllocatedSize(); - } - - size_t GetSurvivedSize() const - { - return pinnedRegionList_.GetAllocatedSize() + largeRegionList_.GetAllocatedSize(); - } - - size_t GetUsedUnitCount() const - { - return largeRegionList_.GetUnitCount() + recentLargeRegionList_.GetUnitCount() + - pinnedRegionList_.GetUnitCount() + recentPinnedRegionList_.GetUnitCount() + - rawPointerRegionList_.GetUnitCount() + readOnlyRegionList_.GetUnitCount() + - largeTraceRegions_.GetUnitCount() + appSpawnRegionList_.GetUnitCount(); - } size_t GetDirtyUnitCount() const { return freeRegionManager_.GetDirtyUnitCount(); } size_t GetInactiveUnitCount() const { return (regionHeapEnd_ - inactiveZone_) / RegionDesc::UNIT_SIZE; } - - inline size_t GetLargeObjectSize() const - { - return largeRegionList_.GetAllocatedSize() + recentLargeRegionList_.GetAllocatedSize(); - } - - size_t GetAllocatedSize() const - { - return largeRegionList_.GetAllocatedSize() + recentLargeRegionList_.GetAllocatedSize() + - pinnedRegionList_.GetAllocatedSize() + recentPinnedRegionList_.GetAllocatedSize() + - rawPointerRegionList_.GetAllocatedSize() + readOnlyRegionList_.GetAllocatedSize() + - largeTraceRegions_.GetAllocatedSize() + appSpawnRegionList_.GetAllocatedSize(); - } - - inline size_t GetPinnedSpaceSize() const - { - size_t pinnedSpaceSize = - pinnedRegionList_.GetAllocatedSize() + recentPinnedRegionList_.GetAllocatedSize(); - for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - pinnedSpaceSize += 
recentFixedPinnedRegionList_[i]->GetAllocatedSize(); - pinnedSpaceSize += fixedPinnedRegionList_[i]->GetAllocatedSize(); - } - return pinnedSpaceSize; - } + size_t GetActiveSize() const { return inactiveZone_ - regionHeapStart_; } RegionDesc* GetNextNeighborRegion(RegionDesc* region) const { @@ -388,134 +166,11 @@ public: return nullptr; } - size_t GetLargeObjectThreshold() const { return largeObjectThreshold_; } - // this method checks whether allocation is permitted for now, otherwise, it is suspened // until allocation does no harm to gc. void RequestForRegion(size_t size); - void SetMaxUnitCountForRegion(); - void SetLargeObjectThreshold(); - - void PrepareTrace() - { - AllocBufferVisitor visitor = [](AllocationBuffer& regionBuffer) { - RegionDesc* region = regionBuffer.GetRegion(); - if (region != RegionDesc::NullRegion()) { - region->SetTraceLine(); - } - }; - Heap::GetHeap().GetAllocator().VisitAllocBuffers(visitor); - - RegionDesc* pinRegion = recentPinnedRegionList_.GetHeadRegion(); - if (pinRegion != nullptr && pinRegion != RegionDesc::NullRegion()) { - pinRegion->SetTraceLine(); - } - - RegionDesc* readOnlyRegion = readOnlyRegionList_.GetHeadRegion(); - if (readOnlyRegion != nullptr && readOnlyRegion != RegionDesc::NullRegion()) { - readOnlyRegion->SetTraceLine(); - } - - for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - RegionDesc* region = recentFixedPinnedRegionList_[i]->GetHeadRegion(); - if (region != nullptr && region != RegionDesc::NullRegion()) { - region->SetTraceLine(); - } - } - } - - void PrepareForward() - { - AllocBufferVisitor visitor = [](AllocationBuffer& regionBuffer) { - RegionDesc* region = regionBuffer.GetRegion(); - if (region != RegionDesc::NullRegion()) { - region->SetCopyLine(); - } - }; - Heap::GetHeap().GetAllocator().VisitAllocBuffers(visitor); - } - - void PrepareFix() - { - AllocBufferVisitor visitor = [](AllocationBuffer& regionBuffer) { - RegionDesc* region = regionBuffer.GetRegion(); - if (region != 
RegionDesc::NullRegion()) { - region->SetFixLine(); - } - }; - Heap::GetHeap().GetAllocator().VisitAllocBuffers(visitor); - } - - void PrepareFixForPin() - { - RegionDesc* region = recentPinnedRegionList_.GetHeadRegion(); - if (region != nullptr && region != RegionDesc::NullRegion()) { - region->SetFixLine(); - } - for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - RegionDesc* region = recentFixedPinnedRegionList_[i]->GetHeadRegion(); - if (region != nullptr && region != RegionDesc::NullRegion()) { - region->SetFixLine(); - } - } - } - - void ClearAllGCInfo() - { - ClearGCInfo(largeRegionList_); - ClearGCInfo(recentLargeRegionList_); - ClearGCInfo(recentPinnedRegionList_); - ClearGCInfo(rawPointerRegionList_); - ClearGCInfo(pinnedRegionList_); - ClearGCInfo(readOnlyRegionList_); - ClearGCInfo(appSpawnRegionList_); - for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - ClearGCInfo(*recentFixedPinnedRegionList_[i]); - ClearGCInfo(*fixedPinnedRegionList_[i]); - } - } - - void SetReadOnlyToRORegionList() - { - auto visitor = [](RegionDesc* region) { - if (region != nullptr) { - region->SetReadOnly(); - } - }; - readOnlyRegionList_.VisitAllRegions(visitor); - } - - void ClearReadOnlyFromRORegionList() - { - auto visitor = [](RegionDesc* region) { - if (region != nullptr) { - region->ClearReadOnly(); - } - }; - readOnlyRegionList_.VisitAllRegions(visitor); - } - - void VisitRememberSet(const std::function& func); - void ClearRSet(); - - void MarkJitFortMemInstalled(BaseObject *obj) - { - std::lock_guard guard(awaitingJitFortMutex_); - RegionDesc::GetRegionDescAt(reinterpret_cast(obj))->SetJitFortAwaitInstallFlag(false); - awaitingJitFort_.erase(obj); - } - - void MarkJitFortMemAwaitingInstall(BaseObject *obj) - { - std::lock_guard guard(awaitingJitFortMutex_); - RegionDesc::GetRegionDescAt(reinterpret_cast(obj))->SetJitFortAwaitInstallFlag(true); - awaitingJitFort_.insert(obj); - } - private: - static const size_t MAX_UNIT_COUNT_PER_REGION; - static const 
size_t HUGE_PAGE; inline void TagHugePage(RegionDesc* region, size_t num) const; inline void UntagHugePage(RegionDesc* region, size_t num) const; @@ -524,7 +179,7 @@ private: RegionList tmp("temp region list"); list.CopyListTo(tmp); tmp.VisitAllRegions([](RegionDesc* region) { - region->ClearTraceCopyFixLine(); + region->ClearMarkingCopyLine(); region->ClearLiveInfo(); region->ResetMarkBit(); }); @@ -532,44 +187,9 @@ private: FreeRegionManager freeRegionManager_; - // region lists actually represent life cycle of regions. - // each region must belong to only one list at any time. - // cache for fromRegionList after forwarding. RegionList garbageRegionList_; - // regions for small-sized object which is not movable. - RegionList pinnedRegionList_; - RegionList* fixedPinnedRegionList_[FIXED_PINNED_REGION_COUNT]; - - // regions which allocated since last GC beginning. - // record pinned regions in here first and move those regions - // to pinned/fixedPinned RegionList when gc starts. - RegionList recentPinnedRegionList_; - RegionList* recentFixedPinnedRegionList_[FIXED_PINNED_REGION_COUNT]; - - // region lists for small-sized raw-pointer objects (i.e. future, monitor) - // which can not be moved ever (even during compaction). - RegionList rawPointerRegionList_; // delete rawPointerRegion, use PinnedRegion - - // regions for large-sized objects. - // large region is recorded here after large object is allocated. - RegionList largeRegionList_; - - // large regions which allocated since last GC beginning. - // record pinned regions in here first and move those when gc starts. - RegionList recentLargeRegionList_; - - // regions for read only objects - RegionList readOnlyRegionList_; - - // if large region is allocated during gc trace phase, it is called a trace-region, - // it is recorded here when it is full. - RegionCache largeTraceRegions_; - - // regions for appspawn region list. 
- RegionList appSpawnRegionList_; - uintptr_t regionInfoStart_ = 0; // the address of first RegionDesc uintptr_t regionHeapStart_ = 0; // the address of first region to allocate object @@ -580,12 +200,6 @@ private: // heap space not allocated yet for even once. this value should not be decreased. std::atomic inactiveZone_ = { 0 }; - size_t maxUnitCountPerRegion_ = MAX_UNIT_COUNT_PER_REGION; // max units count for threadLocal buffer. - size_t largeObjectThreshold_; - // Awaiting JitFort object has no references from other objects, - // but we need to keep them as live untill jit compilation has finished installing. - std::set awaitingJitFort_; - std::mutex awaitingJitFortMutex_; friend class VerifyIterator; }; diff --git a/common_components/heap/allocator/region_space.cpp b/common_components/heap/allocator/regional_heap.cpp similarity index 55% rename from common_components/heap/allocator/region_space.cpp rename to common_components/heap/allocator/regional_heap.cpp index 7b6f10dea213ee0773bca3bb480bc5540a4f799d..7ccb1bc526ffefd2804ff9d6244bee40ce198f13 100755 --- a/common_components/heap/allocator/region_space.cpp +++ b/common_components/heap/allocator/regional_heap.cpp @@ -13,7 +13,7 @@ * limitations under the License. 
*/ -#include "common_components/heap/allocator/region_space.h" +#include "common_components/heap/allocator/regional_heap.h" #include "common_components/heap/collector/collector.h" #include "common_components/heap/collector/collector_resources.h" @@ -25,74 +25,76 @@ #include "common_components/heap/heap.h" namespace common { -RegionDesc* RegionSpace::AllocateThreadLocalRegion(bool expectPhysicalMem) +template +RegionDesc* RegionalHeap::AllocateThreadLocalRegion(bool expectPhysicalMem) { - RegionDesc* region = regionManager_.TakeRegion(expectPhysicalMem, true); - if (region != nullptr) { - if (IsGcThread()) { - toSpace_.AddThreadLocalRegion(region); - DLOG(REGION, "alloc to-region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u", - region, region->GetRegionStart(), region->GetRegionSize(), - region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), - region->GetRegionType()); - } else { - GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); - if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || - phase == GC_PHASE_POST_MARK) { - region->SetTraceLine(); - } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { - region->SetTraceLine(); - region->SetCopyLine(); - } else if (phase == GC_PHASE_FIX) { - region->SetTraceLine(); - region->SetCopyLine(); - region->SetFixLine(); - } - - youngSpace_.AddThreadLocalRegion(region); - DLOG(REGION, "alloc tl-region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u, gc phase: %u", - region, region->GetRegionStart(), region->GetRegionSize(), - region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), - region->GetRegionType(), phase); - } + if constexpr (type == AllocBufferType::TO) { + return toSpace_.AllocateThreadLocalRegion(expectPhysicalMem); + } else if constexpr (type == AllocBufferType::YOUNG) { + return youngSpace_.AllocateThreadLocalRegion(expectPhysicalMem); + } else if constexpr (type == AllocBufferType::OLD) { + 
return oldSpace_.AllocateThreadLocalRegion(expectPhysicalMem); } +} - return region; +// used to dump a brief summary of all regions. +void RegionalHeap::DumpAllRegionSummary(const char* msg) const +{ + auto from = fromSpace_.GetAllocatedSize(); + auto exempt = fromSpace_.GetSurvivedSize(); + auto to = toSpace_.GetAllocatedSize(); + auto young = youngSpace_.GetAllocatedSize(); + auto old = oldSpace_.GetAllocatedSize(); + auto other = nonMovableSpace_.GetAllocatedSize() + largeSpace_.GetAllocatedSize() + + appSpawnSpace_.GetAllocatedSize() + readonlySpace_.GetAllocatedSize() + + rawpointerSpace_.GetAllocatedSize(); + + std::ostringstream oss; + oss << msg << "Current allocated: " << Pretty(from + to + young + old + other) << ". (from: " << Pretty(from) + << "(exempt: " << Pretty(exempt) << "), to: " << Pretty(to) << ", young: " << Pretty(young) + << ", old: " << Pretty(old) << ", other: " << Pretty(other) << ")"; + VLOG(DEBUG, oss.str().c_str()); } -void RegionSpace::DumpAllRegionStats(const char* msg) const +// used to dump a detailed information of all regions. 
+void RegionalHeap::DumpAllRegionStats(const char* msg) const { VLOG(DEBUG, msg); - youngSpace_.DumpRegionStats(); oldSpace_.DumpRegionStats(); fromSpace_.DumpRegionStats(); toSpace_.DumpRegionStats(); + nonMovableSpace_.DumpRegionStats(); + largeSpace_.DumpRegionStats(); + appSpawnSpace_.DumpRegionStats(); + rawpointerSpace_.DumpRegionStats(); + readonlySpace_.DumpRegionStats(); + regionManager_.DumpRegionStats(); size_t usedUnits = GetUsedUnitCount(); VLOG(DEBUG, "\tused units: %zu (%zu B)", usedUnits, usedUnits * RegionDesc::UNIT_SIZE); } -HeapAddress RegionSpace::TryAllocateOnce(size_t allocSize, AllocType allocType) +HeapAddress RegionalHeap::TryAllocateOnce(size_t allocSize, AllocType allocType) { - if (UNLIKELY_CC(allocType == AllocType::PINNED_OBJECT)) { - return regionManager_.AllocPinned(allocSize); - } if (UNLIKELY_CC(allocType == AllocType::READ_ONLY_OBJECT)) { - return regionManager_.AllocReadOnly(allocSize); + return readonlySpace_.Alloc(allocSize); } - if (UNLIKELY_CC(allocSize >= regionManager_.GetLargeObjectThreshold())) { - return regionManager_.AllocLarge(allocSize); + if (UNLIKELY_CC(allocSize >= RegionDesc::LARGE_OBJECT_DEFAULT_THRESHOLD)) { + return largeSpace_.Alloc(allocSize); + } + if (UNLIKELY_CC(allocType == AllocType::NONMOVABLE_OBJECT)) { + return nonMovableSpace_.Alloc(allocSize); } AllocationBuffer* allocBuffer = AllocationBuffer::GetOrCreateAllocBuffer(); return allocBuffer->Allocate(allocSize, allocType); } -bool RegionSpace::ShouldRetryAllocation(size_t& tryTimes) const +bool RegionalHeap::ShouldRetryAllocation(size_t& tryTimes) const { { - // check STW request. 
+ // check safepoint ScopedEnterSaferegion enterSaferegion(true); } @@ -103,100 +105,49 @@ bool RegionSpace::ShouldRetryAllocation(size_t& tryTimes) const ScopedEnterSaferegion enterSaferegion(false); Heap::GetHeap().GetCollectorResources().WaitForGCFinish(); } else { - Heap::GetHeap().GetCollector().RequestGC(GC_REASON_HEU, false); + Heap::GetHeap().GetCollector().RequestGC(GC_REASON_HEU, false, GC_TYPE_FULL); } return true; } else if (tryTimes == static_cast(TryAllocationThreshold::TRIGGER_OOM)) { if (!Heap::GetHeap().IsGcStarted()) { VLOG(INFO, "gc is triggered for OOM"); - Heap::GetHeap().GetCollector().RequestGC(GC_REASON_OOM, false); + Heap::GetHeap().GetCollector().RequestGC(GC_REASON_OOM, false, GC_TYPE_FULL); } else { ScopedEnterSaferegion enterSaferegion(false); Heap::GetHeap().GetCollectorResources().WaitForGCFinish(); tryTimes--; } return true; - } else { + } else { //LCOV_EXCL_BR_LINE Heap::throwOOM(); return false; } } -uintptr_t RegionSpace::AllocRegion() +uintptr_t RegionalHeap::AllocOldRegion() { - RegionDesc* region = regionManager_.TakeRegion(false, false); - ASSERT(region != nullptr); - - GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); - if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || - phase == GC_PHASE_POST_MARK) { - region->SetTraceLine(); - } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { - region->SetTraceLine(); - region->SetCopyLine(); - } else if (phase == GC_PHASE_FIX) { - region->SetTraceLine(); - region->SetCopyLine(); - region->SetFixLine(); - } - - DLOG(REGION, "alloc small object region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u", - region, region->GetRegionStart(), region->GetRegionSize(), - region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), - region->GetRegionType()); - youngSpace_.AddFullRegion(region); - - uintptr_t start = region->GetRegionStart(); - uintptr_t addr = region->Alloc(region->GetRegionSize()); - ASSERT(addr 
!= 0); - - return start; + return oldSpace_.AllocFullRegion(); } -uintptr_t RegionSpace::AllocPinnedRegion() +uintptr_t RegionalHeap::AllocateNonMovableRegion() { - RegionDesc* region = regionManager_.TakeRegion(false, false); - ASSERT(region != nullptr); - - GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); - if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || - phase == GC_PHASE_POST_MARK) { - region->SetTraceLine(); - } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { - region->SetTraceLine(); - region->SetCopyLine(); - } else if (phase == GC_PHASE_FIX) { - region->SetTraceLine(); - region->SetCopyLine(); - region->SetFixLine(); - } - - DLOG(REGION, "alloc pinned region @0x%zx+%zu type %u", region->GetRegionStart(), - region->GetRegionAllocatedSize(), - region->GetRegionType()); - regionManager_.AddRecentPinnedRegion(region); - - uintptr_t start = region->GetRegionStart(); - uintptr_t addr = region->Alloc(region->GetRegionSize()); - ASSERT(addr != 0); - - return start; + return nonMovableSpace_.AllocFullRegion(); } -uintptr_t RegionSpace::AllocLargeRegion(size_t size) +uintptr_t RegionalHeap::AllocLargeRegion(size_t size) { - return regionManager_.AllocLarge(size, false); + return largeSpace_.Alloc(size, false); } -uintptr_t RegionSpace::AllocJitFortRegion(size_t size) +uintptr_t RegionalHeap::AllocJitFortRegion(size_t size) { - uintptr_t addr = regionManager_.AllocLarge(size, false); + uintptr_t addr = largeSpace_.Alloc(size, false); os::PrctlSetVMA(reinterpret_cast(addr), size, "ArkTS Code"); - regionManager_.MarkJitFortMemAwaitingInstall(reinterpret_cast(addr)); + MarkJitFortMemAwaitingInstall(reinterpret_cast(addr)); return addr; } -HeapAddress RegionSpace::Allocate(size_t size, AllocType allocType) +HeapAddress RegionalHeap::Allocate(size_t size, AllocType allocType) { size_t tryTimes = 0; uintptr_t internalAddr = 0; @@ -225,17 +176,17 @@ HeapAddress RegionSpace::Allocate(size_t size, AllocType 
allocType) } // Only used for serialization in which allocType and target memory should keep consistency. -HeapAddress RegionSpace::AllocateNoGC(size_t size, AllocType allocType) +HeapAddress RegionalHeap::AllocateNoGC(size_t size, AllocType allocType) { bool allowGC = false; uintptr_t internalAddr = 0; size_t allocSize = ToAllocatedSize(size); - if (UNLIKELY_CC(allocType == AllocType::PINNED_OBJECT)) { - internalAddr = regionManager_.AllocPinned(allocSize, allowGC); - } else if (LIKELY_CC(allocType == AllocType::MOVEABLE_OBJECT)) { + if (UNLIKELY_CC(allocType == AllocType::NONMOVABLE_OBJECT)) { + internalAddr = nonMovableSpace_.Alloc(allocSize, allowGC); + } else if (LIKELY_CC(allocType == AllocType::MOVEABLE_OBJECT || allocType == AllocType::MOVEABLE_OLD_OBJECT)) { AllocationBuffer* allocBuffer = AllocationBuffer::GetOrCreateAllocBuffer(); internalAddr = allocBuffer->Allocate(allocSize, allocType); - } else { + } else { //LCOV_EXCL_BR_LINE // Unreachable for serialization UNREACHABLE_CC(); } @@ -248,7 +199,7 @@ HeapAddress RegionSpace::AllocateNoGC(size_t size, AllocType allocType) return internalAddr + HEADER_SIZE; } -void RegionSpace::CopyRegion(RegionDesc* region) +void RegionalHeap::CopyRegion(RegionDesc* region) { LOGF_CHECK(region->IsFromRegion()) << "region type " << static_cast(region->GetRegionType()); DLOG(COPY, "try forward region %p @0x%zx+%zu type %u, live bytes %u", @@ -276,11 +227,18 @@ void RegionSpace::CopyRegion(RegionDesc* region) } } -void RegionSpace::Init(const RuntimeParam& param) +void RegionalHeap::Init(const RuntimeParam& param) { MemoryMap::Option opt = MemoryMap::DEFAULT_OPTIONS; opt.tag = "region_heap"; size_t heapSize = param.heapParam.heapSize * KB; + +#ifndef PANDA_TARGET_32 + static constexpr uint64_t MAX_SUPPORT_CAPACITY = 4ULL * GB; + // 2: double heap size + LOGF_CHECK((heapSize / 2)<= MAX_SUPPORT_CAPACITY) << "Max support capacity 4G"; +#endif + size_t totalSize = RegionManager::GetHeapMemorySize(heapSize); size_t regionNum = 
RegionManager::GetHeapUnitCount(heapSize); #if defined(COMMON_ASAN_SUPPORT) @@ -329,11 +287,27 @@ AllocationBuffer* AllocationBuffer::GetOrCreateAllocBuffer() void AllocationBuffer::ClearThreadLocalRegion() { if (LIKELY_CC(tlRegion_ != RegionDesc::NullRegion())) { - RegionSpace& heap = reinterpret_cast(Heap::GetHeap().GetAllocator()); - heap.HandleFullThreadLocalRegion(tlRegion_); + RegionalHeap& heap = reinterpret_cast(Heap::GetHeap().GetAllocator()); + heap.HandleFullThreadLocalRegion(tlRegion_); tlRegion_ = RegionDesc::NullRegion(); } + if (LIKELY_CC(tlOldRegion_ != RegionDesc::NullRegion())) { + RegionalHeap& heap = reinterpret_cast(Heap::GetHeap().GetAllocator()); + heap.HandleFullThreadLocalRegion(tlOldRegion_); + tlOldRegion_ = RegionDesc::NullRegion(); + } + if (LIKELY_CC(tlToRegion_ != RegionDesc::NullRegion())) { + RegionalHeap& heap = reinterpret_cast(Heap::GetHeap().GetAllocator()); + heap.HandleFullThreadLocalRegion(tlToRegion_); + tlToRegion_ = RegionDesc::NullRegion(); + } } + +void AllocationBuffer::Unregister() +{ + Heap::GetHeap().UnregisterAllocBuffer(*this); +} + AllocationBuffer* AllocationBuffer::GetAllocBuffer() { return ThreadLocal::GetAllocBuffer(); } AllocationBuffer::~AllocationBuffer() @@ -346,22 +320,30 @@ void AllocationBuffer::Init() static_assert(offsetof(AllocationBuffer, tlRegion_) == 0, "need to modify the offset of this value in llvm-project at the same time"); tlRegion_ = RegionDesc::NullRegion(); + tlOldRegion_ = RegionDesc::NullRegion(); Heap::GetHeap().RegisterAllocBuffer(*this); } -HeapAddress AllocationBuffer::ToSpaceAllocate(size_t totalSize, AllocType allocType) +HeapAddress AllocationBuffer::ToSpaceAllocate(size_t totalSize) { HeapAddress addr = 0; - if (UNLIKELY_CC(allocType == AllocType::RAW_POINTER_OBJECT)) { - return AllocateRawPointerObject(totalSize); - } - - if (LIKELY_CC(tlRegion_ != RegionDesc::NullRegion())) { - addr = tlRegion_->Alloc(totalSize); + if (LIKELY_CC(tlToRegion_ != RegionDesc::NullRegion())) { + 
addr = tlToRegion_->Alloc(totalSize); } if (UNLIKELY_CC(addr == 0)) { - addr = AllocateImpl(totalSize, allocType); + RegionalHeap& heapSpace = reinterpret_cast(Heap::GetHeap().GetAllocator()); + + heapSpace.HandleFullThreadLocalRegion(tlToRegion_); + tlToRegion_ = RegionDesc::NullRegion(); + + RegionDesc* r = heapSpace.AllocateThreadLocalRegion(false); + if (UNLIKELY_CC(r == nullptr)) { + return 0; + } + + tlToRegion_ = r; + addr = tlToRegion_->Alloc(totalSize); } DLOG(ALLOC, "alloc to 0x%zx(%zu)", addr, totalSize); @@ -376,8 +358,17 @@ HeapAddress AllocationBuffer::Allocate(size_t totalSize, AllocType allocType) return AllocateRawPointerObject(totalSize); } - if (LIKELY_CC(tlRegion_ != RegionDesc::NullRegion())) { - addr = tlRegion_->Alloc(totalSize); + ASSERT_LOGF(allocType == AllocType::MOVEABLE_OBJECT || allocType == AllocType::MOVEABLE_OLD_OBJECT, + "unexpected alloc type"); + + if (allocType == AllocType::MOVEABLE_OBJECT) { + if (LIKELY_CC(tlRegion_ != RegionDesc::NullRegion())) { + addr = tlRegion_->Alloc(totalSize); + } + } else if (allocType == AllocType::MOVEABLE_OLD_OBJECT) { + if (LIKELY_CC(tlOldRegion_ != RegionDesc::NullRegion())) { + addr = tlOldRegion_->Alloc(totalSize); + } } if (UNLIKELY_CC(addr == 0)) { @@ -391,28 +382,33 @@ HeapAddress AllocationBuffer::Allocate(size_t totalSize, AllocType allocType) // try an allocation but do not handle failure HeapAddress AllocationBuffer::AllocateImpl(size_t totalSize, AllocType allocType) { - RegionSpace& heapSpace = reinterpret_cast(Heap::GetHeap().GetAllocator()); + RegionalHeap& heapSpace = reinterpret_cast(Heap::GetHeap().GetAllocator()); - // allocate from thread local region - if (LIKELY_CC(tlRegion_ != RegionDesc::NullRegion())) { - HeapAddress addr = tlRegion_->Alloc(totalSize); - if (addr != 0) { - return addr; + // allocate new thread local region and try alloc + if (allocType == AllocType::MOVEABLE_OBJECT) { + heapSpace.HandleFullThreadLocalRegion(tlRegion_); + tlRegion_ = 
RegionDesc::NullRegion(); + + RegionDesc* r = heapSpace.AllocateThreadLocalRegion(false); + if (UNLIKELY_CC(r == nullptr)) { + return 0; } - // allocation failed because region is full. - if (tlRegion_->IsThreadLocalRegion()) { - heapSpace.HandleFullThreadLocalRegion(tlRegion_); - tlRegion_ = RegionDesc::NullRegion(); + tlRegion_ = r; + return tlRegion_->Alloc(totalSize); + } else if (allocType == AllocType::MOVEABLE_OLD_OBJECT) { + heapSpace.HandleFullThreadLocalRegion(tlOldRegion_); + tlOldRegion_ = RegionDesc::NullRegion(); + + RegionDesc* r = heapSpace.AllocateThreadLocalRegion(false); + if (UNLIKELY_CC(r == nullptr)) { + return 0; } - } - RegionDesc* r = heapSpace.AllocateThreadLocalRegion(); - if (UNLIKELY_CC(r == nullptr)) { - return 0; + tlOldRegion_ = r; + return tlOldRegion_->Alloc(totalSize); } - tlRegion_ = r; - return r->Alloc(totalSize); + UNREACHABLE(); } HeapAddress AllocationBuffer::AllocateRawPointerObject(size_t totalSize) @@ -424,7 +420,7 @@ HeapAddress AllocationBuffer::AllocateRawPointerObject(size_t totalSize) return allocAddr; } } - RegionManager& manager = reinterpret_cast(Heap::GetHeap().GetAllocator()).GetRegionManager(); + RegionManager& manager = reinterpret_cast(Heap::GetHeap().GetAllocator()).GetRegionManager(); size_t needRegionNum = totalSize / RegionDesc::UNIT_SIZE + 1; // region should have at least 2 unit needRegionNum = (needRegionNum == 1) ? 
2 : needRegionNum; @@ -440,7 +436,7 @@ HeapAddress AllocationBuffer::AllocateRawPointerObject(size_t totalSize) } #ifndef NDEBUG -bool RegionSpace::IsHeapObject(HeapAddress addr) const +bool RegionalHeap::IsHeapObject(HeapAddress addr) const { if (!IsHeapAddress(addr)) { return false; @@ -448,7 +444,7 @@ bool RegionSpace::IsHeapObject(HeapAddress addr) const return true; } #endif -void RegionSpace::FeedHungryBuffers() +void RegionalHeap::FeedHungryBuffers() { ScopedObjectAccess soa; AllocBufferManager::HungryBuffers hungryBuffers; @@ -457,7 +453,7 @@ void RegionSpace::FeedHungryBuffers() for (auto* buffer : hungryBuffers) { if (buffer->GetPreparedRegion() != nullptr) { continue; } if (region == nullptr) { - region = AllocateThreadLocalRegion(true); + region = AllocateThreadLocalRegion(true); if (region == nullptr) { return; } } if (buffer->SetPreparedRegion(region)) { @@ -469,9 +465,55 @@ void RegionSpace::FeedHungryBuffers() } } -void RegionSpace::VisitRememberSet(const std::function& func) +void RegionalHeap::MarkRememberSet(const std::function& func) +{ + oldSpace_.MarkRememberSet(func); + nonMovableSpace_.MarkRememberSet(func); + largeSpace_.MarkRememberSet(func); + appSpawnSpace_.MarkRememberSet(func); + rawpointerSpace_.MarkRememberSet(func); +} + +void RegionalHeap::ForEachAwaitingJitFortUnsafe(const std::function& visitor) const +{ + ASSERT(BaseRuntime::GetInstance()->GetMutatorManager().WorldStopped()); + for (const auto jitFort : awaitingJitFort_) { + visitor(jitFort); + } +} + +void RegionalHeap::MarkJitFortMemInstalled(void *thread, BaseObject *obj) { - oldSpace_.VisitRememberSet(func); - regionManager_.VisitRememberSet(func); + std::lock_guard guard(awaitingJitFortMutex_); + // GC is running, we should mark JitFort installled after GC finish + if (Heap::GetHeap().GetGCPhase() != GCPhase::GC_PHASE_IDLE) { + jitFortPostGCInstallTask_.emplace(nullptr, obj); + } else { + // a threadlocal JitFort mem + if (thread) { + MarkThreadLocalJitFortInstalled(thread, 
obj); + } else { + RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj))->SetJitFortAwaitInstallFlag(false); + } + awaitingJitFort_.erase(obj); + } } + +void RegionalHeap::MarkJitFortMemAwaitingInstall(BaseObject *obj) +{ + std::lock_guard guard(awaitingJitFortMutex_); + RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj))->SetJitFortAwaitInstallFlag(true); + awaitingJitFort_.insert(obj); +} + +void RegionalHeap::HandlePostGCJitFortInstallTask() +{ + ASSERT(Heap::GetHeap().GetGCPhase() == GCPhase::GC_PHASE_IDLE); + while (!jitFortPostGCInstallTask_.empty()) { + auto [thread, machineCode] = jitFortPostGCInstallTask_.top(); + MarkJitFortMemInstalled(thread, machineCode); + jitFortPostGCInstallTask_.pop(); + } +} + } // namespace common diff --git a/common_components/heap/allocator/region_space.h b/common_components/heap/allocator/regional_heap.h similarity index 55% rename from common_components/heap/allocator/region_space.h rename to common_components/heap/allocator/regional_heap.h index d811ee595ad09473b03c59328dcf7aca48d08ea6..2bc0cc98d5c982ac558a5541bbd8e73f045aa1d8 100755 --- a/common_components/heap/allocator/region_space.h +++ b/common_components/heap/allocator/regional_heap.h @@ -29,7 +29,13 @@ #include "common_components/heap/space/old_space.h" #include "common_components/heap/space/from_space.h" #include "common_components/heap/space/to_space.h" +#include "common_components/heap/space/nonmovable_space.h" #include "common_components/mutator/mutator.h" +#include "common_components/heap/space/appspawn_space.h" +#include "common_components/heap/space/large_space.h" +#include "common_components/heap/space/rawpointer_space.h" +#include "common_components/heap/space/readonly_space.h" +#include "objects/base_object.h" #if defined(COMMON_SANITIZER_SUPPORT) #include "common_components/sanitizer/sanitizer_interface.h" #endif @@ -38,12 +44,11 @@ namespace common { class Taskpool; -// RegionSpace aims to be the API for other components of runtime +// RegionalHeap 
aims to be the API for other components of runtime // the complication of implementation is delegated to RegionManager // allocator should not depend on any assumptions on the details of RegionManager -// todo: Allocator -> BaseAllocator, RegionSpace -> RegionalHeap -class RegionSpace : public Allocator { +class RegionalHeap : public Allocator { public: static size_t ToAllocatedSize(size_t objSize) { @@ -57,9 +62,13 @@ public: return ToAllocatedSize(objSize); } - RegionSpace() : youngSpace_(regionManager_), oldSpace_(regionManager_), - fromSpace_(regionManager_, *this), toSpace_(regionManager_) {} - NO_INLINE_CC virtual ~RegionSpace() + RegionalHeap() : youngSpace_(regionManager_), oldSpace_(regionManager_), + fromSpace_(regionManager_, *this), toSpace_(regionManager_), + nonMovableSpace_(regionManager_), largeSpace_(regionManager_), + appSpawnSpace_(regionManager_), rawpointerSpace_(regionManager_), + readonlySpace_(regionManager_) {} + + NO_INLINE_CC virtual ~RegionalHeap() { if (allocBufferManager_ != nullptr) { delete allocBufferManager_; @@ -73,22 +82,32 @@ public: void Init(const RuntimeParam ¶m) override; + template RegionDesc* AllocateThreadLocalRegion(bool expectPhysicalMem = false); + template void HandleFullThreadLocalRegion(RegionDesc* region) noexcept { - ASSERT_LOGF(region->IsThreadLocalRegion(), "unexpected region type"); - if (IsGcThread()) { - toSpace_.HandleFullThreadLocalRegion(region); - } else { + if (region == RegionDesc::NullRegion()) { + return; + } + ASSERT_LOGF(region->IsThreadLocalRegion() || region->IsToRegion() || region->IsOldRegion(), + "unexpected region type"); + + if constexpr (type == AllocBufferType::YOUNG) { + ASSERT_LOGF(!IsGcThread(), "unexpected gc thread for old space"); youngSpace_.HandleFullThreadLocalRegion(region); + } else if constexpr (type == AllocBufferType::OLD) { + ASSERT_LOGF(!IsGcThread(), "unexpected gc thread for old space"); + oldSpace_.HandleFullThreadLocalRegion(region); + } else if constexpr (type == 
AllocBufferType::TO) { + toSpace_.HandleFullThreadLocalRegion(region); } } // only used for deserialize allocation, allocate one region and regard it as full region - // todo: adapt for concurrent gc - uintptr_t AllocRegion(); - uintptr_t AllocPinnedRegion(); + uintptr_t AllocOldRegion(); + uintptr_t AllocateNonMovableRegion(); uintptr_t AllocLargeRegion(size_t size); uintptr_t AllocJitFortRegion(size_t size); @@ -115,23 +134,28 @@ public: inline size_t GetRecentAllocatedSize() const { - return youngSpace_.GetRecentAllocatedSize() + regionManager_.GetRecentAllocatedSize(); + return youngSpace_.GetRecentAllocatedSize() + nonMovableSpace_.GetRecentAllocatedSize() + + largeSpace_.GetRecentAllocatedSize(); } // size of objects survived in previous gc. - inline size_t GetSurvivedSize() const + size_t GetSurvivedSize() const override { - return fromSpace_.GetSurvivedSize() + toSpace_.GetAllocatedSize() + - youngSpace_.GetAllocatedSize() + oldSpace_.GetAllocatedSize() + regionManager_.GetSurvivedSize(); + return fromSpace_.GetSurvivedSize() + toSpace_.GetAllocatedSize() + youngSpace_.GetAllocatedSize() + + oldSpace_.GetAllocatedSize() + nonMovableSpace_.GetSurvivedSize() + largeSpace_.GetAllocatedSize(); } inline size_t GetUsedUnitCount() const { - return fromSpace_.GetUsedUnitCount() + toSpace_.GetUsedUnitCount() + - youngSpace_.GetUsedUnitCount() + oldSpace_.GetUsedUnitCount() + regionManager_.GetUsedUnitCount(); + return fromSpace_.GetUsedUnitCount() + toSpace_.GetUsedUnitCount() + youngSpace_.GetUsedUnitCount() + + oldSpace_.GetUsedUnitCount() + nonMovableSpace_.GetUsedUnitCount() + largeSpace_.GetUsedUnitCount() + + appSpawnSpace_.GetUsedUnitCount() + readonlySpace_.GetUsedUnitCount() + rawpointerSpace_.GetUsedUnitCount(); } - size_t GetUsedPageSize() const override { return regionManager_.GetUsedRegionSize(); } + size_t GetUsedPageSize() const override + { + return GetUsedUnitCount() * RegionDesc::UNIT_SIZE; + } inline size_t GetTargetSize() const { @@ -141,16 
+165,18 @@ public: size_t GetAllocatedBytes() const override { - return fromSpace_.GetAllocatedSize() + toSpace_.GetAllocatedSize() + - youngSpace_.GetAllocatedSize() + oldSpace_.GetAllocatedSize() + regionManager_.GetAllocatedSize(); + return fromSpace_.GetAllocatedSize() + toSpace_.GetAllocatedSize() + youngSpace_.GetAllocatedSize() + + oldSpace_.GetAllocatedSize() + nonMovableSpace_.GetAllocatedSize() + LargeObjectSize(); } - size_t LargeObjectSize() const override { return regionManager_.GetLargeObjectSize(); } + size_t LargeObjectSize() const override { return largeSpace_.GetAllocatedSize(); } size_t FromSpaceSize() const { return fromSpace_.GetAllocatedSize(); } + // note: it doesn't contain exemptFromRegion + size_t FromRegionSize() const { return fromSpace_.GetFromRegionAllocatedSize(); } size_t ToSpaceSize() const { return toSpace_.GetAllocatedSize(); } - size_t PinnedSpaceSize() const { return regionManager_.GetPinnedSpaceSize(); } + size_t NonMovableSpaceSize() const { return nonMovableSpace_.GetAllocatedSize(); } #ifndef NDEBUG bool IsHeapObject(HeapAddress addr) const override; @@ -193,7 +219,7 @@ public: BaseObject* RouteObject(BaseObject* fromObj, size_t size) { AllocationBuffer* buffer = AllocationBuffer::GetOrCreateAllocBuffer(); - uintptr_t toAddr = buffer->ToSpaceAllocate(size, AllocType::MOVEABLE_OBJECT); + uintptr_t toAddr = buffer->ToSpaceAllocate(size); return reinterpret_cast(toAddr); } @@ -203,23 +229,44 @@ public: fromSpace_.CopyFromRegions(threadPool); } - void FixHeap() + FixHeapTaskList CollectFixTasks() { - youngSpace_.FixAllRegions(); - oldSpace_.FixAllRegions(); - fromSpace_.FixAllRegions(); - toSpace_.FixAllRegions(); - regionManager_.FixAllRegionLists(); + FixHeapTaskList taskList; + youngSpace_.CollectFixTasks(taskList); + oldSpace_.CollectFixTasks(taskList); + fromSpace_.CollectFixTasks(taskList); + toSpace_.CollectFixTasks(taskList); + nonMovableSpace_.CollectFixTasks(taskList); + largeSpace_.CollectFixTasks(taskList); + 
rawpointerSpace_.CollectFixTasks(taskList); + appSpawnSpace_.CollectFixTasks(taskList); + + return taskList; } void MarkAwaitingJitFort() { - regionManager_.ForEachAwaitingJitFortUnsafe(MarkObject); + ForEachAwaitingJitFortUnsafe(MarkObject); } - using RootSet = MarkStack; + void ClearJitFortAwaitingMark() + { + HandlePostGCJitFortInstallTask(); + } - size_t CollectLargeGarbage() { return regionManager_.CollectLargeGarbage(); } + void MarkJitFortMemInstalled(void *thread, BaseObject *obj); + + void SetReadOnlyToROSpace() + { + readonlySpace_.SetReadOnlyToRORegionList(); + } + + void ClearReadOnlyFromROSpace() + { + readonlySpace_.ClearReadOnlyFromRORegionList(); + } + + size_t CollectLargeGarbage() { return largeSpace_.CollectLargeGarbage(); } void CollectFromSpaceGarbage() { @@ -235,120 +282,153 @@ public: void AssembleSmallGarbageCandidates() { youngSpace_.AssembleGarbageCandidates(fromSpace_); + oldSpace_.AssembleRecentFull(); if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) { oldSpace_.ClearRSet(); oldSpace_.AssembleGarbageCandidates(fromSpace_); - regionManager_.ClearRSet(); + nonMovableSpace_.ClearRSet(); + largeSpace_.ClearRSet(); + appSpawnSpace_.ClearRSet(); + rawpointerSpace_.ClearRSet(); } } void CollectAppSpawnSpaceGarbage() { regionManager_.CollectFromSpaceGarbage(fromSpace_.GetFromRegionList()); - regionManager_.ReassembleAppspawnSpace(fromSpace_.GetExemptedRegionList()); - regionManager_.ReassembleAppspawnSpace(toSpace_.GetTlToRegionList()); - regionManager_.ReassembleAppspawnSpace(toSpace_.GetFullToRegionList()); + appSpawnSpace_.ReassembleAppspawnSpace(fromSpace_.GetExemptedRegionList()); + appSpawnSpace_.ReassembleAppspawnSpace(toSpace_.GetTlToRegionList()); + appSpawnSpace_.ReassembleAppspawnSpace(toSpace_.GetFullToRegionList()); } void ClearAllGCInfo() { - regionManager_.ClearAllGCInfo(); youngSpace_.ClearAllGCInfo(); oldSpace_.ClearAllGCInfo(); toSpace_.ClearAllGCInfo(); fromSpace_.ClearAllGCInfo(); + nonMovableSpace_.ClearAllGCInfo(); + 
largeSpace_.ClearAllGCInfo(); + appSpawnSpace_.ClearAllGCInfo(); + rawpointerSpace_.ClearAllGCInfo(); + readonlySpace_.ClearAllGCInfo(); } void AssembleGarbageCandidates() { AssembleSmallGarbageCandidates(); - regionManager_.AssemblePinnedGarbageCandidates(); - regionManager_.AssembleLargeGarbageCandidates(); + nonMovableSpace_.AssembleGarbageCandidates(); + largeSpace_.AssembleGarbageCandidates(); } + void DumpAllRegionSummary(const char* msg) const; void DumpAllRegionStats(const char* msg) const; void CountLiveObject(const BaseObject* obj) { regionManager_.CountLiveObject(obj); } - void PrepareTrace() { regionManager_.PrepareTrace(); } - void PrepareForward() { regionManager_.PrepareForward(); } - void PrepareFix() { regionManager_.PrepareFix(); } - void PrepareFixForPin() { regionManager_.PrepareFixForPin(); } + void PrepareMarking() + { + AllocBufferVisitor visitor = [](AllocationBuffer& regionBuffer) { + RegionDesc* region = regionBuffer.GetRegion(); + if (region != RegionDesc::NullRegion()) { + region->SetMarkingLine(); + } + region = regionBuffer.GetRegion(); + if (region != RegionDesc::NullRegion()) { + region->SetMarkingLine(); + } + }; + VisitAllocBuffers(visitor); + + nonMovableSpace_.PrepareMarking(); + readonlySpace_.PrepareMarking(); + } + + void PrepareForward() + { + AllocBufferVisitor visitor = [](AllocationBuffer& regionBuffer) { + RegionDesc* region = regionBuffer.GetRegion(); + if (region != RegionDesc::NullRegion()) { + region->SetCopyLine(); + } + region = regionBuffer.GetRegion(); + if (region != RegionDesc::NullRegion()) { + region->SetCopyLine(); + } + }; + VisitAllocBuffers(visitor); + + nonMovableSpace_.PrepareForward(); + readonlySpace_.PrepareForward(); + } void FeedHungryBuffers() override; // markObj static bool MarkObject(const BaseObject* obj) { - RegionDesc* regionInfo = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + RegionDesc* regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); return 
regionInfo->MarkObject(obj); } static bool ResurrentObject(const BaseObject* obj) { - RegionDesc* regionInfo = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + RegionDesc* regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); return regionInfo->ResurrentObject(obj); } static bool EnqueueObject(const BaseObject* obj) { - RegionDesc* regionInfo = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + RegionDesc* regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); return regionInfo->EnqueueObject(obj); } static bool IsMarkedObject(const BaseObject* obj) { - RegionDesc* regionInfo = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + RegionDesc* regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); return regionInfo->IsMarkedObject(obj); } static bool IsResurrectedObject(const BaseObject* obj) { - RegionDesc* regionInfo = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + RegionDesc* regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); return regionInfo->IsResurrectedObject(obj); } static bool IsEnqueuedObject(const BaseObject* obj) { - RegionDesc* regionInfo = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + RegionDesc* regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); return regionInfo->IsEnqueuedObject(obj); } - static bool IsNewObjectSinceTrace(const BaseObject* object) + static bool IsNewObjectSinceMarking(const BaseObject* object) { - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(object)); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(object)); ASSERT_LOGF(region != nullptr, "region is nullptr"); - return region->IsNewObjectSinceTrace(object); + return region->IsNewObjectSinceMarking(object); } static bool IsReadOnlyObject(const BaseObject* object) { - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(object)); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(object)); 
ASSERT_LOGF(region != nullptr, "region is nullptr"); return region->IsReadOnlyRegion(); } static bool IsYoungSpaceObject(const BaseObject* object) { - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(object)); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(object)); ASSERT_LOGF(region != nullptr, "region is nullptr"); return region->IsInYoungSpace(); } - static bool IsInRememberSet(const BaseObject* object) - { - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(object)); - ASSERT_LOGF(region != nullptr, "region is nullptr"); - return region->IsInRSet(const_cast(object)); - } - void AddRawPointerObject(BaseObject* obj) { - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); region->IncRawPointerObjectCount(); if (region->IsFromRegion() && fromSpace_.TryDeleteFromRegion(region, RegionDesc::RegionType::FROM_REGION, RegionDesc::RegionType::RAW_POINTER_REGION)) { GCPhase phase = Heap::GetHeap().GetGCPhase(); CHECK(phase != GCPhase::GC_PHASE_COPY && phase != GCPhase::GC_PHASE_PRECOPY); - regionManager_.AddRawPointerRegion(region); + rawpointerSpace_.AddRawPointerRegion(region); } else { CHECK(region->GetRegionType() != RegionDesc::RegionType::LONE_FROM_REGION); } @@ -356,40 +436,52 @@ public: void RemoveRawPointerObject(BaseObject* obj) { - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); region->DecRawPointerObjectCount(); } void AddRawPointerRegion(RegionDesc* region) { - regionManager_.AddRawPointerRegion(region); + rawpointerSpace_.AddRawPointerRegion(region); } void CopyRegion(RegionDesc* region); - - void VisitRememberSet(const std::function &func); + void MarkRememberSet(const std::function &func); friend class Allocator; private: enum class TryAllocationThreshold { RESCHEDULE = 3, - TRIGGER_OOM = 5, + 
TRIGGER_OOM = 4, }; HeapAddress TryAllocateOnce(size_t allocSize, AllocType allocType); bool ShouldRetryAllocation(size_t& tryTimes) const; + + void ForEachAwaitingJitFortUnsafe(const std::function& visitor) const; + void MarkJitFortMemAwaitingInstall(BaseObject *obj); + void HandlePostGCJitFortInstallTask(); + HeapAddress reservedStart_ = 0; HeapAddress reservedEnd_ = 0; RegionManager regionManager_; MemoryMap* map_{ nullptr }; + // Awaiting JitFort object has no references from other objects, + // but we need to keep them as live untill jit compilation has finished installing. + std::set awaitingJitFort_; + std::stack> jitFortPostGCInstallTask_; + std::mutex awaitingJitFortMutex_; + YoungSpace youngSpace_; OldSpace oldSpace_; - FromSpace fromSpace_; ToSpace toSpace_; + NonMovableSpace nonMovableSpace_; + LargeSpace largeSpace_; + AppSpawnSpace appSpawnSpace_; + RawPointerSpace rawpointerSpace_; + ReadOnlySpace readonlySpace_; }; - -using RegionalHeap = RegionSpace; } // namespace common #endif // COMMON_COMPONENTS_HEAP_ALLOCATOR_REGION_SPACE_H diff --git a/common_components/heap/allocator/tests/BUILD.gn b/common_components/heap/allocator/tests/BUILD.gn index 7315b7d07da211cdb0ee386f99092503ec1a1ea9..f4e01c7e8416e917f2fb79d2dbc5280b16df199a 100755 --- a/common_components/heap/allocator/tests/BUILD.gn +++ b/common_components/heap/allocator/tests/BUILD.gn @@ -59,6 +59,7 @@ host_unittest_action("Region_Manager_Test") { "icu:shared_icui18n", "icu:shared_icuuc", "zlib:libz", + "bounds_checking_function:libsec_shared", ] } @@ -85,14 +86,63 @@ host_unittest_action("Treap_Test") { ] } +host_unittest_action("Regional_Heap_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "regional_heap_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # 
hiviewdfx libraries + external_deps = [ + "bounds_checking_function:libsec_shared", + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Heap_Allocator_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "heap_allocator_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + group("unittest") { testonly = true # deps file deps = [ ":Allocator_Test", - ":Region_Manager_Test", ":Treap_Test", + ":Regional_Heap_Test", + ":Heap_Allocator_Test", + ":Region_Manager_Test" ] } @@ -102,7 +152,9 @@ group("host_unittest") { # deps file deps = [ ":Allocator_TestAction", - ":Region_Manager_TestAction", ":Treap_TestAction", + ":Regional_Heap_TestAction", + ":Heap_Allocator_TestAction", + ":Region_Manager_TestAction" ] } diff --git a/common_components/heap/allocator/tests/allocator_test.cpp b/common_components/heap/allocator/tests/allocator_test.cpp index 66639081f6765f1e0378a89c238453ffb7e6d2b4..9ad634eb4eb5c6e6e7572f32c1576c4cbed26bec 100755 --- a/common_components/heap/allocator/tests/allocator_test.cpp +++ b/common_components/heap/allocator/tests/allocator_test.cpp @@ -43,6 +43,8 @@ public: #ifndef NDEBUG bool IsHeapObject(HeapAddress) const override { return false; } #endif + void FeedHungryBuffers() override {} + size_t GetSurvivedSize() const override { return 0; } }; class AllocatorTest : public common::test::BaseTestWithScope { }; @@ -50,8 +52,14 @@ class AllocatorTest : public common::test::BaseTestWithScope { HWTEST_F_L0(AllocatorTest, EnvNotSet) { unsetenv("arkEnableAsyncAllocation"); + TestAllocator allocator; - EXPECT_FALSE(allocator.GetIsAsyncAllocationEnable()); + bool result = 
allocator.GetIsAsyncAllocationEnable(); +#if defined(PANDA_TARGET_OHOS) + EXPECT_TRUE(result); +#else + EXPECT_FALSE(result); +#endif } HWTEST_F_L0(AllocatorTest, InvalidLength) diff --git a/common_components/heap/allocator/tests/heap_allocator_test.cpp b/common_components/heap/allocator/tests/heap_allocator_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7423874fc20bdbb8507e2fa7a14d4ccd6e6404d6 --- /dev/null +++ b/common_components/heap/allocator/tests/heap_allocator_test.cpp @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_interfaces/heap/heap_allocator.h" +#include "common_components/heap/allocator/region_desc.h" +#include "common_components/heap/allocator/regional_heap.h" +#include "common_components/tests/test_helper.h" +#include "common_interfaces/base_runtime.h" + +using namespace common; + +namespace common::test { +class HeapAllocatorTest : public BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() + { + BaseRuntime::GetInstance()->Fini(); + } + + void SetUp() override + { + holder_ = ThreadHolder::CreateAndRegisterNewThreadHolder(nullptr); + scope_ = new ThreadHolder::TryBindMutatorScope(holder_); + } + + void TearDown() override + { + if (scope_ != nullptr) { + delete scope_; + scope_ = nullptr; + } + } + + ThreadHolder *holder_ {nullptr}; + ThreadHolder::TryBindMutatorScope *scope_ {nullptr}; +}; + +HWTEST_F_L0(HeapAllocatorTest, AllocLargeObject) +{ + uintptr_t addr = common::HeapAllocator::AllocateInHuge(Heap::NORMAL_UNIT_SIZE, common::LanguageType::DYNAMIC); + ASSERT(addr > 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + ASSERT(region->IsLargeRegion()); +} + +HWTEST_F_L0(HeapAllocatorTest, AllocLargeRegion) +{ + uintptr_t addr = common::HeapAllocator::AllocateLargeRegion(Heap::NORMAL_UNIT_SIZE); + ASSERT(addr > 0); + RegionDesc *region = RegionDesc::GetAliveRegionDescAt(addr); + ASSERT(region->IsLargeRegion()); +} +} \ No newline at end of file diff --git a/common_components/heap/allocator/tests/region_manager_test.cpp b/common_components/heap/allocator/tests/region_manager_test.cpp index 4e1b6c4af38c14522724b6cfa462e33f70fabe93..be0b4b2eaef972d9a95e79526b8c24daab30c7b2 100755 --- a/common_components/heap/allocator/tests/region_manager_test.cpp +++ b/common_components/heap/allocator/tests/region_manager_test.cpp @@ -14,13 +14,12 @@ */ #include "common_components/heap/allocator/region_manager.h" -#include 
"common_components/heap/collector/trace_collector.h" +#include "common_components/heap/collector/marking_collector.h" #include "common_components/heap/heap.cpp" -#include "common_components/base_runtime/base_runtime_param.h" +#include "common_components/common_runtime/base_runtime_param.h" #include "common_components/heap/heap_manager.h" #include "common_components/tests/test_helper.h" #include -#include using namespace common; @@ -39,7 +38,17 @@ protected: size_t totalUnits_ = SIZE_MAX_TEST; size_t heapSize_; Mutator* mutator_ = nullptr; - bool isInit_ = false; + + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() + { + BaseRuntime::GetInstance()->Fini(); + } + void SetUp() override { heapSize_ = totalUnits_ * RegionDesc::UNIT_SIZE; @@ -58,15 +67,14 @@ protected: void TearDown() override { - if (isInit_) { - BaseRuntime::GetInstance()->Fini(); + if (mutator_) { + delete mutator_; + mutator_ = nullptr; + } + if (regionMemory_) { + free(regionMemory_); + regionMemory_ = nullptr; } - } - - void InitializeBaseRuntime() - { - BaseRuntime::GetInstance()->Init(); - isInit_ = true; } }; @@ -125,8 +133,8 @@ HWTEST_F_L0(RegionManagerTest, VisitLiveObjectsUntilFalse_IsLargeRegion) HWTEST_F_L0(RegionManagerTest, VisitLiveObjectsUntilFalse) { size_t unitIdx = 0; - size_t nUnit = 4; - RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::FREE_UNITS); + size_t nUnit = 1; + RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::SMALL_SIZED_UNITS); ASSERT_NE(region, nullptr); region->AddLiveByteCount(SIZE_SIXTEEN); bool callbackCalled = false; @@ -148,9 +156,8 @@ HWTEST_F_L0(RegionManagerTest, VisitAllObjectsBeforeFix1) uintptr_t start = region->GetRegionStart(); region->SetRegionAllocPtr(start + SIZE_SIXTEEN); - region->SetFixLine(); bool callbackCalled = false; - region->VisitAllObjectsBeforeFix([&](BaseObject* obj) { + 
region->VisitAllObjectsBeforeCopy([&](BaseObject* obj) { callbackCalled = true; EXPECT_EQ(obj, reinterpret_cast(region->GetRegionStart())); }); @@ -160,15 +167,15 @@ HWTEST_F_L0(RegionManagerTest, VisitAllObjectsBeforeFix1) HWTEST_F_L0(RegionManagerTest, VisitAllObjectsBeforeFix2) { size_t unitIdx = 0; - size_t nUnit = 4; - RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::FREE_UNITS); + size_t nUnit = 1; + RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::SMALL_SIZED_UNITS); + RegionDesc::InitFreeRegion(unitIdx, nUnit); ASSERT_NE(region, nullptr); uintptr_t start = region->GetRegionStart(); region->SetRegionAllocPtr(start + SIZE_SIXTEEN); - region->SetFixLine(); bool callbackCalled = false; - region->VisitAllObjectsBeforeFix([&](BaseObject* obj) { + region->VisitAllObjectsBeforeCopy([&](BaseObject* obj) { callbackCalled = true; EXPECT_EQ(obj, reinterpret_cast(region->GetRegionStart())); }); @@ -182,9 +189,9 @@ HWTEST_F_L0(RegionManagerTest, VisitAllObjectsBeforeFix3) RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::LARGE_SIZED_UNITS); ASSERT_NE(region, nullptr); - region->SetFixLine(); + bool callbackCalled = false; - region->VisitAllObjectsBeforeFix([&](BaseObject* obj) { + region->VisitAllObjectsBeforeCopy([&](BaseObject* obj) { callbackCalled = true; EXPECT_EQ(obj, reinterpret_cast(region->GetRegionStart())); }); @@ -194,13 +201,13 @@ HWTEST_F_L0(RegionManagerTest, VisitAllObjectsBeforeFix3) HWTEST_F_L0(RegionManagerTest, VisitAllObjectsBeforeFix4) { size_t unitIdx = 0; - size_t nUnit = 4; + size_t nUnit = 1; - RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::FREE_UNITS); + RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::SMALL_SIZED_UNITS); ASSERT_NE(region, nullptr); - region->SetFixLine(); + bool callbackCalled = false; - region->VisitAllObjectsBeforeFix([&](BaseObject* obj) { + 
region->VisitAllObjectsBeforeCopy([&](BaseObject* obj) { callbackCalled = true; EXPECT_EQ(obj, reinterpret_cast(region->GetRegionStart())); }); @@ -227,27 +234,17 @@ HWTEST_F_L0(RegionManagerTest, ReleaseGarbageRegions) EXPECT_GT(released, 0); } -HWTEST_F_L0(RegionManagerTest, SetLargeObjectThreshold) -{ - auto& heapParam = BaseRuntime::GetInstance()->GetHeapParam(); - heapParam.regionSize = SIZE_SIXTEEN; - RegionManager rm; - rm.SetLargeObjectThreshold(); - EXPECT_LT(BaseRuntime::GetInstance()->GetHeapParam().regionSize * KB, - RegionDesc::LARGE_OBJECT_DEFAULT_THRESHOLD); -} - HWTEST_F_L0(RegionManagerTest, ReclaimRegion1) { size_t huge_page = (2048 * KB) / getpagesize(); size_t nUnit = 1; size_t unitIdx = 0; + RegionManager manager; + manager.Initialize(SIZE_MAX_TEST, reinterpret_cast(regionMemory_)); RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::SMALL_SIZED_UNITS); ASSERT_NE(region, nullptr); EXPECT_FALSE(region->IsLargeRegion()); - RegionManager manager; - manager.Initialize(SIZE_MAX_TEST, reinterpret_cast(regionMemory_)); manager.ReclaimRegion(region); EXPECT_GT(manager.GetDirtyUnitCount(), 0); } @@ -257,11 +254,11 @@ HWTEST_F_L0(RegionManagerTest, ReclaimRegion2) size_t huge_page = (2048 * KB) / getpagesize(); size_t nUnit = huge_page; size_t unitIdx = 0; + RegionManager manager; + manager.Initialize(SIZE_MAX_TEST, reinterpret_cast(regionMemory_)); RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::LARGE_SIZED_UNITS); ASSERT_NE(region, nullptr); - RegionManager manager; - manager.Initialize(SIZE_MAX_TEST, reinterpret_cast(regionMemory_)); manager.ReclaimRegion(region); EXPECT_GT(manager.GetDirtyUnitCount(), 0); } @@ -271,11 +268,11 @@ HWTEST_F_L0(RegionManagerTest, ReleaseRegion) size_t huge_page = (2048 * KB) / getpagesize(); size_t nUnit = 1; size_t unitIdx = 0; + RegionManager manager; + manager.Initialize(SIZE_MAX_TEST, reinterpret_cast(regionMemory_)); RegionDesc* region = 
RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::LARGE_SIZED_UNITS); ASSERT_NE(region, nullptr); - RegionManager manager; - manager.Initialize(SIZE_MAX_TEST, reinterpret_cast(regionMemory_)); auto ret = manager.ReleaseRegion(region); EXPECT_EQ(ret, region->GetRegionSize()); } @@ -284,14 +281,112 @@ HWTEST_F_L0(RegionManagerTest, TakeRegion1) { ASSERT_NE(mutator_, nullptr); mutator_->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); - InitializeBaseRuntime(); RegionManager manager; - size_t nUnit = 1; manager.Initialize(SIZE_MAX_TEST, reinterpret_cast(regionMemory_)); + size_t nUnit = 4; RegionDesc* garbageRegion = RegionDesc::InitRegion(SIZE_HALF_MAX_TEST, nUnit, - RegionDesc::UnitRole::SMALL_SIZED_UNITS); + RegionDesc::UnitRole::LARGE_SIZED_UNITS); auto size = manager.CollectRegion(garbageRegion); - RegionDesc* region = manager.TakeRegion(16, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, false); + RegionDesc* region = manager.TakeRegion(1, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, false); EXPECT_GT(manager.GetDirtyUnitCount(), 0); } + +HWTEST_F_L0(RegionManagerTest, TakeRegion2) +{ + ASSERT_NE(mutator_, nullptr); + mutator_->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); + RegionManager manager; + size_t nUnit = 1; + manager.Initialize(SIZE_MAX_TEST, reinterpret_cast(regionMemory_)); + RegionDesc* garbageRegion = RegionDesc::InitRegion(SIZE_HALF_MAX_TEST, nUnit, + RegionDesc::UnitRole::LARGE_SIZED_UNITS); + auto size = manager.CollectRegion(garbageRegion); + RegionDesc* region = manager.TakeRegion(16, RegionDesc::UnitRole::LARGE_SIZED_UNITS, true, false); + EXPECT_NE(region, nullptr); +} + +HWTEST_F_L0(RegionManagerTest, VisitRememberSetTest) +{ + size_t totalUnits = 1024; + size_t heapSize = totalUnits * RegionDesc::UNIT_SIZE; + + void* regionMemory = malloc(heapSize + 4096); + ASSERT_NE(regionMemory, nullptr); + + uintptr_t heapStartAddress = reinterpret_cast(regionMemory); + uintptr_t regionInfoAddr = heapStartAddress + 4096; + + RegionManager manager; + 
manager.Initialize(totalUnits, regionInfoAddr); + + size_t unitIdx = 0; + size_t nUnit = 4; + RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::LARGE_SIZED_UNITS); + ASSERT_NE(region, nullptr); + + int callbackCount = 0; + region->VisitRememberSet([&](BaseObject* obj) { + callbackCount++; + }); + + EXPECT_GE(callbackCount, 0); + free(regionMemory); +} + +HWTEST_F_L0(RegionManagerTest, VisitRememberSetBeforeCopyTest) +{ + size_t totalUnits = 1024; + size_t heapSize = totalUnits * RegionDesc::UNIT_SIZE; + + void* regionMemory = malloc(heapSize + 4096); + ASSERT_NE(regionMemory, nullptr); + + uintptr_t heapStartAddress = reinterpret_cast(regionMemory); + uintptr_t regionInfoAddr = heapStartAddress + 4096; + + RegionManager manager; + manager.Initialize(totalUnits, regionInfoAddr); + + size_t unitIdx = 0; + size_t nUnit = 4; + RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::LARGE_SIZED_UNITS); + ASSERT_NE(region, nullptr); + + int callbackCount = 0; + region->VisitRememberSetBeforeCopy([&](BaseObject* obj) { + callbackCount++; + }); + + EXPECT_GE(callbackCount, 0); + free(regionMemory); +} + +HWTEST_F_L0(RegionManagerTest, VisitRememberSetBeforeMarkingTest) +{ + size_t totalUnits = 1024; + size_t heapSize = totalUnits * RegionDesc::UNIT_SIZE; + + void* regionMemory = malloc(heapSize + 4096); + ASSERT_NE(regionMemory, nullptr); + + uintptr_t heapStartAddress = reinterpret_cast(regionMemory); + uintptr_t regionInfoAddr = heapStartAddress + 4096; + + RegionManager manager; + manager.Initialize(totalUnits, regionInfoAddr); + + size_t unitIdx = 0; + size_t nUnit = 4; + RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::LARGE_SIZED_UNITS); + ASSERT_NE(region, nullptr); + + int callbackCount = 0; + region->VisitRememberSetBeforeMarking([&](BaseObject* obj) { + callbackCount++; + }); + + EXPECT_GE(callbackCount, 0); + free(regionMemory); +} + } diff --git 
a/common_components/heap/allocator/tests/region_space_test.cpp b/common_components/heap/allocator/tests/region_space_test.cpp deleted file mode 100755 index e81f9f7cff44e515d3ea0d6ec9fee012c29a70dd..0000000000000000000000000000000000000000 --- a/common_components/heap/allocator/tests/region_space_test.cpp +++ /dev/null @@ -1,366 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "common_interfaces/heap/heap_allocator.h" -#include "common_components/heap/allocator/region_space.h" -#include "common_components/heap/collector/trace_collector.h" -#include "common_components/heap/heap.cpp" -#include "common_components/base_runtime/base_runtime_param.h" -#include "common_components/heap/heap_manager.h" -#include "common_components/tests/test_helper.h" -#include - -using namespace common; - -namespace common::test { -class RegionSpaceTest : public common::test::BaseTestWithScope { -protected: - void* regionMemory_; - size_t totalUnits_ = 1024; - size_t heapSize_; - Mutator* mutator_ = nullptr; - - static void SetUpTestCase() - { - BaseRuntime::GetInstance()->Init(); - } - - static void TearDownTestCase() - { - BaseRuntime::GetInstance()->Fini(); - } - - void SetUp() override - { - heapSize_ = totalUnits_ * RegionDesc::UNIT_SIZE; - size_t allocSize = heapSize_ + totalUnits_ * sizeof(RegionDesc); - regionMemory_ = malloc(allocSize); - ASSERT_NE(regionMemory_, nullptr); - uintptr_t unitInfoStart = 
reinterpret_cast(regionMemory_); - size_t metadataSize = totalUnits_ * sizeof(RegionDesc); - uintptr_t heapStartAddress = unitInfoStart + metadataSize; - RegionDesc::Initialize(totalUnits_, unitInfoStart, heapStartAddress); - mutator_ = Mutator::NewMutator(); - ASSERT_NE(mutator_, nullptr); - mutator_->InitTid(); - ThreadLocal::GetThreadLocalData()->mutator = mutator_; - } - - void TearDown() override - { - if (mutator_) { - delete mutator_; - mutator_ = nullptr; - } - if (regionMemory_) { - free(regionMemory_); - regionMemory_ = nullptr; - } - } -}; - -HWTEST_F_L0(RegionSpaceTest, ShouldRetryAllocation) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_UNDEF); - ThreadLocal::SetThreadType(ThreadType::GC_THREAD); - Heap::GetHeap().EnableGC(false); - Heap::GetHeap().GetCollectorResources().SetGcStarted(true); - Allocator *regionSpace = RegionSpace::CreateAllocator(); - EXPECT_EQ(regionSpace->Allocate(16, AllocType::MOVEABLE_OBJECT), 0); -} - -HWTEST_F_L0(RegionSpaceTest, FeedHungryBuffers2) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_FIX); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - RegionManager& manager = theAllocator.GetRegionManager(); - ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); - manager.Initialize(1024, reinterpret_cast(regionMemory_)); - AllocationBuffer* buffer1 = new (std::nothrow) AllocationBuffer(); - AllocationBuffer* buffer2 = new (std::nothrow) AllocationBuffer(); - RegionDesc* Region = RegionDesc::InitRegion(0, 1, RegionDesc::UnitRole::SMALL_SIZED_UNITS); - buffer1->SetPreparedRegion(Region); - - Heap::GetHeap().GetAllocator().AddHungryBuffer(*buffer1); - Heap::GetHeap().GetAllocator().AddHungryBuffer(*buffer2); - Heap::GetHeap().GetAllocator().FeedHungryBuffers(); - EXPECT_NE(buffer2->GetPreparedRegion(), nullptr); -} - -HWTEST_F_L0(RegionSpaceTest, FeedHungryBuffers3) -{ - auto* mutator = 
common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_FIX); - Heap::GetHeap().GetAllocator().FeedHungryBuffers(); - AllocBufferManager::HungryBuffers hungryBuffers; - Heap::GetHeap().GetAllocator().SwapHungryBuffers(hungryBuffers); - EXPECT_EQ(hungryBuffers.size(), 0); -} - - -HWTEST_F_L0(RegionSpaceTest, AllocRegion_PhaseEnum) -{ - ASSERT_NE(mutator_, nullptr); - mutator_->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetTraceLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocRegion_PhaseMark) -{ - ASSERT_NE(mutator_, nullptr); - mutator_->SetMutatorPhase(GCPhase::GC_PHASE_MARK); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetTraceLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocRegion_PhaseRemarkStab) -{ - ASSERT_NE(mutator_, nullptr); - mutator_->SetMutatorPhase(GCPhase::GC_PHASE_REMARK_SATB); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetTraceLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocRegion_PhasePostMark) 
-{ - ASSERT_NE(mutator_, nullptr); - mutator_->SetMutatorPhase(GCPhase::GC_PHASE_POST_MARK); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetTraceLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocRegion_PhasePrecopy) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocRegion_PhaseCopy) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_COPY); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocRegion_PhaseFix) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_FIX); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetFixLine(), region->GetRegionStart()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocRegion_PhaseUndef) -{ - auto* mutator = common::Mutator::GetMutator(); - 
mutator->SetMutatorPhase(GCPhase::GC_PHASE_UNDEF); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocPinnedRegion_PhaseEnum) -{ - ASSERT_NE(mutator_, nullptr); - mutator_->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocPinnedRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetTraceLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocPinnedRegion_PhaseMark) -{ - ASSERT_NE(mutator_, nullptr); - mutator_->SetMutatorPhase(GCPhase::GC_PHASE_MARK); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocPinnedRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetTraceLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocPinnedRegion_PhaseRemarkStab) -{ - ASSERT_NE(mutator_, nullptr); - mutator_->SetMutatorPhase(GCPhase::GC_PHASE_REMARK_SATB); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocPinnedRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetTraceLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); 
- EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocPinnedRegion_PhasePostMark) -{ - ASSERT_NE(mutator_, nullptr); - mutator_->SetMutatorPhase(GCPhase::GC_PHASE_POST_MARK); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocPinnedRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetTraceLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocPinnedRegion_PhasePrecopy) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocPinnedRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocPinnedRegion_PhaseCopy) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_COPY); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocPinnedRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocPinnedRegion_PhaseFix) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_FIX); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocPinnedRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetFixLine(), 
region->GetRegionStart()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocPinnedRegion_PhaseUndef) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_UNDEF); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - uintptr_t addr = theAllocator.AllocPinnedRegion(); - ASSERT_NE(addr, 0); - RegionDesc* region = RegionDesc::GetRegionDescAt(addr); - EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocateThreadLocalRegion2) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_FIX); - ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - RegionDesc* region = theAllocator.AllocateThreadLocalRegion(false); - EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetFixLine(), region->GetRegionStart()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocateThreadLocalRegion3) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_COPY); - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); - RegionDesc* region = theAllocator.AllocateThreadLocalRegion(false); - EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); - RegionDesc* region1 = theAllocator.AllocateThreadLocalRegion(false); - EXPECT_EQ(region1->GetCopyLine(), region1->GetRegionStart()); - EXPECT_EQ(region1->GetFixLine(), std::numeric_limits::max()); -} - -HWTEST_F_L0(RegionSpaceTest, AllocateThreadLocalRegion4) -{ - auto* mutator = common::Mutator::GetMutator(); - mutator->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); - RegionSpace& theAllocator = 
reinterpret_cast(Heap::GetHeap().GetAllocator()); - ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); - RegionDesc* region = theAllocator.AllocateThreadLocalRegion(false); - EXPECT_EQ(region->GetTraceLine(), region->GetRegionStart()); - EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region->GetFixLine(), std::numeric_limits::max()); - - mutator->SetMutatorPhase(GCPhase::GC_PHASE_MARK); - RegionDesc* region2 = theAllocator.AllocateThreadLocalRegion(false); - EXPECT_EQ(region2->GetTraceLine(), region2->GetRegionStart()); - EXPECT_EQ(region2->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region2->GetFixLine(), std::numeric_limits::max()); - - mutator->SetMutatorPhase(GCPhase::GC_PHASE_REMARK_SATB); - RegionDesc* region3 = theAllocator.AllocateThreadLocalRegion(false); - EXPECT_EQ(region3->GetTraceLine(), region3->GetRegionStart()); - EXPECT_EQ(region3->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region3->GetFixLine(), std::numeric_limits::max()); - - mutator->SetMutatorPhase(GCPhase::GC_PHASE_POST_MARK); - RegionDesc* region4 = theAllocator.AllocateThreadLocalRegion(false); - EXPECT_EQ(region4->GetTraceLine(), region4->GetRegionStart()); - EXPECT_EQ(region4->GetCopyLine(), std::numeric_limits::max()); - EXPECT_EQ(region4->GetFixLine(), std::numeric_limits::max()); -} -} diff --git a/common_components/heap/allocator/tests/regional_heap_test.cpp b/common_components/heap/allocator/tests/regional_heap_test.cpp new file mode 100755 index 0000000000000000000000000000000000000000..5d781ff993112ae5aa05c6ec6a4775fa785ca198 --- /dev/null +++ b/common_components/heap/allocator/tests/regional_heap_test.cpp @@ -0,0 +1,519 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/common/type_def.h" +#include "common_components/heap/allocator/region_desc.h" +#include "common_components/heap/allocator/region_manager.h" +#include "common_components/heap/allocator/regional_heap.h" +#include "common_components/heap/allocator/regional_heap.cpp" +#include "common_components/heap/collector/collector_resources.h" +#include "common_components/heap/space/nonmovable_space.h" +#include "common_components/heap/space/readonly_space.h" +#include "common_components/tests/test_helper.h" +#include "common_interfaces/base_runtime.h" + +using namespace common; + +namespace common::test { +class RegionalHeapTest : public common::test::BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() + { + BaseRuntime::GetInstance()->Fini(); + } + + void SetUp() override + { + holder_ = ThreadHolder::CreateAndRegisterNewThreadHolder(nullptr); + scope_ = new ThreadHolder::TryBindMutatorScope(holder_); + } + + void TearDown() override + { + if (scope_ != nullptr) { + delete scope_; + scope_ = nullptr; + } + } + + ThreadHolder *holder_ {nullptr}; + ThreadHolder::TryBindMutatorScope *scope_ {nullptr}; +}; + +HWTEST_F_L0(RegionalHeapTest, FeedHungryBuffers2) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_FIX); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + AllocationBuffer* buffer1 = new (std::nothrow) AllocationBuffer(); + AllocationBuffer* buffer2 = new (std::nothrow) 
AllocationBuffer(); + RegionDesc* region = RegionDesc::InitRegion(0, 1, RegionDesc::UnitRole::LARGE_SIZED_UNITS); + region->InitFreeUnits(); + buffer1->SetPreparedRegion(region); + buffer2->SetPreparedRegion(region); + + Heap::GetHeap().GetAllocator().AddHungryBuffer(*buffer1); + Heap::GetHeap().GetAllocator().AddHungryBuffer(*buffer2); + Heap::GetHeap().GetAllocator().FeedHungryBuffers(); + EXPECT_NE(buffer2->GetPreparedRegion(), nullptr); + delete buffer1; + delete buffer2; +} + +HWTEST_F_L0(RegionalHeapTest, FeedHungryBuffers3) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_FIX); + Heap::GetHeap().GetAllocator().FeedHungryBuffers(); + AllocBufferManager::HungryBuffers hungryBuffers; + Heap::GetHeap().GetAllocator().SwapHungryBuffers(hungryBuffers); + EXPECT_EQ(hungryBuffers.size(), 0); +} + +HWTEST_F_L0(RegionalHeapTest, AllocRegion_PhaseEnum) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetMarkingLine(), region->GetRegionStart()); + EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocRegion_PhaseMark) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_MARK); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetMarkingLine(), region->GetRegionStart()); + EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocRegion_PhaseRemarkStab) +{ + auto* mutator = common::Mutator::GetMutator(); + 
mutator->SetMutatorPhase(GCPhase::GC_PHASE_REMARK_SATB); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetMarkingLine(), region->GetRegionStart()); + EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocRegion_PhasePostMark) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_POST_MARK); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetMarkingLine(), region->GetRegionStart()); + EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocRegion_PhasePrecopy) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocRegion_PhaseCopy) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_COPY); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocRegion_PhaseFix) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_FIX); + RegionalHeap& theAllocator = 
reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocRegion_PhaseUndef) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_UNDEF); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateNonMovableRegion_PhaseEnum) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocateNonMovableRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetMarkingLine(), region->GetRegionStart()); + EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateNonMovableRegion_PhaseMark) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_MARK); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocateNonMovableRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetMarkingLine(), region->GetRegionStart()); + EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateNonMovableRegion_PhaseRemarkStab) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_REMARK_SATB); + RegionalHeap& theAllocator = 
reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocateNonMovableRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetMarkingLine(), region->GetRegionStart()); + EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateNonMovableRegion_PhasePostMark) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_POST_MARK); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocateNonMovableRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetMarkingLine(), region->GetRegionStart()); + EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateNonMovableRegion_PhasePrecopy) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocateNonMovableRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateNonMovableRegion_PhaseCopy) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_COPY); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocateNonMovableRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateNonMovableRegion_PhaseFix) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_FIX); + RegionalHeap& 
theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocateNonMovableRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateNonMovableRegion_PhaseUndef) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_UNDEF); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocateNonMovableRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(addr); + EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateThreadLocalRegion2) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_FIX); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + RegionDesc* region = theAllocator.AllocateThreadLocalRegion(false); + EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateThreadLocalRegion3) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_COPY); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + RegionDesc* region = theAllocator.AllocateThreadLocalRegion(false); + EXPECT_EQ(region->GetCopyLine(), region->GetRegionStart()); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); + RegionDesc* region1 = theAllocator.AllocateThreadLocalRegion(false); + EXPECT_EQ(region1->GetCopyLine(), region1->GetRegionStart()); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateThreadLocalRegion4) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); + RegionalHeap& 
theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + RegionDesc* region = theAllocator.AllocateThreadLocalRegion(false); + EXPECT_EQ(region->GetMarkingLine(), region->GetRegionStart()); + EXPECT_EQ(region->GetCopyLine(), std::numeric_limits::max()); + + mutator->SetMutatorPhase(GCPhase::GC_PHASE_MARK); + RegionDesc* region2 = theAllocator.AllocateThreadLocalRegion(false); + EXPECT_EQ(region2->GetMarkingLine(), region2->GetRegionStart()); + EXPECT_EQ(region2->GetCopyLine(), std::numeric_limits::max()); + + mutator->SetMutatorPhase(GCPhase::GC_PHASE_REMARK_SATB); + RegionDesc* region3 = theAllocator.AllocateThreadLocalRegion(false); + EXPECT_EQ(region3->GetMarkingLine(), region3->GetRegionStart()); + EXPECT_EQ(region3->GetCopyLine(), std::numeric_limits::max()); + + mutator->SetMutatorPhase(GCPhase::GC_PHASE_POST_MARK); + RegionDesc* region4 = theAllocator.AllocateThreadLocalRegion(false); + EXPECT_EQ(region4->GetMarkingLine(), region4->GetRegionStart()); + EXPECT_EQ(region4->GetCopyLine(), std::numeric_limits::max()); +} + +HWTEST_F_L0(RegionalHeapTest, CopyRegion) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::FROM_REGION); + ASSERT(region->IsFromRegion()); + theAllocator.CopyRegion(region); + EXPECT_EQ(theAllocator.FromSpaceSize(), 0); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateThreadLocalRegion1_NotGcThread_EntersElseBranch) +{ + auto& heapAllocator = Heap::GetHeap().GetAllocator(); + RegionalHeap& regionalHeap = reinterpret_cast(heapAllocator); + + Mutator::GetMutator()->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); + + auto* region = regionalHeap.AllocateThreadLocalRegion(false); + 
EXPECT_NE(region, nullptr); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateThreadLocalRegion2_NotGcThread_EntersElseBranch) +{ + auto& heapAllocator = Heap::GetHeap().GetAllocator(); + RegionalHeap& regionalHeap = reinterpret_cast(heapAllocator); + + Mutator::GetMutator()->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); + + auto* region = regionalHeap.AllocateThreadLocalRegion(false); + EXPECT_NE(region, nullptr); +} + +HWTEST_F_L0(RegionalHeapTest, AllocateThreadLocalRegion3_NotGcThread_EntersElseBranch) +{ + auto& heapAllocator = Heap::GetHeap().GetAllocator(); + RegionalHeap& regionalHeap = reinterpret_cast(heapAllocator); + + Mutator::GetMutator()->SetMutatorPhase(GCPhase::GC_PHASE_FIX); + + auto* region = regionalHeap.AllocateThreadLocalRegion(false); + EXPECT_NE(region, nullptr); +} + +HWTEST_F_L0(RegionalHeapTest, Allocate_ValidSize_ReturnsNonNull) +{ + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->InitFreeUnits(); + region->SetRegionType(RegionDesc::RegionType::THREAD_LOCAL_REGION); + Mutator::GetMutator()->SetMutatorPhase(GCPhase::GC_PHASE_UNDEF); + Heap::GetHeap().EnableGC(true); + Heap::GetHeap().GetCollectorResources().SetGcStarted(false); + + uintptr_t result = theAllocator.Allocate(16, AllocType::NONMOVABLE_OBJECT); + EXPECT_NE(result, 0u); +} + +HWTEST_F_L0(RegionalHeapTest, FeedHungryBuffers_ShouldProvideValidRegions) +{ + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + + AllocationBuffer* buffer1 = new (std::nothrow) AllocationBuffer(); + AllocationBuffer* buffer2 = new (std::nothrow) AllocationBuffer(); + ASSERT_NE(buffer1, nullptr); + ASSERT_NE(buffer2, nullptr); + + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + ASSERT_NE(region, nullptr); + region->InitFreeUnits(); 
+ region->SetRegionType(RegionDesc::RegionType::THREAD_LOCAL_REGION); + + buffer1->SetPreparedRegion(region); + buffer2->SetPreparedRegion(region); + Heap::GetHeap().GetAllocator().AddHungryBuffer(*buffer1); + Heap::GetHeap().GetAllocator().AddHungryBuffer(*buffer2); + + Mutator::GetMutator()->SetMutatorPhase(GCPhase::GC_PHASE_FIX); + + Heap::GetHeap().GetAllocator().FeedHungryBuffers(); + + EXPECT_NE(buffer2->GetPreparedRegion(), nullptr); + delete buffer1; + delete buffer2; +} + +HWTEST_F_L0(RegionalHeapTest, AllocationBuffer_AllocateRawPointerObject_ValidSize_ReturnsNonNull) +{ + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + ASSERT_NE(region, nullptr); + region->InitFreeUnits(); + region->SetRegionType(RegionDesc::RegionType::THREAD_LOCAL_REGION); + + AllocationBuffer* buffer = new (std::nothrow) AllocationBuffer(); + ASSERT_NE(buffer, nullptr); + buffer->SetPreparedRegion(region); + + Mutator::GetMutator()->SetMutatorPhase(GCPhase::GC_PHASE_UNDEF); + Heap::GetHeap().EnableGC(true); + Heap::GetHeap().GetCollectorResources().SetGcStarted(false); + + uintptr_t result = theAllocator.Allocate(16, AllocType::NONMOVABLE_OBJECT); + EXPECT_NE(result, 0u); + delete buffer; +} + +HWTEST_F_L0(RegionalHeapTest, AllocNonMovableObject) +{ + auto* mutator = common::Mutator::GetMutator(); + RegionManager regionManager; + NonMovableSpace nonMovableSpace(regionManager); + EXPECT_EQ(nonMovableSpace.Alloc(sizeof(RegionDesc), false), 0); +} + +HWTEST_F_L0(RegionalHeapTest, AllocReadOnly1) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + RegionManager regionManager; + ReadOnlySpace readOnlySpace(regionManager); + uintptr_t ret = readOnlySpace.Alloc(sizeof(RegionDesc), false); + EXPECT_EQ(ret, 0); +} + 
+HWTEST_F_L0(RegionalHeapTest, AllocReadOnly2) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_MARK); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t ret = theAllocator.Allocate(sizeof(RegionDesc), AllocType::READ_ONLY_OBJECT); + EXPECT_NE(ret, 0); +} + +HWTEST_F_L0(RegionalHeapTest, AllocReadOnly3) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_POST_MARK); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t ret = theAllocator.Allocate(sizeof(RegionDesc), AllocType::READ_ONLY_OBJECT); + EXPECT_NE(ret, 0); +} + +HWTEST_F_L0(RegionalHeapTest, AllocReadOnly4) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t ret = theAllocator.Allocate(sizeof(RegionDesc), AllocType::READ_ONLY_OBJECT); + EXPECT_NE(ret, 0); +} + +HWTEST_F_L0(RegionalHeapTest, AllocReadOnly5) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_COPY); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t ret = theAllocator.Allocate(sizeof(RegionDesc), AllocType::READ_ONLY_OBJECT); + EXPECT_NE(ret, 0); +} + +HWTEST_F_L0(RegionalHeapTest, AllocReadOnly6) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_FIX); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t ret = theAllocator.Allocate(sizeof(RegionDesc), 
AllocType::READ_ONLY_OBJECT); + EXPECT_NE(ret, 0); +} + +HWTEST_F_L0(RegionalHeapTest, AllocReadOnly7) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_UNDEF); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t ret = theAllocator.Allocate(sizeof(RegionDesc), AllocType::READ_ONLY_OBJECT); + EXPECT_NE(ret, 0); +} +} diff --git a/common_components/heap/allocator/treap.h b/common_components/heap/allocator/treap.h index 3cc48bff5d63241cc16d4b24f1dd8394257a3acf..3b7d82eae5d816c700d21906c9900b6183d0ce87 100755 --- a/common_components/heap/allocator/treap.h +++ b/common_components/heap/allocator/treap.h @@ -85,7 +85,7 @@ public: void DecTotalCount(uint32_t cnt) { - if (totalCount_ < cnt) { + if (totalCount_ < cnt) { //LCOV_EXCL_BR_LINE LOG_COMMON(FATAL) << "Treap::DecTotalCount() Should not execute here, abort."; UNREACHABLE_CC(); } @@ -430,6 +430,9 @@ private: } node = *nodePtr; + if (UNLIKELY_CC(node == nullptr)) { + return false; + } idx = node->GetIndex(); auto count = node->GetCount(); @@ -605,7 +608,7 @@ private: node = it.Next(); } - if (total != GetTotalCount()) { + if (total != GetTotalCount()) { //LCOV_EXCL_BR_LINE DLOG(REGION, "c-tree %p total unit count %u (expect %u)", this, GetTotalCount(), total); DumpTree("internal error tree"); LOG_COMMON(FATAL) << "Treap::VerifyTree() Should not execute here, abort."; diff --git a/common_components/heap/w_collector/w_collector.cpp b/common_components/heap/ark_collector/ark_collector.cpp similarity index 48% rename from common_components/heap/w_collector/w_collector.cpp rename to common_components/heap/ark_collector/ark_collector.cpp index 2774d3ff5b77f466f34deb81d6ea34c4314d3981..4f1ac067fda8c0f6ff345e178d7355d5f6a67527 100755 --- a/common_components/heap/w_collector/w_collector.cpp +++ b/common_components/heap/ark_collector/ark_collector.cpp @@ -12,9 +12,9 @@ * See the License 
for the specific language governing permissions and * limitations under the License. */ -#include "common_components/heap/w_collector/w_collector.h" +#include "common_components/heap/ark_collector/ark_collector.h" -#include "common_components/base_runtime/hooks.h" +#include "common_components/common_runtime/hooks.h" #include "common_components/log/log.h" #include "common_components/mutator/mutator_manager-inl.h" #include "common_components/heap/verification.h" @@ -22,9 +22,15 @@ #include "common_interfaces/objects/ref_field.h" #include "common_interfaces/profiler/heap_profiler_listener.h" #include "common_components/objects/string_table_internal.h" +#include "common_components/heap/allocator/fix_heap.h" +#include "common_components/heap/allocator/regional_heap.h" + +#ifdef ENABLE_QOS +#include "qos.h" +#endif namespace common { -bool WCollector::IsUnmovableFromObject(BaseObject* obj) const +bool ArkCollector::IsUnmovableFromObject(BaseObject* obj) const { // filter const string object. if (!Heap::IsHeapAddress(obj)) { @@ -32,24 +38,17 @@ bool WCollector::IsUnmovableFromObject(BaseObject* obj) const } RegionDesc* regionInfo = nullptr; - regionInfo = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); return regionInfo->IsUnmovableFromRegion(); } -bool WCollector::MarkObject(BaseObject* obj, size_t cellCount) const +bool ArkCollector::MarkObject(BaseObject* obj) const { - bool marked = RegionSpace::MarkObject(obj); + bool marked = RegionalHeap::MarkObject(obj); if (!marked) { - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); - (void)region; - - if (region->IsGarbageRegion()) { - LOG_COMMON(FATAL) << "Unresolved fatal"; - UNREACHABLE_CC(); - } - size_t size = cellCount == 0 ? 
obj->GetSize() : (cellCount + 1) * sizeof(uint64_t); - region->AddLiveByteCount(size); - DLOG(TRACE, "mark obj %p<%p>(%zu) in region %p(%u)@%#zx, live %u", obj, obj->GetTypeInfo(), obj->GetSize(), + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); + DCHECK_CC(!region->IsGarbageRegion()); + DLOG(TRACE, "mark obj %p<%p> in region %p(%u)@%#zx, live %u", obj, obj->GetTypeInfo(), region, region->GetRegionType(), region->GetRegionStart(), region->GetLiveByteCount()); } return marked; @@ -57,42 +56,42 @@ bool WCollector::MarkObject(BaseObject* obj, size_t cellCount) const // this api updates current pointer as well as old pointer, caller should take care of this. template -bool WCollector::TryUpdateRefFieldImpl(BaseObject* obj, RefField<>& field, BaseObject*& fromObj, - BaseObject*& toObj) const +bool ArkCollector::TryUpdateRefFieldImpl(BaseObject* obj, RefField<>& field, BaseObject*& fromObj, + BaseObject*& toObj) const { RefField<> oldRef(field); fromObj = oldRef.GetTargetObject(); - if (IsFromObject(fromObj)) { - if (copy) { - toObj = const_cast(this)->TryForwardObject(fromObj); - if (toObj != nullptr) { + if (IsFromObject(fromObj)) { //LCOV_EXCL_BR_LINE + if (copy) { //LCOV_EXCL_BR_LINE + toObj = const_cast(this)->TryForwardObject(fromObj); + if (toObj != nullptr) { //LCOV_EXCL_BR_LINE HeapProfilerListener::GetInstance().OnMoveEvent(reinterpret_cast(fromObj), reinterpret_cast(toObj), toObj->GetSize()); } - } else { + } else { //LCOV_EXCL_BR_LINE toObj = FindToVersion(fromObj); } - if (toObj == nullptr) { + if (toObj == nullptr) { //LCOV_EXCL_BR_LINE return false; } - RefField<> tmpField(toObj); - if (field.CompareExchange(oldRef.GetFieldValue(), tmpField.GetFieldValue())) { - if (obj != nullptr) { + RefField<> tmpField(toObj, oldRef.IsWeak()); + if (field.CompareExchange(oldRef.GetFieldValue(), tmpField.GetFieldValue())) { //LCOV_EXCL_BR_LINE + if (obj != nullptr) { //LCOV_EXCL_BR_LINE DLOG(TRACE, "update obj %p<%p>(%zu)+%zu ref-field@%p: 
%#zx -> %#zx", obj, obj->GetTypeInfo(), obj->GetSize(), BaseObject::FieldOffset(obj, &field), &field, oldRef.GetFieldValue(), tmpField.GetFieldValue()); - } else { + } else { //LCOV_EXCL_BR_LINE DLOG(TRACE, "update ref@%p: 0x%zx -> %p", &field, oldRef.GetFieldValue(), toObj); } return true; - } else { - if (obj != nullptr) { + } else { //LCOV_EXCL_BR_LINE + if (obj != nullptr) { //LCOV_EXCL_BR_LINE DLOG(TRACE, "update obj %p<%p>(%zu)+%zu but cas failed ref-field@%p: %#zx(%#zx) -> %#zx but cas failed ", obj, obj->GetTypeInfo(), obj->GetSize(), BaseObject::FieldOffset(obj, &field), &field, oldRef.GetFieldValue(), field.GetFieldValue(), tmpField.GetFieldValue()); - } else { + } else { //LCOV_EXCL_BR_LINE DLOG(TRACE, "update but cas failed ref@%p: 0x%zx(%zx) -> %p", &field, oldRef.GetFieldValue(), field.GetFieldValue(), toObj); } @@ -103,37 +102,37 @@ bool WCollector::TryUpdateRefFieldImpl(BaseObject* obj, RefField<>& field, BaseO return false; } -bool WCollector::TryUpdateRefField(BaseObject* obj, RefField<>& field, BaseObject*& newRef) const +bool ArkCollector::TryUpdateRefField(BaseObject* obj, RefField<>& field, BaseObject*& newRef) const { BaseObject* oldRef = nullptr; return TryUpdateRefFieldImpl(obj, field, oldRef, newRef); } -bool WCollector::TryForwardRefField(BaseObject* obj, RefField<>& field, BaseObject*& newRef) const +bool ArkCollector::TryForwardRefField(BaseObject* obj, RefField<>& field, BaseObject*& newRef) const { BaseObject* oldRef = nullptr; return TryUpdateRefFieldImpl(obj, field, oldRef, newRef); } // this api untags current pointer as well as old pointer, caller should take care of this. 
-bool WCollector::TryUntagRefField(BaseObject* obj, RefField<>& field, BaseObject*& target) const +bool ArkCollector::TryUntagRefField(BaseObject* obj, RefField<>& field, BaseObject*& target) const { - for (;;) { + for (;;) { //LCOV_EXCL_BR_LINE RefField<> oldRef(field); - if (oldRef.IsTagged()) { + if (oldRef.IsTagged()) { //LCOV_EXCL_BR_LINE target = oldRef.GetTargetObject(); RefField<> newRef(target); - if (field.CompareExchange(oldRef.GetFieldValue(), newRef.GetFieldValue())) { - if (obj != nullptr) { + if (field.CompareExchange(oldRef.GetFieldValue(), newRef.GetFieldValue())) { //LCOV_EXCL_BR_LINE + if (obj != nullptr) { //LCOV_EXCL_BR_LINE DLOG(FIX, "untag obj %p<%p>(%zu) ref-field@%p: %#zx -> %#zx", obj, obj->GetTypeInfo(), obj->GetSize(), &field, oldRef.GetFieldValue(), newRef.GetFieldValue()); - } else { + } else { //LCOV_EXCL_BR_LINE DLOG(FIX, "untag ref@%p: %#zx -> %#zx", &field, oldRef.GetFieldValue(), newRef.GetFieldValue()); } return true; } - } else { + } else { //LCOV_EXCL_BR_LINE return false; } } @@ -141,11 +140,11 @@ bool WCollector::TryUntagRefField(BaseObject* obj, RefField<>& field, BaseObject return false; } -static void TraceRefField(BaseObject *obj, BaseObject *targetObj, RefField<> &field, - TraceCollector::WorkStack &workStack, RegionDesc *targetRegion); -// note each ref-field will not be traced twice, so each old pointer the tracer meets must come from previous gc. -static void TraceRefField(BaseObject *obj, RefField<> &field, TraceCollector::WorkStack &workStack, - TraceCollector::WeakStack &weakStack, const GCReason gcReason) +static void MarkingRefField(BaseObject *obj, BaseObject *targetObj, RefField<> &field, + ParallelLocalMarkStack &markStack, RegionDesc *targetRegion); +// note each ref-field will not be marked twice, so each old pointer the markingr meets must come from previous gc. 
+static void MarkingRefField(BaseObject *obj, RefField<> &field, ParallelLocalMarkStack &markStack, + WeakStack &weakStack, const GCReason gcReason) { RefField<> oldField(field); BaseObject* targetObj = oldField.GetTargetObject(); @@ -153,100 +152,73 @@ static void TraceRefField(BaseObject *obj, RefField<> &field, TraceCollector::Wo if (!Heap::IsTaggedObject(oldField.GetFieldValue())) { return; } - auto region = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)targetObj)); // field is tagged object, should be in heap DCHECK_CC(Heap::IsHeapAddress(targetObj)); - auto targetRegion = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)targetObj)); + auto targetRegion = RegionDesc::GetAliveRegionDescAt(reinterpret_cast((void*)targetObj)); if (gcReason != GC_REASON_YOUNG && oldField.IsWeak()) { - DLOG(TRACE, "trace: skip weak obj when full gc, object: %p@%p, targetObj: %p", obj, &field, targetObj); - weakStack.push_back(&field); + DLOG(TRACE, "marking: skip weak obj when full gc, object: %p@%p, targetObj: %p", obj, &field, targetObj); + // weak ref is cleared after roots pre-forward, so there might be a to-version weak ref which also need to be + // cleared, offset recorded here will help us find it + weakStack.push_back(std::make_shared*, size_t>>( + &field, reinterpret_cast(&field) - reinterpret_cast(obj))); return; } - if (gcReason == GC_REASON_YOUNG && targetRegion->IsInOldSpace()) { - DLOG(TRACE, "trace: skip old object %p@%p, target object: %p<%p>(%zu)", + // cannot skip objects in EXEMPTED_FROM_REGION, because its rset is incomplete + if (gcReason == GC_REASON_YOUNG && !targetRegion->IsInYoungSpace()) { + DLOG(TRACE, "marking: skip non-young object %p@%p, target object: %p<%p>(%zu)", obj, &field, targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); return; } - common::TraceRefField(obj, targetObj, field, workStack, targetRegion); + common::MarkingRefField(obj, targetObj, field, markStack, targetRegion); } -// note each ref-field will not be traced 
twice, so each old pointer the tracer meets must come from previous gc. -static void TraceRefField(BaseObject *obj, BaseObject *targetObj, RefField<> &field, - TraceCollector::WorkStack &workStack, RegionDesc *targetRegion) +// note each ref-field will not be marked twice, so each old pointer the markingr meets must come from previous gc. +static void MarkingRefField(BaseObject *obj, BaseObject *targetObj, RefField<> &field, + ParallelLocalMarkStack &markStack, RegionDesc *targetRegion) { - if (targetRegion->IsNewObjectSinceTrace(targetObj)) { - DLOG(TRACE, "trace: skip new obj %p<%p>(%zu)", targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); + if (targetRegion->IsNewObjectSinceMarking(targetObj)) { + DLOG(TRACE, "marking: skip new obj %p<%p>(%zu)", targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); return; } if (targetRegion->MarkObject(targetObj)) { - DLOG(TRACE, "trace: obj has been marked %p", targetObj); + DLOG(TRACE, "marking: obj has been marked %p", targetObj); return; } - DLOG(TRACE, "trace obj %p ref@%p: %p<%p>(%zu)", + DLOG(TRACE, "marking obj %p ref@%p: %p<%p>(%zu)", obj, &field, targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); - workStack.push_back(targetObj); + markStack.Push(targetObj); } -TraceCollector::TraceRefFieldVisitor WCollector::CreateTraceObjectRefFieldsVisitor(WorkStack *workStack, - WeakStack *weakStack) +MarkingCollector::MarkingRefFieldVisitor ArkCollector::CreateMarkingObjectRefFieldsVisitor( + ParallelLocalMarkStack &markStack, WeakStack &weakStack) { - TraceRefFieldVisitor visitor; + MarkingRefFieldVisitor visitor; if (gcReason_ == GCReason::GC_REASON_YOUNG) { - visitor.SetVisitor([obj = visitor.GetClosure(), workStack, weakStack](RefField<> &field) { + visitor.SetVisitor([obj = visitor.GetClosure(), &markStack, &weakStack](RefField<> &field) { const GCReason gcReason = GCReason::GC_REASON_YOUNG; - TraceRefField(*obj, field, *workStack, *weakStack, gcReason); + MarkingRefField(*obj, field, markStack, 
weakStack, gcReason); }); } else { - visitor.SetVisitor([obj = visitor.GetClosure(), workStack, weakStack](RefField<> &field) { + visitor.SetVisitor([obj = visitor.GetClosure(), &markStack, &weakStack](RefField<> &field) { const GCReason gcReason = GCReason::GC_REASON_HEU; - TraceRefField(*obj, field, *workStack, *weakStack, gcReason); + MarkingRefField(*obj, field, markStack, weakStack, gcReason); }); } return visitor; } -void WCollector::TraceObjectRefFields(BaseObject *obj, TraceRefFieldVisitor *data) +void ArkCollector::MarkingObjectRefFields(BaseObject *obj, MarkingRefFieldVisitor *data) { - data->SetTraceRefFieldArgs(obj); + data->SetMarkingRefFieldArgs(obj); obj->ForEachRefField(data->GetRefFieldVisitor()); } -#ifdef PANDA_JS_ETS_HYBRID_MODE -// note each ref-field will not be traced twice, so each old pointer the tracer meets must come from previous gc. -void WCollector::TraceXRef(RefField<>& field, WorkStack& workStack) const -{ - BaseObject* targetObj = field.GetTargetObject(); - auto region = RegionDesc::GetRegionDescAt(reinterpret_cast(targetObj)); - // field is tagged object, should be in heap - DCHECK_CC(Heap::IsHeapAddress(targetObj)); - - DLOG(TRACE, "trace obj %p <%p>(%zu)", targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); - if (region->IsNewObjectSinceTrace(targetObj)) { - DLOG(TRACE, "trace: skip new obj %p<%p>(%zu)", targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); - return; - } - ASSERT(!field.IsWeak()); - if (!region->MarkObject(targetObj)) { - workStack.push_back(targetObj); - } -} - -void WCollector::TraceObjectXRef(BaseObject* obj, WorkStack& workStack) -{ - auto refFunc = [this, &workStack] (RefField<>& field) { - TraceXRef(field, workStack); - }; - - obj->IterateXRef(refFunc); -} -#endif - -void WCollector::FixRefField(BaseObject* obj, RefField<>& field) const +void ArkCollector::FixRefField(BaseObject* obj, RefField<>& field) const { RefField<> oldField(field); BaseObject* targetObj = oldField.GetTargetObject(); @@ 
-258,18 +230,26 @@ void WCollector::FixRefField(BaseObject* obj, RefField<>& field) const return; } - BaseObject* latest = FindToVersion(targetObj); - - // update remember set - BaseObject* toObj = latest == nullptr ? targetObj : latest; - RegionDesc* objRegion = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)obj)); - RegionDesc* refRegion = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)toObj)); - if (!objRegion->IsInRecentSpace() && refRegion->IsInRecentSpace()) { - if (objRegion->MarkRSetCardTable(obj)) { - DLOG(TRACE, "fix phase update point-out remember set of region %p, obj %p, ref: %p<%p>", - objRegion, obj, toObj, toObj->GetTypeInfo()); + RegionDesc::InlinedRegionMetaData *refRegion = RegionDesc::InlinedRegionMetaData::GetInlinedRegionMetaData( + reinterpret_cast(targetObj)); + bool isFrom = refRegion->IsFromRegion(); + bool isInRcent = refRegion->IsInRecentSpace(); + if (isInRcent) { + RegionDesc::InlinedRegionMetaData *objRegion = RegionDesc::InlinedRegionMetaData::GetInlinedRegionMetaData( + reinterpret_cast(obj)); + if (!objRegion->IsInRecentSpace() && + objRegion->MarkRSetCardTable(obj)) { + DLOG(TRACE, + "fix phase update point-out remember set of region %p, obj " + "%p, ref: <%p>", + objRegion, obj, targetObj->GetTypeInfo()); } + return; + } else if (!isFrom) { + return; } + BaseObject* latest = FindToVersion(targetObj); + if (latest == nullptr) { return; } CHECK_CC(latest->IsValidObject()); @@ -280,14 +260,14 @@ void WCollector::FixRefField(BaseObject* obj, RefField<>& field) const } } -void WCollector::FixObjectRefFields(BaseObject* obj) const +void ArkCollector::FixObjectRefFields(BaseObject* obj) const { DLOG(FIX, "fix obj %p<%p>(%zu)", obj, obj->GetTypeInfo(), obj->GetSize()); auto refFunc = [this, obj](RefField<>& field) { FixRefField(obj, field); }; obj->ForEachRefField(refFunc); } -BaseObject* WCollector::ForwardUpdateRawRef(ObjectRef& root) +BaseObject* ArkCollector::ForwardUpdateRawRef(ObjectRef& root) { auto& refField = 
reinterpret_cast&>(root); RefField<> oldField(refField); @@ -310,33 +290,232 @@ BaseObject* WCollector::ForwardUpdateRawRef(ObjectRef& root) return oldObj; } -void WCollector::RemarkAndPreforwardStaticRoots(WorkStack& workStack) +// UDAV +//#ifdef PANDA_JS_ETS_HYBRID_MODE +//// note each ref-field will not be traced twice, so each old pointer the tracer meets must come from previous gc. +//void ArkCollector::TraceXRef(RefField<>& field, WorkStack& workStack) const +//{ +// BaseObject* targetObj = field.GetTargetObject(); +// auto region = RegionDesc::GetRegionDescAt(reinterpret_cast(targetObj)); +// // field is tagged object, should be in heap +// DCHECK_CC(Heap::IsHeapAddress(targetObj)); +// +// DLOG(TRACE, "trace obj %p <%p>(%zu)", targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); +// if (region->IsNewObjectSinceTrace(targetObj)) { +// DLOG(TRACE, "trace: skip new obj %p<%p>(%zu)", targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); +// return; +// } +// ASSERT(!field.IsWeak()); +// if (!region->MarkObject(targetObj)) { +// workStack.push_back(targetObj); +// } +//} +// +//void ArkCollector::TraceObjectXRef(BaseObject* obj, WorkStack& workStack) +//{ +// auto refFunc = [this, &workStack] (RefField<>& field) { +// TraceXRef(field, workStack); +// }; +// +// obj->IterateXRef(refFunc); +//} +//#endif + +void ArkCollector::CollectGarbageWithXRef() { - const auto markObject = [&workStack, this](BaseObject *temp) { - if (!this->MarkObject(temp)) { - workStack.push_back(temp); +// UDAV +//#ifdef ENABLE_CMC_RB_DFX +// WVerify::DisableReadBarrierDFX(*this); +//#endif +// +// ScopedStopTheWorld stw("stw-gc"); +// RemoveXRefFromRoots(); +// +// WorkStack workStack = NewWorkStack(); +// EnumRoots(workStack); +// TraceHeap(workStack); +// SweepUnmarkedXRefs(); +// PostTrace(); +// +// AddXRefToRoots(); +// Preforward(); +// // reclaim large objects should after preforward(may process weak ref) and +// // before fix heap(may clear live bit) +// 
CollectLargeGarbage(); +// SweepThreadLocalJitFort(); +// +// CopyFromSpace(); +// WVerify::VerifyAfterForward(*this); +// +// PrepareFix(); +// FixHeap(); +// CollectPinnedGarbage(); +// +// TransitionToGCPhase(GCPhase::GC_PHASE_IDLE, true); +// +// ClearAllGCInfo(); +// CollectSmallSpace(); +// UnmarkAllXRefs(); +// +//#if defined(ENABLE_CMC_RB_DFX) +// WVerify::EnableReadBarrierDFX(*this); +//#endif +} + +class RemarkAndPreforwardVisitor { +public: + RemarkAndPreforwardVisitor(LocalCollectStack &collectStack, ArkCollector *collector) + : collectStack_(collectStack), collector_(collector) {} + + void operator()(RefField<> &refField) + { + RefField<> oldField(refField); + BaseObject* oldObj = oldField.GetTargetObject(); + DLOG(FIX, "visit raw-ref @%p: %p", &refField, oldObj); + + auto regionType = + RegionDesc::InlinedRegionMetaData::GetInlinedRegionMetaData(reinterpret_cast(oldObj)) + ->GetRegionType(); + if (regionType == RegionDesc::RegionType::FROM_REGION) { + BaseObject* toVersion = collector_->TryForwardObject(oldObj); + if (toVersion == nullptr) { //LCOV_EXCL_BR_LINE + Heap::throwOOM(); + return; + } + HeapProfilerListener::GetInstance().OnMoveEvent(reinterpret_cast(oldObj), + reinterpret_cast(toVersion), + toVersion->GetSize()); + RefField<> newField(toVersion); + // CAS failure means some mutator or gc thread writes a new ref (must be a to-object), no need to retry. 
+ if (refField.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) { + DLOG(FIX, "fix raw-ref @%p: %p -> %p", &refField, oldObj, toVersion); + } + MarkToObject(oldObj, toVersion); + } else { + if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) { + MarkObject(oldObj); + } else if (RegionalHeap::IsYoungSpaceObject(oldObj) && !RegionalHeap::IsNewObjectSinceMarking(oldObj) && + !RegionalHeap::IsMarkedObject(oldObj)) { + // RSet don't protect exempted objects, we need to mark it + MarkObject(oldObj); + } } - }; + } + +private: + void MarkObject(BaseObject *object) + { + if (!RegionalHeap::IsNewObjectSinceMarking(object) && !collector_->MarkObject(object)) { + collectStack_.Push(object); + } + } + + void MarkToObject(BaseObject *oldVersion, BaseObject *toVersion) + { + // We've checked oldVersion is in fromSpace, no need to check markingLine + if (!collector_->MarkObject(oldVersion)) { + // No need to count oldVersion object size, as it has been copied. + collector_->MarkObject(toVersion); + // oldVersion don't have valid type info, cannot push it + collectStack_.Push(toVersion); + } + } + +private: + LocalCollectStack &collectStack_; + ArkCollector *collector_; +}; + +class RemarkingAndPreforwardTask : public common::Task { +public: + RemarkingAndPreforwardTask(ArkCollector *collector, GlobalMarkStack &globalMarkStack, TaskPackMonitor &monitor, + std::function& next) + : Task(0), collector_(collector), globalMarkStack_(globalMarkStack), monitor_(monitor), getNextMutator_(next) + {} + + bool Run([[maybe_unused]] uint32_t threadIndex) override + { + ThreadLocal::SetThreadType(ThreadType::GC_THREAD); + LocalCollectStack collectStack(&globalMarkStack_); + RemarkAndPreforwardVisitor visitor(collectStack, collector_); + Mutator *mutator = getNextMutator_(); + while (mutator != nullptr) { + VisitMutatorRoot(visitor, *mutator); + mutator = getNextMutator_(); + } + collectStack.Publish(); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + 
ThreadLocal::ClearAllocBufferRegion(); + monitor_.NotifyFinishOne(); + return true; + } + +private: + ArkCollector *collector_ {nullptr}; + GlobalMarkStack &globalMarkStack_; + TaskPackMonitor &monitor_; + std::function &getNextMutator_; +}; - const auto markToObject = [&workStack, this](BaseObject *oldVersion, BaseObject *toVersion) { - if (!this->MarkObject(toVersion)) { - // Therefore, we must still attempt to mark the old object to prevent - // it from being pushed into the mark stack during subsequent - // traversals. - this->MarkObject(oldVersion); - // The reference in toSpace needs to be fixed up. Therefore, even if - // the oldVersion has been marked, it must still be pushed into the - // stack. This will be optimized later. - workStack.push_back(toVersion); +void ArkCollector::ParallelRemarkAndPreforward(GlobalMarkStack &globalMarkStack) +{ + std::vector taskList; + MutatorManager &mutatorManager = MutatorManager::Instance(); + mutatorManager.VisitAllMutators([&taskList](Mutator &mutator) { + taskList.push_back(&mutator); + }); + std::atomic taskIter = 0; + std::function getNextMutator = [&taskIter, &taskList]() -> Mutator* { + uint32_t idx = static_cast(taskIter.fetch_add(1U, std::memory_order_relaxed)); + if (idx < taskList.size()) { + return taskList[idx]; } + return nullptr; }; - RefFieldVisitor visitor = [this, &markObject, &markToObject](RefField<>& refField) { + + const uint32_t runningWorkers = std::min(GetGCThreadCount(true), taskList.size()); + uint32_t parallelCount = runningWorkers + 1; // 1 :DaemonThread + TaskPackMonitor monitor(runningWorkers, runningWorkers); + for (uint32_t i = 1; i < parallelCount; ++i) { + GetThreadPool()->PostTask(std::make_unique(this, globalMarkStack, monitor, + getNextMutator)); + } + // Run in daemon thread. 
+ LocalCollectStack collectStack(&globalMarkStack); + RemarkAndPreforwardVisitor visitor(collectStack, this); + VisitGlobalRoots(visitor); + Mutator *mutator = getNextMutator(); + while (mutator != nullptr) { + VisitMutatorRoot(visitor, *mutator); + mutator = getNextMutator(); + } + collectStack.Publish(); + monitor.WaitAllFinished(); +} + +void ArkCollector::RemarkAndPreforwardStaticRoots(GlobalMarkStack &globalMarkStack) +{ + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::RemarkAndPreforwardStaticRoots", ""); + const uint32_t maxWorkers = GetGCThreadCount(true) - 1; + if (maxWorkers > 0) { + ParallelRemarkAndPreforward(globalMarkStack); + } else { + LocalCollectStack collectStack(&globalMarkStack); + RemarkAndPreforwardVisitor visitor(collectStack, this); + VisitSTWRoots(visitor); + collectStack.Publish(); + } +} + +void ArkCollector::PreforwardConcurrentRoots() +{ + RefFieldVisitor visitor = [this](RefField<> &refField) { RefField<> oldField(refField); - BaseObject* oldObj = oldField.GetTargetObject(); + BaseObject *oldObj = oldField.GetTargetObject(); DLOG(FIX, "visit raw-ref @%p: %p", &refField, oldObj); if (IsFromObject(oldObj)) { - BaseObject* toVersion = TryForwardObject(oldObj); - CHECK_CC(toVersion != nullptr); + BaseObject *toVersion = TryForwardObject(oldObj); + ASSERT_LOGF(toVersion != nullptr, "TryForwardObject failed"); HeapProfilerListener::GetInstance().OnMoveEvent(reinterpret_cast(oldObj), reinterpret_cast(toVersion), toVersion->GetSize()); @@ -345,26 +524,18 @@ void WCollector::RemarkAndPreforwardStaticRoots(WorkStack& workStack) if (refField.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) { DLOG(FIX, "fix raw-ref @%p: %p -> %p", &refField, oldObj, toVersion); } - markToObject(oldObj, toVersion); - } else { - markObject(oldObj); } }; - UpdateRoots(visitor); - // inline MergeAllocBufferRoots - MutatorManager &mutatorManager = MutatorManager::Instance(); - bool worldStopped = mutatorManager.WorldStopped(); - worldStopped ? 
((void)0) : mutatorManager.MutatorManagementWLock(); - theAllocator_.VisitAllocBuffers([&markObject](AllocationBuffer &buffer) { buffer.MarkStack(markObject); }); - worldStopped ? ((void)0) : mutatorManager.MutatorManagementWUnlock(); + VisitConcurrentRoots(visitor); } -void WCollector::PreforwardStaticWeakRoots() +void ArkCollector::PreforwardStaticWeakRoots() { OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PreforwardStaticRoots", ""); WeakRefFieldVisitor weakVisitor = GetWeakRefFieldVisitor(); VisitWeakRoots(weakVisitor); + InvokeSharedNativePointerCallbacks(); MutatorManager::Instance().VisitAllMutators([](Mutator& mutator) { // Request finalize callback in each vm-thread when gc finished. mutator.SetFinalizeRequest(); @@ -372,11 +543,11 @@ void WCollector::PreforwardStaticWeakRoots() AllocationBuffer* allocBuffer = AllocationBuffer::GetAllocBuffer(); if (LIKELY_CC(allocBuffer != nullptr)) { - allocBuffer->ClearRegion(); + allocBuffer->ClearRegions(); } } -void WCollector::PreforwardConcurrencyModelRoots() +void ArkCollector::PreforwardConcurrencyModelRoots() { LOG_COMMON(FATAL) << "Unresolved fatal"; UNREACHABLE_CC(); @@ -411,50 +582,53 @@ void EnumRootsBuffer::UpdateBufferSize() bufferSize_ = std::max(buffer_.capacity(), bufferSize_); } if (buffer_.capacity() > UINT16_MAX) { - LOG_COMMON(INFO) << "too many roots, allocate too larget buffer: " << buffer_.size() << ", allocate " + LOG_COMMON(INFO) << "too many roots, allocated buffer too large: " << buffer_.size() << ", allocate " << (static_cast(buffer_.capacity()) / MB); } } -template -CArrayList WCollector::EnumRoots() +template +CArrayList ArkCollector::EnumRoots() { + STWParam stwParam{"wgc-enumroot"}; EnumRootsBuffer buffer; CArrayList *results = buffer.GetBuffer(); - common::RefFieldVisitor visitor = [&results](RefField<> &filed) { results->push_back(filed.GetTargetObject()); }; + common::RefFieldVisitor visitor = [&results](RefField<>& field) { results->push_back(field.GetTargetObject()); }; if 
constexpr (policy == EnumRootsPolicy::NO_STW_AND_NO_FLIP_MUTATOR) { EnumRootsImpl(visitor); } else if constexpr (policy == EnumRootsPolicy::STW_AND_NO_FLIP_MUTATOR) { - ScopedStopTheWorld stw("wgc-enumroot"); + ScopedStopTheWorld stw(stwParam); OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, ("CMCGC::EnumRoots-STW-bufferSize(" + std::to_string(results->capacity()) + ")").c_str(), ""); EnumRootsImpl(visitor); } else if constexpr (policy == EnumRootsPolicy::STW_AND_FLIP_MUTATOR) { - auto rootSet = EnumRootsFlip(visitor); + auto rootSet = EnumRootsFlip(stwParam, visitor); for (const auto &roots : rootSet) { std::copy(roots.begin(), roots.end(), std::back_inserter(*results)); } + VisitConcurrentRoots(visitor); } buffer.UpdateBufferSize(); + GetGCStats().recordSTWTime(stwParam.GetElapsedNs()); return std::move(*results); } -void WCollector::TraceHeap(const CArrayList &collectedRoots) +void ArkCollector::MarkingHeap(const CArrayList &collectedRoots) { - COMMON_PHASE_TIMER("trace live objects"); + COMMON_PHASE_TIMER("marking live objects"); markedObjectCount_.store(0, std::memory_order_relaxed); TransitionToGCPhase(GCPhase::GC_PHASE_MARK, true); - TraceRoots(collectedRoots); + MarkingRoots(collectedRoots); ProcessFinalizers(); ExemptFromSpace(); } -void WCollector::PostTrace() +void ArkCollector::PostMarking() { - OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PostTrace", ""); - COMMON_PHASE_TIMER("PostTrace"); + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PostMarking", ""); + COMMON_PHASE_TIMER("PostMarking"); TransitionToGCPhase(GC_PHASE_POST_MARK, true); // clear satb buffer when gc finish tracing. 
@@ -463,18 +637,18 @@ void WCollector::PostTrace() WVerify::VerifyAfterMark(*this); } -WeakRefFieldVisitor WCollector::GetWeakRefFieldVisitor() +WeakRefFieldVisitor ArkCollector::GetWeakRefFieldVisitor() { return [this](RefField<> &refField) -> bool { RefField<> oldField(refField); BaseObject *oldObj = oldField.GetTargetObject(); if (gcReason_ == GC_REASON_YOUNG) { - if (RegionSpace::IsYoungSpaceObject(oldObj) && !IsMarkedObject(oldObj) && - !RegionSpace::IsNewObjectSinceTrace(oldObj)) { + if (RegionalHeap::IsYoungSpaceObject(oldObj) && !IsMarkedObject(oldObj) && + !RegionalHeap::IsNewObjectSinceMarking(oldObj)) { return false; } } else { - if (!IsMarkedObject(oldObj) && !RegionSpace::IsNewObjectSinceTrace(oldObj)) { + if (!IsMarkedObject(oldObj) && !RegionalHeap::IsNewObjectSinceMarking(oldObj)) { return false; } } @@ -497,35 +671,69 @@ WeakRefFieldVisitor WCollector::GetWeakRefFieldVisitor() }; } -void WCollector::PreforwardFlip() +RefFieldVisitor ArkCollector::GetPrefowardRefFieldVisitor() +{ + return [this](RefField<> &refField) -> void { + RefField<> oldField(refField); + BaseObject *oldObj = oldField.GetTargetObject(); + if (IsFromObject(oldObj)) { + BaseObject *toVersion = TryForwardObject(oldObj); + CHECK_CC(toVersion != nullptr); + HeapProfilerListener::GetInstance().OnMoveEvent(reinterpret_cast(oldObj), + reinterpret_cast(toVersion), + toVersion->GetSize()); + RefField<> newField(toVersion); + // CAS failure means some mutator or gc thread writes a new ref (must be + // a to-object), no need to retry. 
+ if (refField.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) { + DLOG(FIX, "fix raw-ref @%p: %p -> %p", &refField, oldObj, toVersion); + } + } + }; +} + +void ArkCollector::PreforwardFlip() { auto remarkAndForwardGlobalRoot = [this]() { OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PreforwardFlip[STW]", ""); + SetGCThreadQosPriority(common::PriorityMode::STW); ASSERT_LOGF(GetThreadPool() != nullptr, "thread pool is null"); TransitionToGCPhase(GCPhase::GC_PHASE_FINAL_MARK, true); Remark(); - PostTrace(); - reinterpret_cast(theAllocator_).PrepareForward(); + PostMarking(); + reinterpret_cast(theAllocator_).PrepareForward(); TransitionToGCPhase(GCPhase::GC_PHASE_PRECOPY, true); WeakRefFieldVisitor weakVisitor = GetWeakRefFieldVisitor(); - VisitWeakGlobalRoots(weakVisitor); + SetGCThreadQosPriority(common::PriorityMode::FOREGROUND); + + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + // only visit weak roots that may reference young objects + VisitDynamicWeakGlobalRoots(weakVisitor); + } else { + VisitDynamicWeakGlobalRoots(weakVisitor); + VisitDynamicWeakGlobalRootsOld(weakVisitor); + } }; FlipFunction forwardMutatorRoot = [this](Mutator &mutator) { WeakRefFieldVisitor weakVisitor = GetWeakRefFieldVisitor(); VisitWeakMutatorRoot(weakVisitor, mutator); + RefFieldVisitor visitor = GetPrefowardRefFieldVisitor(); + VisitMutatorPreforwardRoot(visitor, mutator); // Request finalize callback in each vm-thread when gc finished. 
mutator.SetFinalizeRequest(); }; - MutatorManager::Instance().FlipMutators("final-mark", remarkAndForwardGlobalRoot, &forwardMutatorRoot); - + STWParam stwParam{"final-mark"}; + MutatorManager::Instance().FlipMutators(stwParam, remarkAndForwardGlobalRoot, &forwardMutatorRoot); + InvokeSharedNativePointerCallbacks(); + GetGCStats().recordSTWTime(stwParam.GetElapsedNs()); AllocationBuffer* allocBuffer = AllocationBuffer::GetAllocBuffer(); if (LIKELY_CC(allocBuffer != nullptr)) { - allocBuffer->ClearRegion(); + allocBuffer->ClearRegions(); } } -void WCollector::Preforward() +void ArkCollector::Preforward() { COMMON_PHASE_TIMER("Preforward"); OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::Preforward[STW]", ""); @@ -537,73 +745,107 @@ void WCollector::Preforward() // copy and fix finalizer roots. // Only one root task, no need to post task. PreforwardStaticWeakRoots(); + RefFieldVisitor visitor = GetPrefowardRefFieldVisitor(); + VisitPreforwardRoots(visitor); } -void WCollector::PrepareFix() +void ArkCollector::ConcurrentPreforward() { - // make sure all objects before fixline is initialized - COMMON_PHASE_TIMER("PrepareFix"); - OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PrepareFix[STW]", ""); - reinterpret_cast(theAllocator_).PrepareFix(); - reinterpret_cast(theAllocator_).PrepareFixForPin(); -#ifndef GC_STW_STRINGTABLE - auto *baseRuntime = BaseRuntime::GetInstance(); - auto& stringTable = reinterpret_cast(baseRuntime->GetStringTable()); - stringTable.GetInternalTable()->GetCleaner()->CleanUp(); -#endif - TransitionToGCPhase(GCPhase::GC_PHASE_FIX, true); + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::ConcurrentPreforward", ""); + PreforwardConcurrentRoots(); + ProcessStringTable(); } -void WCollector::FixHeap() +void ArkCollector::PrepareFix() { - COMMON_PHASE_TIMER("FixHeap"); - OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::FixHeap", ""); - reinterpret_cast(theAllocator_).FixHeap(); + if (Heap::GetHeap().GetGCReason() == GCReason::GC_REASON_YOUNG) { + // 
string table objects are always not in young space, skip it + return; + } - WVerify::VerifyAfterFix(*this); -} + COMMON_PHASE_TIMER("PrepareFix"); -void WCollector::CollectGarbageWithXRef() -{ -#ifdef ENABLE_CMC_RB_DFX - WVerify::DisableReadBarrierDFX(*this); + // we cannot re-enter STW, check it first + if (!MutatorManager::Instance().WorldStopped()) { + STWParam prepareFixStwParam{"wgc-preparefix"}; + ScopedStopTheWorld stw(prepareFixStwParam); + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PrepareFix[STW]", ""); + +#ifndef GC_STW_STRINGTABLE + auto *baseRuntime = BaseRuntime::GetInstance(); + auto& stringTable = reinterpret_cast(baseRuntime->GetStringTable()); + stringTable.GetInternalTable()->GetCleaner()->CleanUp(); #endif - ScopedStopTheWorld stw("stw-gc"); - RemoveXRefFromRoots(); + GetGCStats().recordSTWTime(prepareFixStwParam.GetElapsedNs()); + } else { +#ifndef GC_STW_STRINGTABLE + auto *baseRuntime = BaseRuntime::GetInstance(); + auto& stringTable = reinterpret_cast(baseRuntime->GetStringTable()); + stringTable.GetInternalTable()->GetCleaner()->CleanUp(); +#endif + } +} - WorkStack workStack = NewWorkStack(); - auto collectedRoots = EnumRoots(); - TraceHeap(collectedRoots); - SweepUnmarkedXRefs(); - PostTrace(); +void ArkCollector::ParallelFixHeap() +{ + auto& regionalHeap = reinterpret_cast(theAllocator_); + auto taskList = regionalHeap.CollectFixTasks(); + std::atomic taskIter = 0; + std::function getNextTask = [&taskIter, &taskList]() -> FixHeapTask* { + uint32_t idx = static_cast(taskIter.fetch_add(1U, std::memory_order_relaxed)); + if (idx < taskList.size()) { + return &taskList[idx]; + } + return nullptr; + }; - AddXRefToRoots(); - Preforward(); - // reclaim large objects should after preforward(may process weak ref) and - // before fix heap(may clear live bit) - CollectLargeGarbage(); - SweepThreadLocalJitFort(); + const uint32_t runningWorkers = GetGCThreadCount(true) - 1; + uint32_t parallelCount = runningWorkers + 1; // 1 :DaemonThread + 
FixHeapWorker::Result results[parallelCount]; + { + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::FixHeap [Parallel]", ""); + // Fix heap + TaskPackMonitor monitor(runningWorkers, runningWorkers); + for (uint32_t i = 1; i < parallelCount; ++i) { + GetThreadPool()->PostTask(std::make_unique(this, monitor, results[i], getNextTask)); + } - CopyFromSpace(); - WVerify::VerifyAfterForward(*this); + FixHeapWorker gcWorker(this, monitor, results[0], getNextTask); + auto task = getNextTask(); + while (task != nullptr) { + gcWorker.DispatchRegionFixTask(task); + task = getNextTask(); + } + monitor.WaitAllFinished(); + } - PrepareFix(); - FixHeap(); - CollectPinnedGarbage(); + { + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::Post FixHeap Clear [Parallel]", ""); + // Post clear task + TaskPackMonitor monitor(runningWorkers, runningWorkers); + for (uint32_t i = 1; i < parallelCount; ++i) { + GetThreadPool()->PostTask(std::make_unique(results[i], monitor)); + } - TransitionToGCPhase(GCPhase::GC_PHASE_IDLE, true); + PostFixHeapWorker gcWorker(results[0], monitor); + gcWorker.PostClearTask(); + PostFixHeapWorker::CollectEmptyRegions(); + monitor.WaitAllFinished(); + } +} - ClearAllGCInfo(); - CollectSmallSpace(); - UnmarkAllXRefs(); +void ArkCollector::FixHeap() +{ + TransitionToGCPhase(GCPhase::GC_PHASE_FIX, true); + COMMON_PHASE_TIMER("FixHeap"); + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::FixHeap", ""); + ParallelFixHeap(); -#if defined(ENABLE_CMC_RB_DFX) - WVerify::EnableReadBarrierDFX(*this); -#endif + WVerify::VerifyAfterFix(*this); } -void WCollector::DoGarbageCollection() +void ArkCollector::DoGarbageCollection() { const bool isNotYoungGC = gcReason_ != GCReason::GC_REASON_YOUNG; OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::DoGarbageCollection", ""); @@ -615,14 +857,17 @@ void WCollector::DoGarbageCollection() #ifdef ENABLE_CMC_RB_DFX WVerify::DisableReadBarrierDFX(*this); #endif - - ScopedStopTheWorld stw("stw-gc"); + STWParam stwParam{"stw-gc"}; + { + 
ScopedStopTheWorld stw(stwParam); auto collectedRoots = EnumRoots(); - TraceHeap(collectedRoots); + MarkingHeap(collectedRoots); + TransitionToGCPhase(GCPhase::GC_PHASE_FINAL_MARK, true); Remark(); - PostTrace(); + PostMarking(); Preforward(); + ConcurrentPreforward(); // reclaim large objects should after preforward(may process weak ref) and // before fix heap(may clear live bit) if (isNotYoungGC) { @@ -636,7 +881,7 @@ void WCollector::DoGarbageCollection() PrepareFix(); FixHeap(); if (isNotYoungGC) { - CollectPinnedGarbage(); + CollectNonMovableGarbage(); } TransitionToGCPhase(GCPhase::GC_PHASE_IDLE, true); @@ -647,17 +892,22 @@ void WCollector::DoGarbageCollection() #if defined(ENABLE_CMC_RB_DFX) WVerify::EnableReadBarrierDFX(*this); #endif + } + GetGCStats().recordSTWTime(stwParam.GetElapsedNs()); return; } else if (gcMode_ == GCMode::CONCURRENT_MARK) { // 1: concurrent-mark auto collectedRoots = EnumRoots(); - TraceHeap(collectedRoots); + MarkingHeap(collectedRoots); + STWParam finalMarkStwParam{"final-mark"}; { - ScopedStopTheWorld stw("final-mark", true, GCPhase::GC_PHASE_FINAL_MARK); + ScopedStopTheWorld stw(finalMarkStwParam, true, GCPhase::GC_PHASE_FINAL_MARK); Remark(); - PostTrace(); - reinterpret_cast(theAllocator_).PrepareForward(); + PostMarking(); + reinterpret_cast(theAllocator_).PrepareForward(); Preforward(); } + GetGCStats().recordSTWTime(finalMarkStwParam.GetElapsedNs()); + ConcurrentPreforward(); // reclaim large objects should after preforward(may process weak ref) and // before fix heap(may clear live bit) if (isNotYoungGC) { @@ -671,7 +921,7 @@ void WCollector::DoGarbageCollection() PrepareFix(); FixHeap(); if (isNotYoungGC) { - CollectPinnedGarbage(); + CollectNonMovableGarbage(); } TransitionToGCPhase(GCPhase::GC_PHASE_IDLE, true); @@ -681,8 +931,9 @@ void WCollector::DoGarbageCollection() } auto collectedRoots = EnumRoots(); - TraceHeap(collectedRoots); + MarkingHeap(collectedRoots); PreforwardFlip(); + ConcurrentPreforward(); // reclaim 
large objects should after preforward(may process weak ref) // and before fix heap(may clear live bit) if (isNotYoungGC) { @@ -693,23 +944,28 @@ void WCollector::DoGarbageCollection() CopyFromSpace(); WVerify::VerifyAfterForward(*this); - { - ScopedStopTheWorld stw("wgc-preparefix"); - PrepareFix(); - } + PrepareFix(); FixHeap(); if (isNotYoungGC) { - CollectPinnedGarbage(); + CollectNonMovableGarbage(); } TransitionToGCPhase(GCPhase::GC_PHASE_IDLE, true); ClearAllGCInfo(); + RegionalHeap &space = reinterpret_cast(theAllocator_); + space.DumpAllRegionSummary("Peak GC log"); + space.DumpAllRegionStats("region statistics when gc ends"); CollectSmallSpace(); } -CArrayList> WCollector::EnumRootsFlip(const common::RefFieldVisitor &visitor) +CArrayList> ArkCollector::EnumRootsFlip(STWParam& param, + const common::RefFieldVisitor &visitor) { - const auto enumGlobalRoots = [this, &visitor]() { EnumRootsImpl(visitor); }; + const auto enumGlobalRoots = [this, &visitor]() { + SetGCThreadQosPriority(common::PriorityMode::STW); + EnumRootsImpl(visitor); + SetGCThreadQosPriority(common::PriorityMode::FOREGROUND); + }; std::mutex stackMutex; CArrayList> rootSet; // allcate for each mutator @@ -720,21 +976,26 @@ CArrayList> WCollector::EnumRootsFlip(const common::Ref std::lock_guard lockGuard(stackMutex); rootSet.emplace_back(std::move(roots)); }; - MutatorManager::Instance().FlipMutators("wgc-enumroot", enumGlobalRoots, &enumMutatorRoot); + MutatorManager::Instance().FlipMutators(param, enumGlobalRoots, &enumMutatorRoot); return rootSet; } -void WCollector::ProcessStringTable() +void ArkCollector::ProcessStringTable() { #ifdef GC_STW_STRINGTABLE return; #endif + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + // no need to fix weak ref in young gc + return; + } + WeakRefFieldVisitor weakVisitor = [this](RefField<> &refField) -> bool { auto isSurvivor = [this](BaseObject* oldObj) { - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(oldObj)); + 
RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(oldObj)); return (gcReason_ == GC_REASON_YOUNG && !region->IsInYoungSpace()) || region->IsMarkedObject(oldObj) - || region->IsNewObjectSinceTrace(oldObj) + || region->IsNewObjectSinceMarking(oldObj) || region->IsToRegion(); }; @@ -772,33 +1033,8 @@ void WCollector::ProcessStringTable() stringTableCleaner->JoinAndWaitSweepWeakRefTask(weakVisitor); } -void WCollector::ProcessWeakReferences() -{ - OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::ProcessWeakReferences", ""); - { - OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::ProcessGlobalWeakStack", ""); - while (!globalWeakStack_.empty()) { - RefField<>& field = reinterpret_cast&>(*globalWeakStack_.back()); - globalWeakStack_.pop_back(); - RefField<> oldField(field); - BaseObject* targetObj = oldField.GetTargetObject(); - if (gcReason_ == GC_REASON_YOUNG) { - if (!Heap::IsHeapAddress(targetObj) || IsMarkedObject(targetObj) || - RegionSpace::IsNewObjectSinceTrace(targetObj) || !RegionSpace::IsYoungSpaceObject(targetObj)) { - continue; - } - } else { - if (!Heap::IsHeapAddress(targetObj) || IsMarkedObject(targetObj) || - RegionSpace::IsNewObjectSinceTrace(targetObj)) { - continue; - } - field.ClearRef(oldField.GetFieldValue()); - } - } - } -} -void WCollector::ProcessFinalizers() +void ArkCollector::ProcessFinalizers() { std::function finalizable = [this](BaseObject* obj) { return !IsMarkedObject(obj); }; FinalizerProcessor& fp = collectorResources_.GetFinalizerProcessor(); @@ -806,7 +1042,7 @@ void WCollector::ProcessFinalizers() fp.Notify(); } -BaseObject* WCollector::ForwardObject(BaseObject* obj) +BaseObject* ArkCollector::ForwardObject(BaseObject* obj) { BaseObject* to = TryForwardObject(obj); if (to != nullptr) { @@ -817,13 +1053,13 @@ BaseObject* WCollector::ForwardObject(BaseObject* obj) return (to != nullptr) ? 
to : obj; } -BaseObject* WCollector::TryForwardObject(BaseObject* obj) +BaseObject* ArkCollector::TryForwardObject(BaseObject* obj) { return CopyObjectImpl(obj); } // ConcurrentGC -BaseObject* WCollector::CopyObjectImpl(BaseObject* obj) +BaseObject* ArkCollector::CopyObjectImpl(BaseObject* obj) { // reconsider phase difference between mutator and GC thread during transition. if (IsGcThread()) { @@ -862,9 +1098,9 @@ BaseObject* WCollector::CopyObjectImpl(BaseObject* obj) return nullptr; } -BaseObject* WCollector::CopyObjectAfterExclusive(BaseObject* obj) +BaseObject* ArkCollector::CopyObjectAfterExclusive(BaseObject* obj) { - size_t size = RegionSpace::GetAllocSize(*obj); + size_t size = RegionalHeap::GetAllocSize(*obj); // 8: size of free object, but free object can not be copied. if (size == 8) { LOG_COMMON(FATAL) << "forward free obj: " << obj << @@ -872,19 +1108,16 @@ BaseObject* WCollector::CopyObjectAfterExclusive(BaseObject* obj) } BaseObject* toObj = fwdTable_.RouteObject(obj, size); if (toObj == nullptr) { - ASSERT_LOGF(0, "OOM"); // ConcurrentGC obj->UnlockExclusive(BaseStateWord::ForwardState::NORMAL); return toObj; } DLOG(COPY, "copy obj %p<%p>(%zu) to %p", obj, obj->GetTypeInfo(), size, toObj); CopyObject(*obj, *toObj, size); - if (IsToObject(toObj)) { - toObj->SetForwardState(BaseStateWord::ForwardState::NORMAL); - } else { - // if this object is not in to-space, we label it as to-object explicitly. 
- toObj->SetForwardState(BaseStateWord::ForwardState::TO_VERSION); - } + + ASSERT_LOGF(IsToObject(toObj), "Copy object to invalid region"); + toObj->SetForwardState(BaseStateWord::ForwardState::NORMAL); + std::atomic_thread_fence(std::memory_order_release); obj->SetSizeForwarded(size); // Avoid seeing the fwd pointer before observing the size modification @@ -894,18 +1127,19 @@ BaseObject* WCollector::CopyObjectAfterExclusive(BaseObject* obj) return toObj; } -void WCollector::ClearAllGCInfo() +void ArkCollector::ClearAllGCInfo() { COMMON_PHASE_TIMER("ClearAllGCInfo"); - RegionSpace& space = reinterpret_cast(theAllocator_); + RegionalHeap& space = reinterpret_cast(theAllocator_); space.ClearAllGCInfo(); + reinterpret_cast(theAllocator_).ClearJitFortAwaitingMark(); } -void WCollector::CollectSmallSpace() +void ArkCollector::CollectSmallSpace() { OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::CollectSmallSpace", ""); GCStats& stats = GetGCStats(); - RegionSpace& space = reinterpret_cast(theAllocator_); + RegionalHeap& space = reinterpret_cast(theAllocator_); { COMMON_PHASE_TIMER("CollectFromSpaceGarbage"); stats.collectedBytes += stats.smallGarbageSize; @@ -918,23 +1152,23 @@ void WCollector::CollectSmallSpace() } } - size_t candidateBytes = stats.fromSpaceSize + stats.pinnedSpaceSize + stats.largeSpaceSize; + size_t candidateBytes = stats.fromSpaceSize + stats.nonMovableSpaceSize + stats.largeSpaceSize; stats.garbageRatio = (candidateBytes > 0) ? static_cast(stats.collectedBytes) / candidateBytes : 0; stats.liveBytesAfterGC = space.GetAllocatedBytes(); VLOG(INFO, - "collect %zu B: old small %zu - %zu B, old pinned %zu - %zu B, old large %zu - %zu B. garbage ratio %.2f%%", - stats.collectedBytes, stats.fromSpaceSize, stats.smallGarbageSize, stats.pinnedSpaceSize, - stats.pinnedGarbageSize, stats.largeSpaceSize, stats.largeGarbageSize, + "collect %zu B: small %zu - %zu B, non-movable %zu - %zu B, large %zu - %zu B. 
garbage ratio %.2f%%", + stats.collectedBytes, stats.fromSpaceSize, stats.smallGarbageSize, stats.nonMovableSpaceSize, + stats.nonMovableGarbageSize, stats.largeSpaceSize, stats.largeGarbageSize, stats.garbageRatio * 100); // The base of the percentage is 100 OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::CollectSmallSpace END", ( "collect:" + std::to_string(stats.collectedBytes) + - "B;old small:" + std::to_string(stats.fromSpaceSize) + + "B;small:" + std::to_string(stats.fromSpaceSize) + "-" + std::to_string(stats.smallGarbageSize) + - "B;old pinned:" + std::to_string(stats.pinnedSpaceSize) + - "-" + std::to_string(stats.pinnedGarbageSize) + - "B;old large:" + std::to_string(stats.largeSpaceSize) + + "B;non-movable:" + std::to_string(stats.nonMovableSpaceSize) + + "-" + std::to_string(stats.nonMovableGarbageSize) + + "B;large:" + std::to_string(stats.largeSpaceSize) + "-" + std::to_string(stats.largeGarbageSize) + "B;garbage ratio:" + std::to_string(stats.garbageRatio) ).c_str()); @@ -942,5 +1176,31 @@ void WCollector::CollectSmallSpace() collectorResources_.GetFinalizerProcessor().NotifyToReclaimGarbage(); } -bool WCollector::ShouldIgnoreRequest(GCRequest& request) { return request.ShouldBeIgnored(); } +void ArkCollector::SetGCThreadQosPriority(common::PriorityMode mode) +{ +#ifdef ENABLE_QOS + LOG_COMMON(DEBUG) << "SetGCThreadQosPriority gettid " << gettid(); + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::SetGCThreadQosPriority", ""); + switch (mode) { + case PriorityMode::STW: { + OHOS::QOS::SetQosForOtherThread(OHOS::QOS::QosLevel::QOS_USER_INTERACTIVE, gettid()); + break; + } + case PriorityMode::FOREGROUND: { + OHOS::QOS::SetQosForOtherThread(OHOS::QOS::QosLevel::QOS_USER_INITIATED, gettid()); + break; + } + case PriorityMode::BACKGROUND: { + OHOS::QOS::ResetQosForOtherThread(gettid()); + break; + } + default: + UNREACHABLE(); + break; + } + common::Taskpool::GetCurrentTaskpool()->SetThreadPriority(mode); +#endif +} + +bool 
ArkCollector::ShouldIgnoreRequest(GCRequest& request) { return request.ShouldBeIgnored(); } } // namespace common diff --git a/common_components/heap/w_collector/w_collector.h b/common_components/heap/ark_collector/ark_collector.h similarity index 64% rename from common_components/heap/w_collector/w_collector.h rename to common_components/heap/ark_collector/ark_collector.h index 574e53f2d768359ad12c5a6338d8dd9d28d3d502..9ec20d2284c41c50f15b895272c583bcf44157f9 100755 --- a/common_components/heap/w_collector/w_collector.h +++ b/common_components/heap/ark_collector/ark_collector.h @@ -13,20 +13,21 @@ * limitations under the License. */ -#ifndef COMMON_COMPONENTS_HEAP_W_COLLECTOR_WCOLLECTOR_H -#define COMMON_COMPONENTS_HEAP_W_COLLECTOR_WCOLLECTOR_H +#ifndef COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_ARKCOLLECTOR_H +#define COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_ARKCOLLECTOR_H #include -#include "common_components/heap/allocator/region_space.h" -#include "common_components/heap/collector/trace_collector.h" +#include "common_components/heap/allocator/regional_heap.h" +#include "common_components/heap/collector/copy_data_manager.h" +#include "common_components/heap/collector/marking_collector.h" #include "common_interfaces/base_runtime.h" namespace common { class CopyTable { public: - explicit CopyTable(RegionSpace& space) : theSpace(space) {} + explicit CopyTable(RegionalHeap& space) : theSpace(space) {} // if object is not relocated (forwarded or compacted), return nullptr. 
BaseObject* RouteObject(BaseObject* old, size_t size) @@ -40,7 +41,7 @@ public: return old->GetForwardingPointer(); } - RegionSpace& theSpace; + RegionalHeap& theSpace; }; enum class GCMode: uint8_t { @@ -49,15 +50,15 @@ enum class GCMode: uint8_t { STW = 2 }; -class WCollector : public TraceCollector { +class ArkCollector : public MarkingCollector { public: - explicit WCollector(Allocator& allocator, CollectorResources& resources) - : TraceCollector(allocator, resources), fwdTable_(reinterpret_cast(allocator)) + explicit ArkCollector(Allocator& allocator, CollectorResources& resources) + : MarkingCollector(allocator, resources), fwdTable_(reinterpret_cast(allocator)) { collectorType_ = CollectorType::SMOOTH_COLLECTOR; } - ~WCollector() override = default; + ~ArkCollector() override = default; void Init(const RuntimeParam& param) override { @@ -81,15 +82,22 @@ public: #endif } + void Fini() override + { + HeapBitmapManager::GetHeapBitmapManager().DestroyHeapBitmap(); + } + +// UDAV +//#ifdef PANDA_JS_ETS_HYBRID_MODE +// void TraceXRef(RefField<>& ref, WorkStack& workStack) const; +// void TraceObjectXRef(BaseObject* obj, WorkStack& workStack) override; +//#endif bool ShouldIgnoreRequest(GCRequest& request) override; - bool MarkObject(BaseObject* obj, size_t cellCount = 0) const override; + bool MarkObject(BaseObject* obj) const override; - TraceRefFieldVisitor CreateTraceObjectRefFieldsVisitor(WorkStack *workStack, WeakStack *weakStack) override; - void TraceObjectRefFields(BaseObject *obj, TraceRefFieldVisitor *data) override; -#ifdef PANDA_JS_ETS_HYBRID_MODE - void TraceXRef(RefField<>& ref, WorkStack& workStack) const; - void TraceObjectXRef(BaseObject* obj, WorkStack& workStack) override; -#endif + MarkingRefFieldVisitor CreateMarkingObjectRefFieldsVisitor(ParallelLocalMarkStack &workStack, + WeakStack &weakStack) override; + void MarkingObjectRefFields(BaseObject *obj, MarkingRefFieldVisitor *data) override; void FixObjectRefFields(BaseObject* obj) const 
override; void FixRefField(BaseObject* obj, RefField<>& field) const; @@ -102,13 +110,13 @@ public: void AddRawPointerObject(BaseObject* obj) override { - RegionSpace& space = reinterpret_cast(theAllocator_); + RegionalHeap& space = reinterpret_cast(theAllocator_); space.AddRawPointerObject(obj); } void RemoveRawPointerObject(BaseObject* obj) override { - RegionSpace& space = reinterpret_cast(theAllocator_); + RegionalHeap& space = reinterpret_cast(theAllocator_); space.RemoveRawPointerObject(obj); } @@ -118,8 +126,9 @@ public: { // filter const string object. if (Heap::IsHeapAddress(obj)) { - auto regionInfo = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); - return regionInfo->IsFromRegion(); + RegionDesc::InlinedRegionMetaData *objMetaRegion = + RegionDesc::InlinedRegionMetaData::GetInlinedRegionMetaData(reinterpret_cast(obj)); + return objMetaRegion->IsFromRegion(); } return false; @@ -132,10 +141,11 @@ public: BaseObject* FindToVersion(BaseObject* obj) const override { - return const_cast(this)->fwdTable_.GetForwardingPointer(obj); + return const_cast(this)->fwdTable_.GetForwardingPointer(obj); } -protected: + void SetGCThreadQosPriority(common::PriorityMode mode); + BaseObject* CopyObjectImpl(BaseObject* obj); BaseObject* CopyObjectAfterExclusive(BaseObject* obj) override; @@ -154,34 +164,35 @@ protected: } } +protected: void CollectLargeGarbage() { COMMON_PHASE_TIMER("Collect large garbage"); - RegionSpace& space = reinterpret_cast(theAllocator_); + RegionalHeap& space = reinterpret_cast(theAllocator_); GCStats& stats = GetGCStats(); stats.largeSpaceSize = space.LargeObjectSize(); stats.largeGarbageSize = space.CollectLargeGarbage(); stats.collectedBytes += stats.largeGarbageSize; } - void CollectPinnedGarbage() + void CollectNonMovableGarbage() { - RegionSpace& space = reinterpret_cast(theAllocator_); + RegionalHeap& space = reinterpret_cast(theAllocator_); GCStats& stats = GetGCStats(); - stats.pinnedSpaceSize = space.PinnedSpaceSize(); - 
stats.collectedBytes += stats.pinnedGarbageSize; + stats.nonMovableSpaceSize = space.NonMovableSpaceSize(); + stats.collectedBytes += stats.nonMovableGarbageSize; } void CollectSmallSpace(); void ClearAllGCInfo(); void DoGarbageCollection() override; - void ProcessWeakReferences() override; void ProcessStringTable() override; void ProcessFinalizers() override; private: + friend class RemarkAndPreforwardVisitor; template bool TryUpdateRefFieldImpl(BaseObject* obj, RefField<>& ref, BaseObject*& oldRef, BaseObject*& newRef) const; @@ -198,26 +209,32 @@ private: void EnumRootsImpl(const common::RefFieldVisitor &visitor) { // assemble garbage candidates. - reinterpret_cast(theAllocator_).AssembleGarbageCandidates(); - reinterpret_cast(theAllocator_).PrepareTrace(); + reinterpret_cast(theAllocator_).AssembleGarbageCandidates(); + reinterpret_cast(theAllocator_).PrepareMarking(); COMMON_PHASE_TIMER("enum roots & update old pointers within"); TransitionToGCPhase(GCPhase::GC_PHASE_ENUM, true); rootsVisitFunc(visitor); } - CArrayList> EnumRootsFlip(const common::RefFieldVisitor &visitor); + CArrayList> EnumRootsFlip(STWParam& param, const common::RefFieldVisitor &visitor); - void TraceHeap(const CArrayList &collectedRoots); - void PostTrace(); - void RemarkAndPreforwardStaticRoots(WorkStack& workStack) override; + void MarkingHeap(const CArrayList &collectedRoots); + void PostMarking(); + void RemarkAndPreforwardStaticRoots(GlobalMarkStack &globalMarkStack) override; + void ParallelRemarkAndPreforward(GlobalMarkStack &globalMarkStack); void Preforward(); + void ConcurrentPreforward(); + + void PreforwardConcurrentRoots(); void PreforwardStaticWeakRoots(); void PreforwardConcurrencyModelRoots(); void PrepareFix(); + void ParallelFixHeap(); void FixHeap(); // roots and ref-fields WeakRefFieldVisitor GetWeakRefFieldVisitor(); + RefFieldVisitor GetPrefowardRefFieldVisitor(); void PreforwardFlip(); void CollectGarbageWithXRef(); @@ -228,4 +245,4 @@ private: }; } // namespace 
common -#endif // COMMON_COMPONENTS_HEAP_W_COLLECTOR_WCOLLECTOR_H +#endif // COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_ARKCOLLECTOR_H diff --git a/common_components/heap/w_collector/copy_barrier.cpp b/common_components/heap/ark_collector/copy_barrier.cpp similarity index 75% rename from common_components/heap/w_collector/copy_barrier.cpp rename to common_components/heap/ark_collector/copy_barrier.cpp index b09ac3ad8db40b30dbd039a86d79186d2deea397..f639f0ba7fc941ff2bcddfb25bcdb4c6ac61ba6f 100755 --- a/common_components/heap/w_collector/copy_barrier.cpp +++ b/common_components/heap/ark_collector/copy_barrier.cpp @@ -12,11 +12,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "common_components/heap/allocator/region_space.h" -#include "common_components/heap/w_collector/copy_barrier.h" +#include "common_components/heap/allocator/regional_heap.h" +#include "common_components/heap/ark_collector/copy_barrier.h" #include "common_components/base/sys_call.h" #include "common_components/common/scoped_object_lock.h" #include "common_components/mutator/mutator.h" +#include "heap/collector/collector_proxy.h" #if defined(COMMON_TSAN_SUPPORT) #include "common_components/sanitizer/sanitizer_interface.h" #endif @@ -26,28 +27,16 @@ BaseObject* CopyBarrier::ReadRefField(BaseObject* obj, RefField& field) c { do { RefField<> tmpField(field); - bool isWeak = tmpField.IsWeak(); - BaseObject* oldRef = tmpField.GetTargetObject(); - if (LIKELY_CC(!theCollector.IsFromObject(oldRef))) { - if (isWeak) { - return (BaseObject*)((uintptr_t)oldRef | TAG_WEAK); - } else { - return oldRef; - } + BaseObject* oldRef = reinterpret_cast(tmpField.GetAddress()); + if (LIKELY_CC(!static_cast(&theCollector)->IsFromObject(oldRef))) { + return oldRef; } + + auto weakMask = reinterpret_cast(oldRef) & TAG_WEAK; + oldRef = reinterpret_cast(reinterpret_cast(oldRef) & (~TAG_WEAK)); BaseObject* toObj = nullptr; - if 
(theCollector.IsUnmovableFromObject(oldRef)) { - if (isWeak) { - return (BaseObject*)((uintptr_t)oldRef | TAG_WEAK); - } else { - return oldRef; - } - } else if (theCollector.TryForwardRefField(obj, field, toObj)) { - if (isWeak) { - return (BaseObject*)((uintptr_t)toObj | TAG_WEAK); - } else { - return toObj; - } + if (static_cast(&theCollector)->TryForwardRefField(obj, field, toObj)) { + return (BaseObject*)((uintptr_t)toObj | weakMask); } } while (true); // unreachable path. @@ -59,13 +48,16 @@ BaseObject* CopyBarrier::ReadStaticRef(RefField& field) const { return Re // If the object is still alive, return its toSpace object; if not, return nullptr BaseObject* CopyBarrier::ReadStringTableStaticRef(RefField& field) const { + // Note: CMC GC assumes all objects in string table are not in young space. Based on the assumption, CMC GC skip + // read barrier in young GC + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + return reinterpret_cast(field.GetFieldValue()); + } + auto isSurvivor = [](BaseObject* obj) { - auto gcReason = Heap::GetHeap().GetGCReason(); - RegionDesc *regionInfo = - RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); - return ((gcReason == GC_REASON_YOUNG && !regionInfo->IsInYoungSpace()) || - regionInfo->IsNewObjectSinceTrace(obj) || - regionInfo->IsToRegion() || regionInfo->IsMarkedObject(obj)); + RegionDesc *regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); + return (regionInfo->IsNewObjectSinceMarking(obj) || + regionInfo->IsToRegion() || regionInfo->IsMarkedObject(obj)); }; RefField<> tmpField(field); @@ -80,7 +72,7 @@ BaseObject* CopyBarrier::ReadStringTableStaticRef(RefField& field) const void CopyBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, size_t size) const { CHECK_CC(!Heap::IsHeapAddress(dst)); - if (obj != nullptr) { + if (obj != nullptr) { //LCOV_EXCL_BR_LINE obj->ForEachRefInStruct( [this, obj](RefField& field) { BaseObject* target = ReadRefField(obj, field); @@ -104,10 +96,10 
@@ void CopyBarrier::AtomicWriteRefField(BaseObject* obj, RefField& field, Ba { RefField<> newField(newRef); field.SetFieldValue(newField.GetFieldValue(), order); - if (obj != nullptr) { + if (obj != nullptr) { //LCOV_EXCL_BR_LINE DLOG(FBARRIER, "atomic write obj %p<%p>(%zu) ref@%p: %#zx", obj, obj->GetTypeInfo(), obj->GetSize(), &field, newField.GetFieldValue()); - } else { + } else { //LCOV_EXCL_BR_LINE DLOG(FBARRIER, "atomic write static ref@%p: %#zx", &field, newField.GetFieldValue()); } } @@ -129,9 +121,9 @@ bool CopyBarrier::CompareAndSwapRefField(BaseObject* obj, RefField& field, HeapAddress oldFieldValue = field.GetFieldValue(std::memory_order_seq_cst); RefField oldField(oldFieldValue); BaseObject* oldVersion = ReadRefField(nullptr, oldField); - while (oldVersion == oldRef) { + while (oldVersion == oldRef) { //LCOV_EXCL_BR_LINE RefField<> newField(newRef); - if (field.CompareExchange(oldFieldValue, newField.GetFieldValue(), succOrder, failOrder)) { + if (field.CompareExchange(oldFieldValue, newField.GetFieldValue(), succOrder, failOrder)) { //LCOV_EXCL_BR_LINE return true; } oldFieldValue = field.GetFieldValue(std::memory_order_seq_cst); diff --git a/common_components/heap/w_collector/copy_barrier.h b/common_components/heap/ark_collector/copy_barrier.h similarity index 89% rename from common_components/heap/w_collector/copy_barrier.h rename to common_components/heap/ark_collector/copy_barrier.h index 85c8cc0b2c7727d7a242398e6934f368d12865ad..08863d4f57f3571e0d7fae9e086bf64064acaf10 100755 --- a/common_components/heap/w_collector/copy_barrier.h +++ b/common_components/heap/ark_collector/copy_barrier.h @@ -13,10 +13,10 @@ * limitations under the License. 
*/ -#ifndef COMMON_COMPONENTS_HEAP_W_COLLECTOR_COPY_BARRIER_H -#define COMMON_COMPONENTS_HEAP_W_COLLECTOR_COPY_BARRIER_H +#ifndef COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_COPY_BARRIER_H +#define COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_COPY_BARRIER_H -#include "common_components/heap/w_collector/idle_barrier.h" +#include "common_components/heap/ark_collector/idle_barrier.h" namespace common { // CopyBarrier is the barrier for concurrent forwarding. @@ -45,4 +45,4 @@ public: }; } // namespace common -#endif // COMMON_COMPONENTS_HEAP_W_COLLECTOR_COPY_BARRIER_H +#endif // COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_COPY_BARRIER_H diff --git a/common_components/heap/w_collector/enum_barrier.cpp b/common_components/heap/ark_collector/enum_barrier.cpp similarity index 94% rename from common_components/heap/w_collector/enum_barrier.cpp rename to common_components/heap/ark_collector/enum_barrier.cpp index d5f629ee909f30bc0f8c36f09df96ea9c7d3c608..45dde4bf47605224429fc3fc3cdf32f6516460af 100755 --- a/common_components/heap/w_collector/enum_barrier.cpp +++ b/common_components/heap/ark_collector/enum_barrier.cpp @@ -12,7 +12,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "common_components/heap/w_collector/enum_barrier.h" +#include "common_components/heap/ark_collector/enum_barrier.h" #include "common_components/mutator/mutator.h" #if defined(COMMON_TSAN_SUPPORT) @@ -20,7 +20,7 @@ #endif namespace common { -// Because gc thread will also have impact on tagged pointer in enum and trace phase, +// Because gc thread will also have impact on tagged pointer in enum and marking phase, // so we don't expect reading barrier have the ability to modify the referent field. 
BaseObject* EnumBarrier::ReadRefField(BaseObject* obj, RefField& field) const { @@ -35,6 +35,14 @@ void EnumBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, CHECK_CC(memcpy_s(reinterpret_cast(dst), size, reinterpret_cast(src), size) == EOK); } +void EnumBarrier::WriteRoot(BaseObject *obj) const +{ + ASSERT(Heap::IsHeapAddress(obj)); + Mutator *mutator = Mutator::GetMutator(); + mutator->RememberObjectInSatbBuffer(obj); + DLOG(BARRIER, "write root obj %p", obj); +} + void EnumBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const { UpdateRememberSet(obj, ref); @@ -45,7 +53,7 @@ void EnumBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseObj if (remeberedObject != nullptr) { mutator->RememberObjectInSatbBuffer(remeberedObject); } - if (ref != nullptr) { + if (ref != nullptr) { //LCOV_EXCL_BR_LINE mutator->RememberObjectInSatbBuffer(ref); } DLOG(BARRIER, "write obj %p ref@%p: 0x%zx -> %p", obj, &field, remeberedObject, ref); @@ -82,7 +90,9 @@ void EnumBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObje if (!Heap::IsTaggedObject((HeapAddress)ref)) { return; } - UpdateRememberSet(obj, ref); + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + UpdateRememberSet(obj, ref); + } ref = (BaseObject*)((uintptr_t)ref & ~(TAG_WEAK)); Mutator* mutator = Mutator::GetMutator(); mutator->RememberObjectInSatbBuffer(ref); diff --git a/common_components/heap/w_collector/enum_barrier.h b/common_components/heap/ark_collector/enum_barrier.h similarity index 88% rename from common_components/heap/w_collector/enum_barrier.h rename to common_components/heap/ark_collector/enum_barrier.h index 245dcdfbd6af50988f7486b6e241a6ca7a55a27d..525f0ff8f320a2e486bb4bea38ad49707badfba4 100755 --- a/common_components/heap/w_collector/enum_barrier.h +++ b/common_components/heap/ark_collector/enum_barrier.h @@ -13,10 +13,10 @@ * limitations under the License. 
*/ -#ifndef COMMON_COMPONENTS_HEAP_W_COLLECTOR_ENUM_BARRIER_H -#define COMMON_COMPONENTS_HEAP_W_COLLECTOR_ENUM_BARRIER_H +#ifndef COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_ENUM_BARRIER_H +#define COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_ENUM_BARRIER_H -#include "common_components/heap/w_collector/idle_barrier.h" +#include "common_components/heap/ark_collector/idle_barrier.h" namespace common { // EnumBarrier is the barrier for concurrent enum phase @@ -28,6 +28,7 @@ public: BaseObject* ReadStaticRef(RefField& field) const override; void ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, size_t size) const override; + void WriteRoot(BaseObject *obj) const override; void WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const override; void WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const override; @@ -49,4 +50,4 @@ public: }; } // namespace common -#endif // COMMON_COMPONENTS_HEAP_W_COLLECTOR_ENUM_BARRIER_H +#endif // COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_ENUM_BARRIER_H diff --git a/common_components/heap/w_collector/idle_barrier.cpp b/common_components/heap/ark_collector/idle_barrier.cpp similarity index 86% rename from common_components/heap/w_collector/idle_barrier.cpp rename to common_components/heap/ark_collector/idle_barrier.cpp index 0f50643a8e213c915c357738936fcc0f80e8a73f..b36155a99da04c478acdaef480e8793ce1302605 100755 --- a/common_components/heap/w_collector/idle_barrier.cpp +++ b/common_components/heap/ark_collector/idle_barrier.cpp @@ -12,7 +12,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "common_components/heap/w_collector/idle_barrier.h" +#include "common_components/heap/ark_collector/idle_barrier.h" #include "common_components/mutator/mutator.h" #if defined(COMMON_TSAN_SUPPORT) @@ -89,29 +89,38 @@ bool IdleBarrier::CompareAndSwapRefField(BaseObject* obj, RefField& field, void IdleBarrier::UpdateRememberSet(BaseObject* object, BaseObject* ref) const { - if (!Heap::IsHeapAddress(ref) || object == nullptr) { - return; - } - RegionDesc* objRegion = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)object)); - RegionDesc* refRegion = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)ref)); - if ((!objRegion->IsInYoungSpace() && refRegion->IsInYoungSpace()) || - (objRegion->IsInFromSpace() && refRegion->IsInRecentSpace())) { - if (objRegion->MarkRSetCardTable(object)) { + ASSERT(object != nullptr); + RegionDesc::InlinedRegionMetaData *objMetaRegion = RegionDesc::InlinedRegionMetaData::GetInlinedRegionMetaData( + reinterpret_cast(object)); + RegionDesc::InlinedRegionMetaData *refMetaRegion = RegionDesc::InlinedRegionMetaData::GetInlinedRegionMetaData( + reinterpret_cast(ref)); + if (!objMetaRegion->IsInYoungSpaceForWB() && refMetaRegion->IsInYoungSpaceForWB()) { + if (objMetaRegion->MarkRSetCardTable(object)) { DLOG(BARRIER, "update point-out remember set of region %p, obj %p, ref: %p<%p>", - objRegion, object, ref, ref->GetTypeInfo()); + objMetaRegion->GetRegionDesc(), object, ref, ref->GetTypeInfo()); } } } +void IdleBarrier::WriteRoot(BaseObject *obj) const +{ + DLOG(BARRIER, "write root obj %p", obj); +} + void IdleBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const { DLOG(BARRIER, "write obj %p ref@%p: %p => %p", obj, &field, field.GetTargetObject(), ref); - UpdateRememberSet(obj, ref); + if (Heap::IsTaggedObject((HeapAddress)ref)) { + UpdateRememberSet(obj, ref); + } field.SetTargetObject(ref); } void IdleBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const { + if 
(!Heap::IsTaggedObject((HeapAddress)ref)) { + return; + } UpdateRememberSet(obj, ref); DLOG(BARRIER, "write obj %p ref@%p: %p => %p", obj, &field, field.GetTargetObject(), ref); } diff --git a/common_components/heap/w_collector/idle_barrier.h b/common_components/heap/ark_collector/idle_barrier.h similarity index 91% rename from common_components/heap/w_collector/idle_barrier.h rename to common_components/heap/ark_collector/idle_barrier.h index 327a140cfc76906c89f1a1e02f70ae33cd3acf50..292c2601c2e321d7566367726bd12ae723e435fe 100755 --- a/common_components/heap/w_collector/idle_barrier.h +++ b/common_components/heap/ark_collector/idle_barrier.h @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef COMMON_COMPONENTS_HEAP_W_COLLECTOR_IDLE_BARRIER_H -#define COMMON_COMPONENTS_HEAP_W_COLLECTOR_IDLE_BARRIER_H +#ifndef COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_IDLE_BARRIER_H +#define COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_IDLE_BARRIER_H #include "common_components/heap/barrier/barrier.h" @@ -28,6 +28,7 @@ public: BaseObject* ReadStaticRef(RefField& field) const override; void ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, size_t size) const override; + void WriteRoot(BaseObject *obj) const override; void WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const override; void WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const override; @@ -51,4 +52,4 @@ public: }; } // namespace common -#endif // COMMON_COMPONENTS_HEAP_W_COLLECTOR_IDLE_BARRIER_H +#endif // COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_IDLE_BARRIER_H diff --git a/common_components/heap/w_collector/trace_barrier.cpp b/common_components/heap/ark_collector/marking_barrier.cpp similarity index 73% rename from common_components/heap/w_collector/trace_barrier.cpp rename to common_components/heap/ark_collector/marking_barrier.cpp index 0d145e2ec84786603492430eee49db776206296d..049349b3beed43a72e778d6c9a4bd73780b154d9 100755 --- 
a/common_components/heap/w_collector/trace_barrier.cpp +++ b/common_components/heap/ark_collector/marking_barrier.cpp @@ -12,7 +12,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "common_components/heap/w_collector/trace_barrier.h" +#include "common_components/heap/ark_collector/marking_barrier.h" #include "common_components/mutator/mutator.h" #if defined(COMMON_TSAN_SUPPORT) @@ -20,22 +20,30 @@ #endif namespace common { -// Because gc thread will also have impact on tagged pointer in enum and trace phase, +// Because gc thread will also have impact on tagged pointer in enum and marking phase, // so we don't expect reading barrier have the ability to modify the referent field. -BaseObject* TraceBarrier::ReadRefField(BaseObject* obj, RefField& field) const +BaseObject* MarkingBarrier::ReadRefField(BaseObject* obj, RefField& field) const { RefField<> tmpField(field); return (BaseObject*)tmpField.GetFieldValue(); } -BaseObject* TraceBarrier::ReadStaticRef(RefField& field) const { return ReadRefField(nullptr, field); } +BaseObject* MarkingBarrier::ReadStaticRef(RefField& field) const { return ReadRefField(nullptr, field); } -void TraceBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, size_t size) const +void MarkingBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, size_t size) const { CHECK_CC(memcpy_s(reinterpret_cast(dst), size, reinterpret_cast(src), size) == EOK); } -void TraceBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const +void MarkingBarrier::WriteRoot(BaseObject *obj) const +{ + ASSERT(Heap::IsHeapAddress(obj)); + Mutator *mutator = Mutator::GetMutator(); + mutator->RememberObjectInSatbBuffer(obj); + DLOG(BARRIER, "write root obj %p", obj); +} + +void MarkingBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const { UpdateRememberSet(obj, ref); RefField<> tmpField(field); @@ -51,7 +59,7 @@ void 
TraceBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseOb field.SetFieldValue(newField.GetFieldValue()); } #ifdef ARK_USE_SATB_BARRIER -void TraceBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const +void MarkingBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const { RefField<> tmpField(field); BaseObject* rememberedObject = nullptr; @@ -75,12 +83,14 @@ void TraceBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObj DLOG(BARRIER, "write obj %p ref-field@%p: %#zx -> %p", obj, &field, rememberedObject, ref); } #else -void TraceBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const +void MarkingBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const { if (!Heap::IsTaggedObject((HeapAddress)ref)) { return; } - UpdateRememberSet(obj, ref); + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + UpdateRememberSet(obj, ref); + } ref = (BaseObject*)((uintptr_t)ref & ~(TAG_WEAK)); Mutator* mutator = Mutator::GetMutator(); mutator->RememberObjectInSatbBuffer(ref); @@ -88,17 +98,17 @@ void TraceBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObj } #endif -void TraceBarrier::WriteStaticRef(RefField& field, BaseObject* ref) const +void MarkingBarrier::WriteStaticRef(RefField& field, BaseObject* ref) const { std::atomic_thread_fence(std::memory_order_seq_cst); RefField<> newField(ref); field.SetFieldValue(newField.GetFieldValue()); } -void TraceBarrier::WriteStruct(BaseObject* obj, HeapAddress dst, size_t dstLen, HeapAddress src, size_t srcLen) const +void MarkingBarrier::WriteStruct(BaseObject* obj, HeapAddress dst, size_t dstLen, HeapAddress src, size_t srcLen) const { CHECK_CC(obj != nullptr); - if (obj != nullptr) { + if (obj != nullptr) { //LCOV_EXCL_BR_LINE ASSERT_LOGF(dst > reinterpret_cast(obj), "WriteStruct struct addr is less than obj!"); Mutator* mutator = Mutator::GetMutator(); if (mutator != nullptr) { @@ -118,7 +128,7 @@ void 
TraceBarrier::WriteStruct(BaseObject* obj, HeapAddress dst, size_t dstLen, #endif } -BaseObject* TraceBarrier::AtomicReadRefField(BaseObject* obj, RefField& field, MemoryOrder order) const +BaseObject* MarkingBarrier::AtomicReadRefField(BaseObject* obj, RefField& field, MemoryOrder order) const { BaseObject* target = nullptr; RefField oldField(field.GetFieldValue(order)); @@ -127,8 +137,8 @@ BaseObject* TraceBarrier::AtomicReadRefField(BaseObject* obj, RefField& fi return target; } -void TraceBarrier::AtomicWriteRefField(BaseObject* obj, RefField& field, BaseObject* newRef, - MemoryOrder order) const +void MarkingBarrier::AtomicWriteRefField(BaseObject* obj, RefField& field, BaseObject* newRef, + MemoryOrder order) const { RefField<> oldField(field.GetFieldValue(order)); HeapAddress oldValue = oldField.GetFieldValue(); @@ -146,8 +156,8 @@ void TraceBarrier::AtomicWriteRefField(BaseObject* obj, RefField& field, B } } -BaseObject* TraceBarrier::AtomicSwapRefField(BaseObject* obj, RefField& field, BaseObject* newRef, - MemoryOrder order) const +BaseObject* MarkingBarrier::AtomicSwapRefField(BaseObject* obj, RefField& field, BaseObject* newRef, + MemoryOrder order) const { RefField<> newField(newRef); HeapAddress oldValue = field.Exchange(newField.GetFieldValue(), order); @@ -160,8 +170,8 @@ BaseObject* TraceBarrier::AtomicSwapRefField(BaseObject* obj, RefField& fi return oldRef; } -bool TraceBarrier::CompareAndSwapRefField(BaseObject* obj, RefField& field, BaseObject* oldRef, - BaseObject* newRef, MemoryOrder succOrder, MemoryOrder failOrder) const +bool MarkingBarrier::CompareAndSwapRefField(BaseObject* obj, RefField& field, BaseObject* oldRef, + BaseObject* newRef, MemoryOrder succOrder, MemoryOrder failOrder) const { HeapAddress oldFieldValue = field.GetFieldValue(std::memory_order_seq_cst); RefField oldField(oldFieldValue); @@ -182,8 +192,8 @@ bool TraceBarrier::CompareAndSwapRefField(BaseObject* obj, RefField& field return false; } -void 
TraceBarrier::CopyStructArray(BaseObject* dstObj, HeapAddress dstField, MIndex dstSize, BaseObject* srcObj, - HeapAddress srcField, MIndex srcSize) const +void MarkingBarrier::CopyStructArray(BaseObject* dstObj, HeapAddress dstField, MIndex dstSize, BaseObject* srcObj, + HeapAddress srcField, MIndex srcSize) const { LOG_COMMON(FATAL) << "Unresolved fatal"; UNREACHABLE_CC(); diff --git a/common_components/heap/w_collector/trace_barrier.h b/common_components/heap/ark_collector/marking_barrier.h similarity index 81% rename from common_components/heap/w_collector/trace_barrier.h rename to common_components/heap/ark_collector/marking_barrier.h index 9397728a9729d8044dd5022a9360b2a756da00e5..8af0cc7e5c799e9d2023e29297499e39b549d3bd 100755 --- a/common_components/heap/w_collector/trace_barrier.h +++ b/common_components/heap/ark_collector/marking_barrier.h @@ -13,22 +13,23 @@ * limitations under the License. */ -#ifndef COMMON_COMPONENTS_HEAP_W_COLLECTOR_TRACE_BARRIER_H -#define COMMON_COMPONENTS_HEAP_W_COLLECTOR_TRACE_BARRIER_H +#ifndef COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_MARKING_BARRIER_H +#define COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_MARKING_BARRIER_H -#include "common_components/heap/w_collector/idle_barrier.h" +#include "common_components/heap/ark_collector/idle_barrier.h" namespace common { -// TraceBarrier is the barrier for concurrent marking phase. +// MarkingBarrier is the barrier for concurrent marking phase. // rename to TracingBarrier. Marking is confusing in consideration of MarkObject. 
-class TraceBarrier : public IdleBarrier { +class MarkingBarrier : public IdleBarrier { public: - explicit TraceBarrier(Collector& collector) : IdleBarrier(collector) {} + explicit MarkingBarrier(Collector& collector) : IdleBarrier(collector) {} BaseObject* ReadRefField(BaseObject* obj, RefField& field) const override; BaseObject* ReadStaticRef(RefField& field) const override; void ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, size_t size) const override; + void WriteRoot(BaseObject *obj) const override; void WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const override; void WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const override; void WriteStruct(BaseObject* obj, HeapAddress dst, size_t dstLen, HeapAddress src, size_t srcLen) const override; @@ -47,4 +48,4 @@ public: }; } // namespace common -#endif // COMMON_COMPONENTS_HEAP_W_COLLECTOR_MARK_BARRIER_H +#endif // COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_MARK_BARRIER_H diff --git a/common_components/heap/w_collector/post_trace_barrier.cpp b/common_components/heap/ark_collector/post_marking_barrier.cpp similarity index 63% rename from common_components/heap/w_collector/post_trace_barrier.cpp rename to common_components/heap/ark_collector/post_marking_barrier.cpp index 2325aa04ea13fec95c1fe3f8277fb431b803236e..0afa07f836ab4a4ed70e9f8ba8a6616570466749 100755 --- a/common_components/heap/w_collector/post_trace_barrier.cpp +++ b/common_components/heap/ark_collector/post_marking_barrier.cpp @@ -12,9 +12,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "common_components/heap/w_collector/post_trace_barrier.h" +#include "common_components/heap/ark_collector/post_marking_barrier.h" -#include "common_components/heap/allocator/region_space.h" +#include "common_components/heap/allocator/regional_heap.h" #include "common_components/mutator/mutator.h" #include "heap/space/young_space.h" #if defined(COMMON_TSAN_SUPPORT) @@ -22,24 +22,27 @@ #endif namespace common { -BaseObject* PostTraceBarrier::ReadRefField(BaseObject* obj, RefField& field) const +BaseObject* PostMarkingBarrier::ReadRefField(BaseObject* obj, RefField& field) const { RefField<> tmpField(field); return (BaseObject*)tmpField.GetFieldValue(); } -BaseObject* PostTraceBarrier::ReadStaticRef(RefField& field) const { return ReadRefField(nullptr, field); } +BaseObject* PostMarkingBarrier::ReadStaticRef(RefField& field) const { return ReadRefField(nullptr, field); } // If the object is still alive, return it; if not, return nullptr -BaseObject* PostTraceBarrier::ReadStringTableStaticRef(RefField &field) const +BaseObject* PostMarkingBarrier::ReadStringTableStaticRef(RefField &field) const { + // Note: CMC GC assumes all objects in string table are not in young space. 
Based on the assumption, CMC GC skip + // read barrier in young GC + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + return reinterpret_cast(field.GetFieldValue()); + } + auto isSurvivor = [](BaseObject* obj) { - const GCReason gcReason = Heap::GetHeap().GetGCReason(); - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); - return (gcReason == GC_REASON_YOUNG && !region->IsInYoungSpace()) - || region->IsMarkedObject(obj) - || region->IsNewObjectSinceTrace(obj); + return region->IsMarkedObject(obj) || region->IsNewObjectSinceMarking(obj); }; auto obj = ReadRefField(nullptr, field); @@ -50,31 +53,33 @@ BaseObject* PostTraceBarrier::ReadStringTableStaticRef(RefField &field) c } } -void PostTraceBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, size_t size) const +void PostMarkingBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, size_t size) const { CHECK_CC(memcpy_s(reinterpret_cast(dst), size, reinterpret_cast(src), size) == EOK); } -void PostTraceBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const +void PostMarkingBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const { RefField<> newField(ref); UpdateRememberSet(obj, ref); field.SetFieldValue(newField.GetFieldValue()); } -void PostTraceBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const +void PostMarkingBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const { - UpdateRememberSet(obj, ref); + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + UpdateRememberSet(obj, ref); + } } -void PostTraceBarrier::WriteStaticRef(RefField& field, BaseObject* ref) const +void PostMarkingBarrier::WriteStaticRef(RefField& field, BaseObject* ref) const { RefField<> newField(ref); field.SetFieldValue(newField.GetFieldValue()); } -void PostTraceBarrier::WriteStruct(BaseObject* 
obj, HeapAddress dst, size_t dstLen, - HeapAddress src, size_t srcLen) const +void PostMarkingBarrier::WriteStruct(BaseObject* obj, HeapAddress dst, size_t dstLen, + HeapAddress src, size_t srcLen) const { CHECK_CC(obj != nullptr); CHECK_CC(memcpy_s(reinterpret_cast(dst), dstLen, reinterpret_cast(src), srcLen) == EOK); @@ -85,7 +90,7 @@ void PostTraceBarrier::WriteStruct(BaseObject* obj, HeapAddress dst, size_t dstL #endif } -BaseObject* PostTraceBarrier::AtomicReadRefField(BaseObject* obj, RefField& field, MemoryOrder order) const +BaseObject* PostMarkingBarrier::AtomicReadRefField(BaseObject* obj, RefField& field, MemoryOrder order) const { BaseObject* target = nullptr; RefField oldField(field.GetFieldValue(order)); @@ -95,8 +100,8 @@ BaseObject* PostTraceBarrier::AtomicReadRefField(BaseObject* obj, RefField return target; } -void PostTraceBarrier::AtomicWriteRefField(BaseObject* obj, RefField& field, BaseObject* newRef, - MemoryOrder order) const +void PostMarkingBarrier::AtomicWriteRefField(BaseObject* obj, RefField& field, BaseObject* newRef, + MemoryOrder order) const { RefField<> oldField(field.GetFieldValue(order)); HeapAddress oldValue = oldField.GetFieldValue(); @@ -111,8 +116,8 @@ void PostTraceBarrier::AtomicWriteRefField(BaseObject* obj, RefField& fiel } } -BaseObject* PostTraceBarrier::AtomicSwapRefField(BaseObject* obj, RefField& field, BaseObject* newRef, - MemoryOrder order) const +BaseObject* PostMarkingBarrier::AtomicSwapRefField(BaseObject* obj, RefField& field, BaseObject* newRef, + MemoryOrder order) const { RefField<> newField(newRef); HeapAddress oldValue = field.Exchange(newField.GetFieldValue(), order); @@ -123,8 +128,8 @@ BaseObject* PostTraceBarrier::AtomicSwapRefField(BaseObject* obj, RefField return oldRef; } -bool PostTraceBarrier::CompareAndSwapRefField(BaseObject* obj, RefField& field, BaseObject* oldRef, - BaseObject* newRef, MemoryOrder succOrder, MemoryOrder failOrder) const +bool 
PostMarkingBarrier::CompareAndSwapRefField(BaseObject* obj, RefField& field, BaseObject* oldRef, + BaseObject* newRef, MemoryOrder succOrder, MemoryOrder failOrder) const { HeapAddress oldFieldValue = field.GetFieldValue(std::memory_order_seq_cst); RefField oldField(oldFieldValue); @@ -141,8 +146,8 @@ bool PostTraceBarrier::CompareAndSwapRefField(BaseObject* obj, RefField& f return false; } -void PostTraceBarrier::CopyStructArray(BaseObject* dstObj, HeapAddress dstField, MIndex dstSize, BaseObject* srcObj, - HeapAddress srcField, MIndex srcSize) const +void PostMarkingBarrier::CopyStructArray(BaseObject* dstObj, HeapAddress dstField, MIndex dstSize, BaseObject* srcObj, + HeapAddress srcField, MIndex srcSize) const { #ifndef NDEBUG if (!dstObj->HasRefField()) { diff --git a/common_components/heap/w_collector/post_trace_barrier.h b/common_components/heap/ark_collector/post_marking_barrier.h similarity index 83% rename from common_components/heap/w_collector/post_trace_barrier.h rename to common_components/heap/ark_collector/post_marking_barrier.h index 924d18afedad0bd73396562312a293d84aad396d..d141e46fcfc9ae4a0a962d193f77dd4c25e41757 100755 --- a/common_components/heap/w_collector/post_trace_barrier.h +++ b/common_components/heap/ark_collector/post_marking_barrier.h @@ -13,17 +13,17 @@ * limitations under the License. */ -#ifndef COMMON_COMPONENTS_HEAP_W_COLLECTOR_POST_TRACE_BARRIER_H -#define COMMON_COMPONENTS_HEAP_W_COLLECTOR_POST_TRACE_BARRIER_H +#ifndef COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_POST_MARKING_BARRIER_H +#define COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_POST_MARKING_BARRIER_H -#include "common_components/heap/w_collector/idle_barrier.h" +#include "common_components/heap/ark_collector/idle_barrier.h" namespace common { -// PostTraceBarrier is the barrier for concurrent marking phase. +// PostMarkingBarrier is the barrier for concurrent marking phase. // rename to TracingBarrier. Marking is confusing in consideration of MarkObject. 
-class PostTraceBarrier : public IdleBarrier { +class PostMarkingBarrier : public IdleBarrier { public: - explicit PostTraceBarrier(Collector& collector) : IdleBarrier(collector) {} + explicit PostMarkingBarrier(Collector& collector) : IdleBarrier(collector) {} BaseObject* ReadRefField(BaseObject* obj, RefField& field) const override; BaseObject* ReadStaticRef(RefField& field) const override; @@ -49,4 +49,4 @@ public: }; } // namespace common -#endif // COMMON_COMPONENTS_HEAP_W_COLLECTOR_MARK_BARRIER_H +#endif // COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_MARK_BARRIER_H diff --git a/common_components/heap/w_collector/preforward_barrier.cpp b/common_components/heap/ark_collector/preforward_barrier.cpp similarity index 80% rename from common_components/heap/w_collector/preforward_barrier.cpp rename to common_components/heap/ark_collector/preforward_barrier.cpp index 07bd39f084dadbdf6307167d9f0a9637e89aabf9..ec6affc5fc229bc12ffefe078d30974035b7843b 100755 --- a/common_components/heap/w_collector/preforward_barrier.cpp +++ b/common_components/heap/ark_collector/preforward_barrier.cpp @@ -12,12 +12,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "common_components/heap/w_collector/preforward_barrier.h" +#include "common_components/heap/ark_collector/preforward_barrier.h" -#include "common_components/heap/allocator/region_space.h" +#include "common_components/heap/allocator/regional_heap.h" #include "common_components/base/sys_call.h" #include "common_components/common/scoped_object_lock.h" #include "common_components/mutator/mutator.h" +#include "heap/collector/collector_proxy.h" #if defined(COMMON_TSAN_SUPPORT) #include "common_components/sanitizer/sanitizer_interface.h" #endif @@ -27,28 +28,16 @@ BaseObject* PreforwardBarrier::ReadRefField(BaseObject* obj, RefField& fi { do { RefField<> tmpField(field); - bool isWeak = tmpField.IsWeak(); - BaseObject* oldRef = tmpField.GetTargetObject(); - if (LIKELY_CC(!theCollector.IsFromObject(oldRef))) { - if (isWeak) { - return (BaseObject*)((uintptr_t)oldRef | TAG_WEAK); - } else { - return oldRef; - } + BaseObject* oldRef = reinterpret_cast(tmpField.GetAddress()); + if (LIKELY_CC(!static_cast(&theCollector)->IsFromObject(oldRef))) { + return oldRef; } + + auto weakMask = reinterpret_cast(oldRef) & TAG_WEAK; + oldRef = reinterpret_cast(reinterpret_cast(oldRef) & (~TAG_WEAK)); BaseObject* toObj = nullptr; - if (theCollector.IsUnmovableFromObject(oldRef)) { - if (isWeak) { - return (BaseObject*)((uintptr_t)oldRef | TAG_WEAK); - } else { - return oldRef; - } - } else if (theCollector.TryForwardRefField(obj, field, toObj)) { - if (isWeak) { - return (BaseObject*)((uintptr_t)toObj | TAG_WEAK); - } else { - return toObj; - } + if (static_cast(&theCollector)->TryForwardRefField(obj, field, toObj)) { + return (BaseObject*)((uintptr_t)toObj | weakMask); } } while (true); // unreachable path. 
@@ -60,13 +49,17 @@ BaseObject* PreforwardBarrier::ReadStaticRef(RefField& field) const { ret // If the object is still alive, return its toSpace object; if not, return nullptr BaseObject* PreforwardBarrier::ReadStringTableStaticRef(RefField& field) const { + // Note: CMC GC assumes all objects in string table are not in young space. Based on the assumption, CMC GC skip + // read barrier in young GC + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + return reinterpret_cast(field.GetFieldValue()); + } + auto isSurvivor = [](BaseObject* obj) { auto gcReason = Heap::GetHeap().GetGCReason(); - RegionDesc *regionInfo = - RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); - return ((gcReason == GC_REASON_YOUNG && !regionInfo->IsInYoungSpace()) || - regionInfo->IsNewObjectSinceTrace(obj) || - regionInfo->IsToRegion() || regionInfo->IsMarkedObject(obj)); + RegionDesc *regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); + return (regionInfo->IsNewObjectSinceMarking(obj) || + regionInfo->IsToRegion() || regionInfo->IsMarkedObject(obj)); }; RefField<> tmpField(field); diff --git a/common_components/heap/w_collector/preforward_barrier.h b/common_components/heap/ark_collector/preforward_barrier.h similarity index 86% rename from common_components/heap/w_collector/preforward_barrier.h rename to common_components/heap/ark_collector/preforward_barrier.h index 6abd8d8854e46cfc330965bd9f90469c66a52c77..9bcc26c73c478898583cc638f50012ff77816f51 100755 --- a/common_components/heap/w_collector/preforward_barrier.h +++ b/common_components/heap/ark_collector/preforward_barrier.h @@ -13,11 +13,11 @@ * limitations under the License. 
*/ -#ifndef COMMON_COMPONENTS_HEAP_W_COLLECTOR_TRACE_FIX_BARRIER_H -#define COMMON_COMPONENTS_HEAP_W_COLLECTOR_TRACE_FIX_BARRIER_H +#ifndef COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_MARKING_FIX_BARRIER_H +#define COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_MARKING_FIX_BARRIER_H -#include "common_components/heap/allocator/region_space.h" -#include "common_components/heap/w_collector/idle_barrier.h" +#include "common_components/heap/allocator/regional_heap.h" +#include "common_components/heap/ark_collector/idle_barrier.h" namespace common { // PreforwardBarrier is the barrier for concurrent copying gc in fixup stage @@ -46,4 +46,4 @@ public: }; } // namespace common -#endif // COMMON_COMPONENTS_HEAP_W_COLLECTOR_TRACE_FIX_BARRIER_H +#endif // COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_MARKING_FIX_BARRIER_H diff --git a/common_components/heap/w_collector/remark_barrier.cpp b/common_components/heap/ark_collector/remark_barrier.cpp similarity index 88% rename from common_components/heap/w_collector/remark_barrier.cpp rename to common_components/heap/ark_collector/remark_barrier.cpp index d52f4ac0974294c7c329fdb765e0c1ef07a43c25..24b19cf7a5fa1c7fd99801c6646c445e91c4d058 100644 --- a/common_components/heap/w_collector/remark_barrier.cpp +++ b/common_components/heap/ark_collector/remark_barrier.cpp @@ -12,15 +12,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "common_components/heap/w_collector/remark_barrier.h" -#include "common_components/heap/allocator/region_space.h" +#include "common_components/heap/ark_collector/remark_barrier.h" +#include "common_components/heap/allocator/regional_heap.h" #include "common_components/mutator/mutator.h" #if defined(ARKCOMMON_TSAN_SUPPORT) #include "common_components/sanitizer/sanitizer_interface.h" #endif namespace common { -// Because gc thread will also have impact on tagged pointer in enum and trace phase, +// Because gc thread will also have impact on tagged pointer in enum and marking phase, // so we don't expect reading barrier have the ability to modify the referent field. BaseObject* RemarkBarrier::ReadRefField(BaseObject* obj, RefField& field) const { @@ -34,13 +34,16 @@ BaseObject* RemarkBarrier::ReadStaticRef(RefField& field) const { return // If the object is still alive, return it; if not, return nullptr BaseObject* RemarkBarrier::ReadStringTableStaticRef(RefField &field) const { + // Note: CMC GC assumes all objects in string table are not in young space. 
Based on the assumption, CMC GC skip + // read barrier in young GC + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + return reinterpret_cast(field.GetFieldValue()); + } + auto isSurvivor = [](BaseObject* obj) { - const GCReason gcReason = Heap::GetHeap().GetGCReason(); - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); - return (gcReason == GC_REASON_YOUNG && !region->IsInYoungSpace()) - || region->IsMarkedObject(obj) - || region->IsNewObjectSinceTrace(obj); + return region->IsMarkedObject(obj) || region->IsNewObjectSinceMarking(obj); }; auto obj = ReadRefField(nullptr, field); @@ -51,10 +54,18 @@ BaseObject* RemarkBarrier::ReadStringTableStaticRef(RefField &field) cons } } -void RemarkBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, size_t size) const - { - CHECK_CC(memcpy_s(reinterpret_cast(dst), size, reinterpret_cast(src), size) == EOK); - } +void RemarkBarrier::ReadStruct(HeapAddress dst, BaseObject *obj, HeapAddress src, size_t size) const +{ + CHECK_CC(memcpy_s(reinterpret_cast(dst), size, reinterpret_cast(src), size) == EOK); +} + +void RemarkBarrier::WriteRoot(BaseObject *obj) const +{ + ASSERT(Heap::IsHeapAddress(obj)); + Mutator *mutator = Mutator::GetMutator(); + mutator->RememberObjectInSatbBuffer(obj); + DLOG(BARRIER, "write root obj %p", obj); +} void RemarkBarrier::WriteRefField(BaseObject *obj, RefField &field, BaseObject *ref) const { @@ -120,7 +131,7 @@ void RemarkBarrier::WriteStaticRef(RefField& field, BaseObject* ref) cons void RemarkBarrier::WriteStruct(BaseObject* obj, HeapAddress dst, size_t dstLen, HeapAddress src, size_t srcLen) const { CHECK_CC(obj != nullptr); - if (obj != nullptr) { + if (obj != nullptr) { //LCOV_EXCL_BR_LINE ASSERT_LOGF(dst > reinterpret_cast(obj), "WriteStruct struct addr is less than obj!"); Mutator* mutator = Mutator::GetMutator(); if (mutator != nullptr) { @@ -216,4 +227,4 @@ void 
RemarkBarrier::CopyStructArray(BaseObject *dstObj, HeapAddress dstField, } } // namespace panda - \ No newline at end of file + diff --git a/common_components/heap/w_collector/remark_barrier.h b/common_components/heap/ark_collector/remark_barrier.h similarity index 93% rename from common_components/heap/w_collector/remark_barrier.h rename to common_components/heap/ark_collector/remark_barrier.h index a0b275ba747cca23fed97091dd2e924d2648c891..7a7a78a5a946b6501094da822b5e0c241ff14757 100644 --- a/common_components/heap/w_collector/remark_barrier.h +++ b/common_components/heap/ark_collector/remark_barrier.h @@ -15,10 +15,10 @@ #ifndef ARK_COMMON_REMARK_BARRIER_H #define ARK_COMMON_REMARK_BARRIER_H -#include "common_components/heap/w_collector/idle_barrier.h" +#include "common_components/heap/ark_collector/idle_barrier.h" namespace common { -// TraceBarrier is the barrier for concurrent marking phase. +// MarkingBarrier is the barrier for concurrent marking phase. // rename to TracingBarrier. Marking is confusing in consideration of MarkObject. 
class RemarkBarrier : public IdleBarrier { public: @@ -29,6 +29,7 @@ public: void ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, size_t size) const override; BaseObject* ReadStringTableStaticRef(RefField &field) const override; + void WriteRoot(BaseObject *obj) const override; void WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const override; void WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const override; void WriteStruct(BaseObject* obj, HeapAddress dst, size_t dstLen, HeapAddress src, size_t srcLen) const override; @@ -47,4 +48,4 @@ public: }; } // namespace panda #endif // ~ARK_COMMON_MARK_BARRIER_H - \ No newline at end of file + diff --git a/common_components/heap/ark_collector/tests/BUILD.gn b/common_components/heap/ark_collector/tests/BUILD.gn new file mode 100755 index 0000000000000000000000000000000000000000..f3a0377d9d0344e55591a386e3a8a0525dd59491 --- /dev/null +++ b/common_components/heap/ark_collector/tests/BUILD.gn @@ -0,0 +1,232 @@ +# Copyright (c) 2025 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//arkcompiler/ets_runtime/common_components/tests/test_helper.gni") + +module_output_path = "ets_runtime" + +host_unittest_action("Copy_Barrier_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "copy_barrier_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Enum_Barrier_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "enum_barrier_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Idle_Barrier_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "idle_barrier_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Post_Marking_Barrier_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "post_marking_barrier_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx 
libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Preforward_Barrier_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "preforward_barrier_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Remark_Barrier_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "remark_barrier_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Marking_Barrier_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "marking_barrier_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Ark_Collector_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "ark_collector_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ 
"//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "bounds_checking_function:libsec_shared", + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +group("unittest") { + testonly = true + + # deps file + deps = [ + ":Ark_Collector_Test", + ":Copy_Barrier_Test", + ":Enum_Barrier_Test", + ":Idle_Barrier_Test", + ":Post_Marking_Barrier_Test", + ":Preforward_Barrier_Test", + ":Remark_Barrier_Test", + ":Marking_Barrier_Test", + ] +} + +group("host_unittest") { + testonly = true + + # deps file + deps = [ + ":Ark_Collector_TestAction", + ":Copy_Barrier_TestAction", + ":Enum_Barrier_TestAction", + ":Idle_Barrier_TestAction", + ":Post_Marking_Barrier_TestAction", + ":Preforward_Barrier_TestAction", + ":Remark_Barrier_TestAction", + ":Marking_Barrier_TestAction", + ] +} \ No newline at end of file diff --git a/common_components/heap/ark_collector/tests/ark_collector_test.cpp b/common_components/heap/ark_collector/tests/ark_collector_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dc3e611ce06d5a7ec91eca70feca13a417b97fcd --- /dev/null +++ b/common_components/heap/ark_collector/tests/ark_collector_test.cpp @@ -0,0 +1,543 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/tests/test_helper.h" + +#include "common_components/heap/ark_collector/ark_collector.h" +#include "common_components/heap/ark_collector/ark_collector.cpp" +#include "common_components/heap/collector/collector_proxy.h" +#include "common_components/heap/heap_manager.h" +#include "common_components/heap/allocator/region_desc.h" +#include "common_components/mutator/mutator_manager-inl.h" + +using namespace common; + +namespace common::test { +using SuspensionType = MutatorBase::SuspensionType; +class ArkCollectorTest : public common::test::BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() + {} + + void SetUp() override + { + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::ARK_PROCESSOR); + } + + void TearDown() override + { + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::ARK_PROCESSOR); + } +}; + +std::unique_ptr GetArkCollector() +{ + CollectorResources &resources = Heap::GetHeap().GetCollectorResources(); + Allocator &allocator = Heap::GetHeap().GetAllocator(); + + return std::make_unique(allocator, resources); +} + +HWTEST_F_L0(ArkCollectorTest, IsUnmovableFromObjectTest0) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + BaseObject *obj = nullptr; + EXPECT_FALSE(arkCollector->IsUnmovableFromObject(obj)); +} + +HWTEST_F_L0(ArkCollectorTest, IsUnmovableFromObjectTest1) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject *obj = reinterpret_cast(addr); + + new (obj) BaseObject(); + + EXPECT_FALSE(arkCollector->IsUnmovableFromObject(obj)); +} + +HWTEST_F_L0(ArkCollectorTest, IsUnmovableFromObjectTest2) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + HeapAddress addr 
= HeapManager::Allocate(sizeof(BaseObject), AllocType::NONMOVABLE_OBJECT, true); + BaseObject *obj = reinterpret_cast(addr); + + new (obj) BaseObject(); + + RegionDesc *region = RegionDesc::GetAliveRegionDescAt(addr); + + bool isMarked = region->GetOrAllocResurrectBitmap()->MarkBits(0); + region->SetResurrectedRegionFlag(1); + region->SetRegionType(RegionDesc::RegionType::EXEMPTED_FROM_REGION); + + EXPECT_FALSE(isMarked); + + EXPECT_TRUE(arkCollector->IsUnmovableFromObject(obj)); +} + +HWTEST_F_L0(ArkCollectorTest, ForwardUpdateRawRefTest0) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject *obj = reinterpret_cast(addr); + + new (obj) BaseObject(); + + common::ObjectRef root = {obj}; + + BaseObject *oldObj = arkCollector->ForwardUpdateRawRef(root); + EXPECT_EQ(oldObj, obj); +} + +void FlipTest() +{ + MutatorManager &mutatorManager = MutatorManager::Instance(); + ThreadHolder::CreateAndRegisterNewThreadHolder(nullptr); + bool stwCallbackExecuted = false; + auto stwTest = [&mutatorManager, &stwCallbackExecuted]() { + EXPECT_TRUE(mutatorManager.WorldStopped()); + stwCallbackExecuted = true; + }; + FlipFunction mutatorTest = [&mutatorManager, &stwCallbackExecuted](Mutator &mutator) { + EXPECT_TRUE(mutator.HasSuspensionRequest(SuspensionType::SUSPENSION_FOR_RUNNING_CALLBACK)); + EXPECT_FALSE(mutatorManager.WorldStopped()); + EXPECT_TRUE(stwCallbackExecuted); + }; + STWParam param; + param.stwReason = "flip-test"; + mutatorManager.FlipMutators(param, stwTest, &mutatorTest); +} + +HWTEST_F_L0(ArkCollectorTest, FlipTest) +{ + std::thread t1(FlipTest); + t1.join(); +} + +HWTEST_F_L0(ArkCollectorTest, IsUnmovableFromObject_ReturnsFalseForNullptr) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + BaseObject* obj = nullptr; + 
EXPECT_FALSE(arkCollector->IsUnmovableFromObject(obj)); +} + +class TestableArkCollector : public ArkCollector { +public: + using ArkCollector::ForwardObject; + + explicit TestableArkCollector(Allocator& allocator, CollectorResources& resources) + : ArkCollector(allocator, resources), currentGCPhase_(GCPhase::GC_PHASE_COPY) {} + + void SetCurrentGCPhaseForTest(GCPhase phase) + { + currentGCPhase_ = phase; + } + + GCPhase GetCurrentGCPhaseForTest() const + { + return currentGCPhase_; + } + +private: + GCPhase currentGCPhase_; +}; + + +class DummyObject : public BaseObject { +public: + const common::TypeInfo* GetTypeInfo() const { return nullptr; } + size_t GetSize() const { return sizeof(DummyObject); } + + void SetClass(uintptr_t cls) + { + stateWord_.StoreStateWord(static_cast(cls)); + } + +private: + class BaseStateWord { + public: + using StateWordType = uint64_t; + + void StoreStateWord(StateWordType word) + { + stateWord_ = word; + } + + StateWordType LoadStateWord() const + { + return stateWord_; + } + + private: + StateWordType stateWord_{0}; + }; + + BaseStateWord stateWord_; +}; + +HWTEST_F_L0(ArkCollectorTest, ForwardObject_WithUnmovedObject_ReturnsSameAddress) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + TestableArkCollector* testableCollector = reinterpret_cast(arkCollector.get()); + + testableCollector->SetCurrentGCPhaseForTest(GCPhase::GC_PHASE_COPY); + EXPECT_EQ(testableCollector->GetCurrentGCPhaseForTest(), GCPhase::GC_PHASE_COPY); +} + +HWTEST_F_L0(ArkCollectorTest, MarkingRefField_TEST1) +{ + constexpr uint64_t TAG_BITS_SHIFT = 48; + constexpr uint64_t TAG_MARK = 0xFFFFULL << TAG_BITS_SHIFT; + constexpr uint64_t TAG_SPECIAL = 0x02ULL; + constexpr uint64_t TAG_BOOLEAN = 0x04ULL; + constexpr uint64_t TAG_HEAP_OBJECT_MASK = TAG_MARK | TAG_SPECIAL | TAG_BOOLEAN; + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject *obj = 
reinterpret_cast(addr | TAG_HEAP_OBJECT_MASK); + RefField<> field(obj); + + GlobalMarkStack globalMarkStack; + ParallelMarkingMonitor monitor(0, 0); + ParallelLocalMarkStack markStack(&globalMarkStack, &monitor); + WeakStack weakStack; + MarkingRefField(nullptr, field, markStack, weakStack, GCReason::GC_REASON_YOUNG); + EXPECT_FALSE(Heap::IsTaggedObject(field.GetFieldValue())); +} + +HWTEST_F_L0(ArkCollectorTest, MarkingRefField_TEST2) +{ + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::ALIVE_REGION_FIRST); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + GlobalMarkStack globalMarkStack; + ParallelMarkingMonitor monitor(0, 0); + ParallelLocalMarkStack markStack(&globalMarkStack, &monitor); + WeakStack weakStack; + MarkingRefField(nullptr, field, markStack, weakStack, GCReason::GC_REASON_APPSPAWN); + EXPECT_FALSE(region->IsInOldSpace()); + BaseObject *temp; + while (markStack.Pop(&temp)) {} +} + +HWTEST_F_L0(ArkCollectorTest, MarkingRefField_TEST3) +{ + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::OLD_REGION); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + GlobalMarkStack globalMarkStack; + ParallelMarkingMonitor monitor(0, 0); + ParallelLocalMarkStack markStack(&globalMarkStack, &monitor); + WeakStack weakStack; + MarkingRefField(nullptr, field, markStack, weakStack, GCReason::GC_REASON_APPSPAWN); + EXPECT_TRUE(region->IsInOldSpace()); + BaseObject *temp; + while (markStack.Pop(&temp)) {} +} + +HWTEST_F_L0(ArkCollectorTest, MarkingRefField_TEST4) +{ + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + 
region->SetRegionType(RegionDesc::RegionType::ALIVE_REGION_FIRST); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + GlobalMarkStack globalMarkStack; + ParallelMarkingMonitor monitor(0, 0); + ParallelLocalMarkStack markStack(&globalMarkStack, &monitor); + WeakStack weakStack; + MarkingRefField(nullptr, field, markStack, weakStack, GCReason::GC_REASON_YOUNG); + EXPECT_FALSE(region->IsInOldSpace()); + BaseObject *temp; + while (markStack.Pop(&temp)) {} +} + +HWTEST_F_L0(ArkCollectorTest, MarkingRefField_TEST5) +{ + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::OLD_REGION); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + GlobalMarkStack globalMarkStack; + ParallelMarkingMonitor monitor(0, 0); + ParallelLocalMarkStack markStack(&globalMarkStack, &monitor); + WeakStack weakStack; + MarkingRefField(nullptr, field, markStack, weakStack, GCReason::GC_REASON_YOUNG); + EXPECT_TRUE(region->IsInOldSpace()); +} + +HWTEST_F_L0(ArkCollectorTest, MarkingRefField_TEST6) +{ + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionAllocPtr(addr - 1); + region->SetMarkingLine(); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + GlobalMarkStack globalMarkStack; + ParallelMarkingMonitor monitor(0, 0); + ParallelLocalMarkStack markStack(&globalMarkStack, &monitor); + MarkingRefField(obj, obj, field, markStack, region); + EXPECT_TRUE(region->IsNewObjectSinceMarking(obj)); +} +class TestCreateMarkingArkCollector : public MarkingCollector { +public: + using MarkingCollector::SetGCReason; + explicit TestCreateMarkingArkCollector(Allocator& allocator, CollectorResources& resources) + : MarkingCollector(allocator, resources) {} + BaseObject* 
ForwardObject(BaseObject*) override { return nullptr; } + bool IsFromObject(BaseObject*) const override { return false; } + bool IsUnmovableFromObject(BaseObject*) const override { return false; } + BaseObject* FindToVersion(BaseObject* obj) const override { return nullptr; } + bool TryUpdateRefField(BaseObject*, RefField<>&, BaseObject*&) const override { return false; } + bool TryForwardRefField(BaseObject*, RefField<>&, BaseObject*&) const override { return false; } + bool TryUntagRefField(BaseObject*, RefField<>&, BaseObject*&) const override { return false; } + RefField<> GetAndTryTagRefField(BaseObject*) const override { return RefField<>(nullptr); } + bool IsOldPointer(RefField<>&) const override { return false; } + void AddRawPointerObject(BaseObject*) override {} + void RemoveRawPointerObject(BaseObject*) override {} + bool MarkObject(BaseObject* obj) const override { return false; } + void MarkingObjectRefFields(BaseObject *obj, MarkingRefFieldVisitor *data) override {} + BaseObject* CopyObjectAfterExclusive(BaseObject* obj) override { return nullptr; } + void DoGarbageCollection() override {} + bool IsCurrentPointer(RefField<>&) const override { return false; } + MarkingRefFieldVisitor CreateMarkingObjectRefFieldsVisitor(ParallelLocalMarkStack &workStack, + WeakStack &weakStack) override + { + return MarkingRefFieldVisitor(); + } +}; + +HWTEST_F_L0(ArkCollectorTest, CreateMarkingObjectRefFieldsVisitor_TEST1) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + GlobalMarkStack globalMarkStack; + ParallelMarkingMonitor monitor(0, 0); + ParallelLocalMarkStack markStack(&globalMarkStack, &monitor); + WeakStack weakStack; + TestCreateMarkingArkCollector* collector = reinterpret_cast(arkCollector.get()); + collector->SetGCReason(GCReason::GC_REASON_YOUNG); + auto visitor = arkCollector->CreateMarkingObjectRefFieldsVisitor(markStack, weakStack); + EXPECT_TRUE(visitor.GetRefFieldVisitor() != nullptr); +} + 
+HWTEST_F_L0(ArkCollectorTest, FixRefField_TEST1) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + constexpr uint64_t TAG_BITS_SHIFT = 48; + constexpr uint64_t TAG_MARK = 0xFFFFULL << TAG_BITS_SHIFT; + constexpr uint64_t TAG_SPECIAL = 0x02ULL; + constexpr uint64_t TAG_BOOLEAN = 0x04ULL; + constexpr uint64_t TAG_HEAP_OBJECT_MASK = TAG_MARK | TAG_SPECIAL | TAG_BOOLEAN; + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject *obj = reinterpret_cast(addr | TAG_HEAP_OBJECT_MASK); + RefField<> field(obj); + arkCollector->FixRefField(obj, field); + EXPECT_FALSE(Heap::IsTaggedObject(field.GetFieldValue())); +} + +HWTEST_F_L0(ArkCollectorTest, FixRefField_TEST2) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + BaseObject* obj = reinterpret_cast(0); + RefField<> field(obj); + arkCollector->FixRefField(obj, field); + EXPECT_FALSE(Heap::IsHeapAddress(obj)); +} + +class TestStaticObject : public BaseObjectOperatorInterfaces { +public: + size_t GetSize(const BaseObject *object) const override { return 0; } + bool IsValidObject(const BaseObject *object) const override { return false; } + void ForEachRefField(const BaseObject *object, const RefFieldVisitor &visitor) const override {} + void SetForwardingPointerAfterExclusive(BaseObject *object, BaseObject *fwdPtr) override {} + BaseObject *GetForwardingPointer(const BaseObject *object) const override + { + return const_cast(object); + } +}; + +HWTEST_F_L0(ArkCollectorTest, ForwardUpdateRawRef_TEST1) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::FROM_REGION); + Mutator::GetMutator()->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); + 
BaseObject* obj = reinterpret_cast(addr); + EXPECT_FALSE(obj->IsForwarded()); + obj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + obj->SetLanguageType(LanguageType::STATIC); + EXPECT_TRUE(obj->IsForwarded()); + TestStaticObject staticObject; + obj->RegisterStatic(&staticObject); + + alignas(RefField<>) char rootBuffer[sizeof(RefField<>)] = {0}; + ObjectRef& root = *new (rootBuffer) ObjectRef(); + root.object = obj; + auto ret = arkCollector->ForwardUpdateRawRef(root); + EXPECT_TRUE(arkCollector->IsFromObject(obj)); + EXPECT_EQ(ret, obj); +} + +class TestForwardNullObject : public TestStaticObject { +public: + BaseObject *GetForwardingPointer(const BaseObject *object) const override { return nullptr; } +}; + +HWTEST_F_L0(ArkCollectorTest, ForwardObject_TEST1) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::FROM_REGION); + Mutator::GetMutator()->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); + BaseObject* obj = reinterpret_cast(addr); + EXPECT_FALSE(obj->IsForwarded()); + obj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + obj->SetLanguageType(LanguageType::STATIC); + EXPECT_TRUE(obj->IsForwarded()); + TestForwardNullObject staticObject; + obj->RegisterStatic(&staticObject); + + auto ret = arkCollector->ForwardObject(obj); + EXPECT_EQ(ret, obj); +} + +HWTEST_F_L0(ArkCollectorTest, CopyObjectImpl_TEST1) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::FROM_REGION); + Mutator::GetMutator()->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); + BaseObject* obj 
= reinterpret_cast(addr); + EXPECT_FALSE(obj->IsForwarded()); + obj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + obj->SetLanguageType(LanguageType::STATIC); + EXPECT_TRUE(obj->IsForwarded()); + TestForwardNullObject staticObject; + obj->RegisterStatic(&staticObject); + + auto ret = arkCollector->CopyObjectImpl(obj); + EXPECT_TRUE(ret == nullptr); +} + +HWTEST_F_L0(ArkCollectorTest, TryUpdateRefFieldImpl_TEST1) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + RefField<> field(nullptr); + BaseObject* obj = nullptr; + bool ret = arkCollector->TryUpdateRefField(nullptr, field, obj); + EXPECT_FALSE(ret); +} + +HWTEST_F_L0(ArkCollectorTest, TryUpdateRefFieldImpl_TEST2) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::FROM_REGION); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + bool ret = arkCollector->TryUpdateRefField(nullptr, field, obj); + EXPECT_FALSE(ret); +} + +HWTEST_F_L0(ArkCollectorTest, TryUpdateRefFieldImpl_TEST3) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::FROM_REGION); + Mutator::GetMutator()->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); + BaseObject* obj = reinterpret_cast(addr); + EXPECT_FALSE(obj->IsForwarded()); + obj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + obj->SetLanguageType(LanguageType::STATIC); + EXPECT_TRUE(obj->IsForwarded()); + TestForwardNullObject staticObject; + obj->RegisterStatic(&staticObject); + + RefField field(obj); + bool ret = 
arkCollector->TryForwardRefField(nullptr, field, obj); + EXPECT_FALSE(ret); +} + +HWTEST_F_L0(ArkCollectorTest, TryUpdateRefFieldImpl_TEST4) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::FROM_REGION); + Mutator::GetMutator()->SetMutatorPhase(GCPhase::GC_PHASE_PRECOPY); + BaseObject* obj = reinterpret_cast(addr); + EXPECT_FALSE(obj->IsForwarded()); + obj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + obj->SetLanguageType(LanguageType::STATIC); + EXPECT_TRUE(obj->IsForwarded()); + TestStaticObject staticObject; + obj->RegisterStatic(&staticObject); + + RefField field(obj); + bool ret = arkCollector->TryForwardRefField(nullptr, field, obj); + EXPECT_TRUE(ret); +} +} // namespace common::test diff --git a/common_components/heap/ark_collector/tests/copy_barrier_test.cpp b/common_components/heap/ark_collector/tests/copy_barrier_test.cpp new file mode 100755 index 0000000000000000000000000000000000000000..280fd8758a6d5cf114f2c127e247c289791f459f --- /dev/null +++ b/common_components/heap/ark_collector/tests/copy_barrier_test.cpp @@ -0,0 +1,365 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/heap/ark_collector/copy_barrier.h" +#include "common_components/heap/ark_collector/tests/mock_barrier_collector.h" +#include "common_components/mutator/mutator_manager.h" +#include "common_components/tests/test_helper.h" +#include "common_components/heap/heap_manager.h" +#include "common_interfaces/base_runtime.h" + +using namespace common; + +namespace common::test { +class CopyBarrierTest : public common::test::BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() {} + + void SetUp() override + { + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::ARK_PROCESSOR); + } + + void TearDown() override + { + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::ARK_PROCESSOR); + } +}; + +HWTEST_F_L0(CopyBarrierTest, ReadRefField_TEST1) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject* resultObj = copyBarrier->ReadRefField(obj, field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(CopyBarrierTest, ReadRefField_TEST2) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject* resultObj = copyBarrier->ReadRefField(nullptr, field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(CopyBarrierTest, ReadRefField_TEST3) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress addr = 
HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj, true); + + BaseObject* resultObj = copyBarrier->ReadRefField(obj, field); + ASSERT_TRUE(resultObj != nullptr); + constexpr uint64_t TAG_WEAK = 0x01ULL; + BaseObject* newObj = reinterpret_cast(reinterpret_cast(obj) | TAG_WEAK); + EXPECT_EQ(resultObj, newObj); +} + +HWTEST_F_L0(CopyBarrierTest, ReadRefField_TEST4) +{ + MockCollectorForwardTest collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(nullptr); + + BaseObject* resultObj = copyBarrier->ReadRefField(obj, field); + ASSERT_TRUE(resultObj == nullptr); +} + +HWTEST_F_L0(CopyBarrierTest, ReadRefField_TEST5) +{ + MockCollectorForwardTest collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj, true); + + BaseObject* resultObj = copyBarrier->ReadRefField(obj, field); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(CopyBarrierTest, ReadStaticRef_TEST1) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject* resultObj = copyBarrier->ReadStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(CopyBarrierTest, ReadStringTableStaticRef_TEST1) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + RefField 
field(nullptr); + + BaseObject* resultObj = copyBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj == nullptr); +} + +HWTEST_F_L0(CopyBarrierTest, ReadStringTableStaticRef_TEST2) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RegionDesc *regionInfo = RegionDesc::GetRegionDescAt(addr); + regionInfo->SetRegionAllocPtr(addr - 1); + regionInfo->SetMarkingLine(); + RefField field(obj); + + BaseObject* resultObj = copyBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(CopyBarrierTest, ReadStringTableStaticRef_TEST3) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RegionDesc *regionInfo = RegionDesc::GetRegionDescAt(addr); + regionInfo->SetRegionType(RegionDesc::RegionType::ALIVE_REGION_FIRST); + RefField field(obj); + + BaseObject* resultObj = copyBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj == nullptr); +} + +HWTEST_F_L0(CopyBarrierTest, ReadStringTableStaticRef_TEST4) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + Heap::GetHeap().SetGCReason(GC_REASON_YOUNG); + BaseObject* resultObj = copyBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(CopyBarrierTest, ReadStruct_TEST1) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier 
!= nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + + constexpr size_t size = sizeof(BaseObject); + uint8_t dstBuffer[size] = {}; + HeapAddress dst = reinterpret_cast(dstBuffer); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + + copyBarrier->ReadStruct(dst, obj, src, size); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(CopyBarrierTest, ReadStruct_TEST2) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + + constexpr size_t size = sizeof(BaseObject); + uint8_t dstBuffer[size] = {}; + HeapAddress dst = reinterpret_cast(dstBuffer); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + + copyBarrier->ReadStruct(dst, nullptr, src, size); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(CopyBarrierTest, AtomicWriteRefField_TEST1) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + 
EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + copyBarrier->AtomicWriteRefField(oldObj, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), neWAddress); +} + +HWTEST_F_L0(CopyBarrierTest, AtomicWriteRefField_TEST2) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + copyBarrier->AtomicWriteRefField(nullptr, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), neWAddress); +} + +HWTEST_F_L0(CopyBarrierTest, CompareAndSwapRefField_TEST1) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), 
AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + bool result = copyBarrier->CompareAndSwapRefField(oldObj, oldField, oldObj, newObj, + std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); +} + +HWTEST_F_L0(CopyBarrierTest, CompareAndSwapRefField_TEST2) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + + bool result = copyBarrier->CompareAndSwapRefField(oldObj, oldField, oldObj, oldObj, + std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); +} + +HWTEST_F_L0(CopyBarrierTest, CompareAndSwapRefField_TEST3) +{ + MockCollector collector; + auto copyBarrier = std::make_unique(collector); + ASSERT_TRUE(copyBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress 
oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + bool result = copyBarrier->CompareAndSwapRefField(oldObj, newField, oldObj, newObj, + std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_FALSE(result); +} +} // namespace common::test diff --git a/common_components/heap/ark_collector/tests/enum_barrier_test.cpp b/common_components/heap/ark_collector/tests/enum_barrier_test.cpp new file mode 100755 index 0000000000000000000000000000000000000000..31fd6b0195c9cf860574de375c5bacfe9aea8b94 --- /dev/null +++ b/common_components/heap/ark_collector/tests/enum_barrier_test.cpp @@ -0,0 +1,437 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/heap/ark_collector/enum_barrier.h" +#include "common_components/heap/ark_collector/tests/mock_barrier_collector.h" +#include "common_components/mutator/mutator_manager.h" +#include "common_components/tests/test_helper.h" +#include "common_components/heap/heap_manager.h" +#include "common_interfaces/base_runtime.h" + +using namespace common; + +namespace common::test { +class EnumBarrierTest : public common::test::BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() {} + + void SetUp() override + { + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::ARK_PROCESSOR); + } + + void TearDown() override + { + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::ARK_PROCESSOR); + } +}; + +HWTEST_F_L0(EnumBarrierTest, ReadRefField_TEST1) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject* resultObj = enumBarrier->ReadRefField(obj, field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(EnumBarrierTest, ReadRefField_TEST2) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject* resultObj = enumBarrier->ReadRefField(nullptr, field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(EnumBarrierTest, ReadStaticRef_TEST1) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress addr = 
HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject* resultObj = enumBarrier->ReadStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(EnumBarrierTest, WriteRefField_TEST1) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(addr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField field(oldObj); + MAddress oldAddress = field.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + enumBarrier->WriteRefField(oldObj, field, newObj); + MAddress newAddress = field.GetFieldValue(); + EXPECT_NE(newAddress, oldAddress); +} + +HWTEST_F_L0(EnumBarrierTest, WriteRefField_TEST2) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField field(nullptr); + MAddress oldAddress = field.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + 
enumBarrier->WriteRefField(oldObj, field, newObj); + MAddress newAddress = field.GetFieldValue(); + EXPECT_NE(newAddress, oldAddress); +} + +HWTEST_F_L0(EnumBarrierTest, WriteBarrier_TEST1) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + +#ifndef ARK_USE_SATB_BARRIER + constexpr uint64_t TAG_BITS_SHIFT = 48; + constexpr uint64_t TAG_MARK = 0xFFFFULL << TAG_BITS_SHIFT; + constexpr uint64_t TAG_SPECIAL = 0x02ULL; + constexpr uint64_t TAG_BOOLEAN = 0x04ULL; + constexpr uint64_t TAG_HEAP_OBJECT_MASK = TAG_MARK | TAG_SPECIAL | TAG_BOOLEAN; + + RefField<> field(MAddress(0)); + enumBarrier->WriteBarrier(nullptr, field, nullptr); + BaseObject *obj = reinterpret_cast(TAG_HEAP_OBJECT_MASK); + enumBarrier->WriteBarrier(obj, field, obj); + EXPECT_TRUE(obj != nullptr); +#endif +} + +HWTEST_F_L0(EnumBarrierTest, WriteBarrier_TEST2) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + +#ifdef ARK_USE_SATB_BARRIER + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField normalField(obj); + enumBarrier->WriteBarrier(obj, normalField, obj); + EXPECT_TRUE(obj != nullptr); + + HeapAddress weakAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* weakObj = reinterpret_cast(weakAddr); + RefField weakField(MAddress(0)); + enumBarrier->WriteBarrier(&weakObj, weakField, &weakObj); + EXPECT_TRUE(weakObj != nullptr); + + HeapAddress nonTaggedAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* nonTaggedObj = reinterpret_cast(nonTaggedAddr); + RefField nonTaggedField(&nonTaggedObj); + enumBarrier->WriteBarrier(nullptr, nonTaggedField, &nonTaggedObj); + EXPECT_TRUE(nonTaggedObj != nullptr); +#endif +} + +HWTEST_F_L0(EnumBarrierTest, WriteBarrier_TEST3) +{ + MockCollector 
collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + +#ifndef ARK_USE_SATB_BARRIER + constexpr uint64_t TAG_BITS_SHIFT = 48; + constexpr uint64_t TAG_MARK = 0xFFFFULL << TAG_BITS_SHIFT; + constexpr uint64_t TAG_SPECIAL = 0x02ULL; + constexpr uint64_t TAG_BOOLEAN = 0x04ULL; + constexpr uint64_t TAG_HEAP_OBJECT_MASK = TAG_MARK | TAG_SPECIAL | TAG_BOOLEAN; + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField<> field(obj); + Heap::GetHeap().SetGCReason(GC_REASON_YOUNG); + enumBarrier->WriteBarrier(obj, field, obj); + EXPECT_TRUE(obj != nullptr); +#endif +} + +HWTEST_F_L0(EnumBarrierTest, ReadStruct_TEST1) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + enumBarrier->ReadStruct(dst, obj, src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(EnumBarrierTest, WriteStruct_TEST1) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = 
HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + enumBarrier->WriteStruct(obj, dst, sizeof(BaseObject), src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(EnumBarrierTest, WriteStruct_TEST2) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + enumBarrier->WriteStruct(nullptr, dst, sizeof(BaseObject), src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(EnumBarrierTest, AtomicReadRefField_TEST1) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + constexpr size_t size = 100; + obj->SetSizeForwarded(size); + EXPECT_EQ(obj->GetSizeForwarded(), size); + RefField field(obj); + + BaseObject* resultObj = nullptr; + resultObj = enumBarrier->AtomicReadRefField(obj, field, std::memory_order_seq_cst); + 
ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(EnumBarrierTest, AtomicWriteRefField_TEST1) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + enumBarrier->AtomicWriteRefField(oldObj, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), neWAddress); +} + +HWTEST_F_L0(EnumBarrierTest, AtomicWriteRefField_TEST2) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + 
EXPECT_NE(oldAddress, neWAddress); + + enumBarrier->AtomicWriteRefField(nullptr, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), neWAddress); +} + +HWTEST_F_L0(EnumBarrierTest, AtomicSwapRefField_TEST1) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + BaseObject* resultObj = nullptr; + resultObj = enumBarrier->AtomicSwapRefField(oldObj, oldField, newObj, std::memory_order_relaxed); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(oldField.GetFieldValue(), newField.GetFieldValue()); +} + +HWTEST_F_L0(EnumBarrierTest, CompareAndSwapRefField_TEST1) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj 
= reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + bool result = enumBarrier->CompareAndSwapRefField(oldObj, oldField, oldObj, newObj, + std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); +} + +HWTEST_F_L0(EnumBarrierTest, CompareAndSwapRefField_TEST2) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + + bool result = enumBarrier->CompareAndSwapRefField(oldObj, oldField, oldObj, oldObj, + std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); +} + +HWTEST_F_L0(EnumBarrierTest, CompareAndSwapRefField_TEST3) +{ + MockCollector collector; + auto enumBarrier = std::make_unique(collector); + ASSERT_TRUE(enumBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + 
EXPECT_NE(oldAddress, neWAddress); + + bool result = enumBarrier->CompareAndSwapRefField(oldObj, newField, oldObj, newObj, + std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_FALSE(result); +} + +} // namespace common::test diff --git a/common_components/heap/w_collector/tests/idle_barrier_test.cpp b/common_components/heap/ark_collector/tests/idle_barrier_test.cpp similarity index 31% rename from common_components/heap/w_collector/tests/idle_barrier_test.cpp rename to common_components/heap/ark_collector/tests/idle_barrier_test.cpp index 989a8583aa5d30b644b19569aadbea1293a94a78..7e2f0240d148e534490dbe51c92ced90129825b1 100755 --- a/common_components/heap/w_collector/tests/idle_barrier_test.cpp +++ b/common_components/heap/ark_collector/tests/idle_barrier_test.cpp @@ -13,269 +13,294 @@ * limitations under the License. */ -#include "common_components/heap/w_collector/idle_barrier.h" -#include "common_components/heap/heap.h" +#include "common_components/heap/ark_collector/idle_barrier.h" +#include "common_components/heap/ark_collector/tests/mock_barrier_collector.h" +#include "common_components/mutator/mutator_manager.h" #include "common_components/tests/test_helper.h" +#include "common_components/heap/heap_manager.h" +#include "common_interfaces/base_runtime.h" using namespace common; namespace common::test { class IdleBarrierTest : public common::test::BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() {} + + void SetUp() override + { + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::ARK_PROCESSOR); + } + + void TearDown() override + { + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::ARK_PROCESSOR); + } }; HWTEST_F_L0(IdleBarrierTest, ReadStruct_TEST0) { - Collector& collector = Heap::GetHeap().GetCollector(); + MockCollector collector; auto idleBarrier = std::make_unique(collector); ASSERT_TRUE(idleBarrier != nullptr); - BaseObject 
obj; - constexpr size_t size = 16; - uint8_t srcBuffer[size] = {}; - uint8_t dstBuffer[size] = {}; - srcBuffer[0] = 1; - HeapAddress src = reinterpret_cast(srcBuffer); - HeapAddress dst = reinterpret_cast(dstBuffer); - idleBarrier->ReadStruct(dst, &obj, src, size); - EXPECT_EQ(dstBuffer[0], 1); - EXPECT_EQ(srcBuffer[0], dstBuffer[0]); + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + idleBarrier->ReadStruct(dst, obj, src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); } HWTEST_F_L0(IdleBarrierTest, AtomicWriteRefField_TEST0) { - Collector& collector = Heap::GetHeap().GetCollector(); + MockCollector collector; auto idleBarrier = std::make_unique(collector); ASSERT_TRUE(idleBarrier != nullptr); - BaseObject oldObj; + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); MAddress oldAddress = oldField.GetFieldValue(); - BaseObject newObj; + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); constexpr size_t newSize = 200; 
- newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); MAddress neWAddress = newField.GetFieldValue(); EXPECT_NE(oldAddress, neWAddress); - idleBarrier->AtomicWriteRefField(&oldObj, oldField, &newObj, std::memory_order_relaxed); + idleBarrier->AtomicWriteRefField(oldObj, oldField, newObj, std::memory_order_relaxed); EXPECT_EQ(oldField.GetFieldValue(), neWAddress); } -HWTEST_F_L0(IdleBarrierTest, AtomicWriteRefField_TEST1) { - Collector& collector = Heap::GetHeap().GetCollector(); +HWTEST_F_L0(IdleBarrierTest, AtomicWriteRefField_TEST1) +{ + MockCollector collector; auto idleBarrier = std::make_unique(collector); ASSERT_TRUE(idleBarrier != nullptr); - BaseObject oldObj; + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); MAddress oldAddress = oldField.GetFieldValue(); - BaseObject newObj; + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); MAddress neWAddress = newField.GetFieldValue(); EXPECT_NE(oldAddress, neWAddress); - idleBarrier->AtomicWriteRefField(nullptr, oldField, &newObj, std::memory_order_relaxed); + idleBarrier->AtomicWriteRefField(nullptr, oldField, newObj, 
std::memory_order_relaxed); EXPECT_EQ(oldField.GetFieldValue(), neWAddress); } HWTEST_F_L0(IdleBarrierTest, AtomicSwapRefField_TEST0) { - Collector& collector = Heap::GetHeap().GetCollector(); + MockCollector collector; auto idleBarrier = std::make_unique(collector); ASSERT_TRUE(idleBarrier != nullptr); - BaseObject oldObj; + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); MAddress oldAddress = oldField.GetFieldValue(); - BaseObject newObj; + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); MAddress neWAddress = newField.GetFieldValue(); EXPECT_NE(oldAddress, neWAddress); - BaseObject *retObj = idleBarrier->AtomicSwapRefField(&oldObj, oldField, &newObj, std::memory_order_relaxed); + BaseObject *retObj = idleBarrier->AtomicSwapRefField(oldObj, oldField, newObj, std::memory_order_relaxed); ASSERT_TRUE(retObj != nullptr); EXPECT_EQ(oldField.GetFieldValue(), newField.GetFieldValue()); } -HWTEST_F_L0(IdleBarrierTest, CompareAndSwapRefField_TEST0) -{ - Collector& collector = Heap::GetHeap().GetCollector(); - auto idleBarrier = std::make_unique(collector); - ASSERT_TRUE(idleBarrier != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField 
oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - bool result = idleBarrier->CompareAndSwapRefField(&oldObj, oldField, &oldObj, &newObj, - std::memory_order_seq_cst, std::memory_order_seq_cst); - ASSERT_FALSE(result); -} - HWTEST_F_L0(IdleBarrierTest, CompareAndSwapRefField_TEST1) { - Collector& collector = Heap::GetHeap().GetCollector(); + MockCollector collector; auto idleBarrier = std::make_unique(collector); ASSERT_TRUE(idleBarrier != nullptr); - BaseObject oldObj; + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); - bool result = idleBarrier->CompareAndSwapRefField(&oldObj, oldField, &oldObj, &oldObj, + bool result = idleBarrier->CompareAndSwapRefField(oldObj, oldField, oldObj, oldObj, std::memory_order_seq_cst, std::memory_order_seq_cst); ASSERT_TRUE(result); } HWTEST_F_L0(IdleBarrierTest, CompareAndSwapRefField_TEST2) { - Collector& collector = Heap::GetHeap().GetCollector(); + MockCollector collector; auto idleBarrier = std::make_unique(collector); ASSERT_TRUE(idleBarrier != nullptr); - BaseObject oldObj; + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); + 
oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); MAddress oldAddress = oldField.GetFieldValue(); - BaseObject newObj; + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); MAddress neWAddress = newField.GetFieldValue(); EXPECT_NE(oldAddress, neWAddress); - bool result = idleBarrier->CompareAndSwapRefField(&oldObj, newField, &oldObj, &newObj, + bool result = idleBarrier->CompareAndSwapRefField(oldObj, newField, oldObj, newObj, std::memory_order_seq_cst, std::memory_order_seq_cst); ASSERT_FALSE(result); } HWTEST_F_L0(IdleBarrierTest, WriteRefField_TEST0) { - Collector& collector = Heap::GetHeap().GetCollector(); + MockCollector collector; auto idleBarrier = std::make_unique(collector); ASSERT_TRUE(idleBarrier != nullptr); - BaseObject oldObj; + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); MAddress oldAddress = oldField.GetFieldValue(); - BaseObject newObj; + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); + newObj->SetSizeForwarded(newSize); + 
EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); MAddress neWAddress = newField.GetFieldValue(); EXPECT_NE(oldAddress, neWAddress); - idleBarrier->WriteRefField(&oldObj, oldField, &newObj); + idleBarrier->WriteRefField(oldObj, oldField, newObj); EXPECT_EQ(oldField.GetFieldValue(), neWAddress); } -HWTEST_F_L0(IdleBarrierTest, WriteStruct_TEST0) +HWTEST_F_L0(IdleBarrierTest, WriteRefField_TEST1) +{ + MockCollector collector; + auto idleBarrier = std::make_unique(collector); + ASSERT_TRUE(idleBarrier != nullptr); + + constexpr uint64_t TAG_BITS_SHIFT = 48; + constexpr uint64_t TAG_MARK = 0xFFFFULL << TAG_BITS_SHIFT; + constexpr uint64_t TAG_SPECIAL = 0x02ULL; + constexpr uint64_t TAG_BOOLEAN = 0x04ULL; + constexpr uint64_t TAG_HEAP_OBJECT_MASK = TAG_MARK | TAG_SPECIAL | TAG_BOOLEAN; + + RefField<> field(MAddress(0)); + BaseObject *obj = reinterpret_cast(TAG_HEAP_OBJECT_MASK); + idleBarrier->WriteRefField(obj, field, obj); + EXPECT_TRUE(obj != nullptr); +} + +HWTEST_F_L0(IdleBarrierTest, WriteBarrier_TEST1) { - Collector& collector = Heap::GetHeap().GetCollector(); + MockCollector collector; auto idleBarrier = std::make_unique(collector); ASSERT_TRUE(idleBarrier != nullptr); - BaseObject obj; - constexpr size_t size = 16; - uint8_t srcBuffer[size] = {}; - uint8_t dstBuffer[size] = {}; - srcBuffer[0] = 1; - HeapAddress src = reinterpret_cast(srcBuffer); - HeapAddress dst = reinterpret_cast(dstBuffer); - - idleBarrier->WriteStruct(&obj, dst, size, src, size); - EXPECT_EQ(dstBuffer[0], 1); - EXPECT_EQ(srcBuffer[0], dstBuffer[0]); + constexpr uint64_t TAG_BITS_SHIFT = 48; + constexpr uint64_t TAG_MARK = 0xFFFFULL << TAG_BITS_SHIFT; + constexpr uint64_t TAG_SPECIAL = 0x02ULL; + constexpr uint64_t TAG_BOOLEAN = 0x04ULL; + constexpr uint64_t TAG_HEAP_OBJECT_MASK = TAG_MARK | TAG_SPECIAL | TAG_BOOLEAN; + + RefField<> field(MAddress(0)); + BaseObject *obj = reinterpret_cast(TAG_HEAP_OBJECT_MASK); + idleBarrier->WriteBarrier(obj, field, obj); + 
EXPECT_TRUE(obj != nullptr); } -HWTEST_F_L0(IdleBarrierTest, WriteStaticRef_TEST0) +HWTEST_F_L0(IdleBarrierTest, WriteBarrier_TEST2) { - Collector& collector = Heap::GetHeap().GetCollector(); + MockCollector collector; auto idleBarrier = std::make_unique(collector); ASSERT_TRUE(idleBarrier != nullptr); - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField field(&oldObj); - MAddress oldAddress = field.GetFieldValue(); + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField<> field(obj); + idleBarrier->WriteBarrier(obj, field, obj); + EXPECT_TRUE(obj != nullptr); +} - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - idleBarrier->WriteStaticRef(field, &newObj); - MAddress newAddress = field.GetFieldValue(); - EXPECT_NE(newAddress, oldAddress); +HWTEST_F_L0(IdleBarrierTest, WriteStruct_TEST0) +{ + MockCollector collector; + auto idleBarrier = std::make_unique(collector); + ASSERT_TRUE(idleBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + idleBarrier->WriteStruct(obj, dst, sizeof(BaseObject), src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); } 
HWTEST_F_L0(IdleBarrierTest, CopyStructArray_TEST0) { - Collector& collector = Heap::GetHeap().GetCollector(); + MockCollector collector; auto idleBarrier = std::make_unique(collector); ASSERT_TRUE(idleBarrier != nullptr); - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - - constexpr size_t size = 16; - uint8_t srcBuffer[size] = {}; - uint8_t dstBuffer[size] = {}; - srcBuffer[0] = 1; - HeapAddress src = reinterpret_cast(srcBuffer); - HeapAddress dst = reinterpret_cast(dstBuffer); - - idleBarrier->CopyStructArray(&oldObj, dst, size, &newObj, src, size); - EXPECT_EQ(dstBuffer[0], 1); - EXPECT_EQ(srcBuffer[0], dstBuffer[0]); + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + idleBarrier->CopyStructArray(obj, dst, sizeof(BaseObject), obj, src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); } -} // namespace common::test \ No newline at end of file +} // namespace common::test diff --git a/common_components/heap/ark_collector/tests/marking_barrier_test.cpp b/common_components/heap/ark_collector/tests/marking_barrier_test.cpp new file mode 100755 index 0000000000000000000000000000000000000000..be88b96801d9001207f183d1bf8dccb473a71e0d --- 
/dev/null +++ b/common_components/heap/ark_collector/tests/marking_barrier_test.cpp @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/heap/ark_collector/marking_barrier.h" +#include "common_components/heap/ark_collector/tests/mock_barrier_collector.h" +#include "common_components/mutator/mutator_manager.h" +#include "common_components/tests/test_helper.h" +#include "common_components/heap/heap_manager.h" +#include "common_interfaces/base_runtime.h" + +using namespace common; + +namespace common::test { +class MarkingBarrierTest : public BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() {} + + void SetUp() override + { + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::ARK_PROCESSOR); + } + + void TearDown() override + { + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::ARK_PROCESSOR); + } +}; + +HWTEST_F_L0(MarkingBarrierTest, ReadRefField_TEST1) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject *resultObj = markingBarrier->ReadRefField(obj, field); + EXPECT_EQ(resultObj, obj); +} + 
+HWTEST_F_L0(MarkingBarrierTest, ReadRefField_TEST2) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject *resultObj = markingBarrier->ReadRefField(nullptr, field); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(MarkingBarrierTest, ReadStaticRef_TEST1) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject *resultObj = markingBarrier->ReadStaticRef(field); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(MarkingBarrierTest, ReadStruct_TEST1) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + markingBarrier->ReadStruct(dst, obj, src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(MarkingBarrierTest, WriteRefField_TEST1) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier 
!= nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + + RefField field(oldObj); + BaseObject *target = field.GetTargetObject(); + EXPECT_TRUE(target != nullptr); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + + markingBarrier->WriteRefField(oldObj, field, newObj); + + MAddress newAddress = field.GetFieldValue(); + MAddress expectedAddress = RefField<>(newObj).GetFieldValue(); + EXPECT_EQ(newAddress, expectedAddress); +} + +HWTEST_F_L0(MarkingBarrierTest, WriteRefField_TEST2) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + + RefField field(MAddress(0)); + BaseObject *target = field.GetTargetObject(); + EXPECT_TRUE(target == nullptr); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + + markingBarrier->WriteRefField(oldObj, field, newObj); + + MAddress newAddress = field.GetFieldValue(); + MAddress expectedAddress = RefField<>(newObj).GetFieldValue(); + EXPECT_EQ(newAddress, expectedAddress); +} + +HWTEST_F_L0(MarkingBarrierTest, 
WriteBarrier_TEST1) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + +#ifndef ARK_USE_SATB_BARRIER + constexpr uint64_t TAG_BITS_SHIFT = 48; + constexpr uint64_t TAG_MARK = 0xFFFFULL << TAG_BITS_SHIFT; + constexpr uint64_t TAG_SPECIAL = 0x02ULL; + constexpr uint64_t TAG_BOOLEAN = 0x04ULL; + constexpr uint64_t TAG_HEAP_OBJECT_MASK = TAG_MARK | TAG_SPECIAL | TAG_BOOLEAN; + + RefField<> field(MAddress(0)); + markingBarrier->WriteBarrier(nullptr, field, nullptr); + BaseObject *obj = reinterpret_cast(TAG_HEAP_OBJECT_MASK); + markingBarrier->WriteBarrier(obj, field, obj); + EXPECT_TRUE(obj != nullptr); +#endif +} + +HWTEST_F_L0(MarkingBarrierTest, WriteBarrier_TEST2) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + +#ifdef ARK_USE_SATB_BARRIER + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField normalField(obj); + markingBarrier->WriteBarrier(obj, normalField, obj); + EXPECT_TRUE(obj != nullptr); + + BaseObject weakObj; + RefField weakField(MAddress(0)); + markingBarrier->WriteBarrier(&weakObj, weakField, &weakObj); + EXPECT_TRUE(weakObj != nullptr); + + BaseObject nonTaggedObj; + RefField nonTaggedField(&nonTaggedObj); + markingBarrier->WriteBarrier(nullptr, nonTaggedField, &nonTaggedObj); + EXPECT_TRUE(nonTaggedObj != nullptr); +#endif +} + +HWTEST_F_L0(MarkingBarrierTest, WriteBarrier_TEST3) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + +#ifndef ARK_USE_SATB_BARRIER + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField<> field(obj); + Heap::GetHeap().SetGCReason(GC_REASON_YOUNG); + markingBarrier->WriteBarrier(obj, field, obj); + 
EXPECT_TRUE(obj != nullptr); +#endif +} + +HWTEST_F_L0(MarkingBarrierTest, WriteStruct_TEST1) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + markingBarrier->WriteStruct(obj, dst, sizeof(BaseObject), src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(MarkingBarrierTest, WriteStruct_TEST2) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + + auto mutator = ThreadLocal::GetMutator(); + ThreadLocal::SetMutator(nullptr); + markingBarrier->WriteStruct(obj, dst, sizeof(BaseObject), src, sizeof(BaseObject)); + 
ThreadLocal::SetMutator(mutator); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(MarkingBarrierTest, AtomicReadRefField_TEST1) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + constexpr size_t size = 100; + obj->SetSizeForwarded(size); + EXPECT_EQ(obj->GetSizeForwarded(), size); + RefField field(obj); + + BaseObject *resultObj = nullptr; + resultObj = markingBarrier->AtomicReadRefField(obj, field, std::memory_order_seq_cst); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(MarkingBarrierTest, AtomicWriteRefField_TEST1) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + markingBarrier->AtomicWriteRefField(oldObj, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), neWAddress); +} + +HWTEST_F_L0(MarkingBarrierTest, AtomicWriteRefField_TEST2) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + 
HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + markingBarrier->AtomicWriteRefField(nullptr, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), neWAddress); +} + +HWTEST_F_L0(MarkingBarrierTest, AtomicSwapRefField_TEST1) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + BaseObject *resultObj = nullptr; + resultObj = markingBarrier->AtomicSwapRefField(oldObj, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), 
newField.GetFieldValue()); +} + +HWTEST_F_L0(MarkingBarrierTest, CompareAndSwapRefField_TEST1) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + bool result = markingBarrier->CompareAndSwapRefField( + oldObj, oldField, oldObj, newObj, std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); +} + +HWTEST_F_L0(MarkingBarrierTest, CompareAndSwapRefField_TEST2) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + + bool result = markingBarrier->CompareAndSwapRefField( + oldObj, oldField, oldObj, oldObj, std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); +} + +HWTEST_F_L0(MarkingBarrierTest, CompareAndSwapRefField_TEST3) +{ + MockCollector collector; + auto markingBarrier = std::make_unique(collector); + ASSERT_TRUE(markingBarrier != nullptr); + + 
HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + bool result = markingBarrier->CompareAndSwapRefField( + oldObj, newField, oldObj, newObj, std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_FALSE(result); +} +} // namespace common::test diff --git a/common_components/heap/ark_collector/tests/mock_barrier_collector.h b/common_components/heap/ark_collector/tests/mock_barrier_collector.h new file mode 100755 index 0000000000000000000000000000000000000000..de64b51aa5a545bf89df90972c5c432c398e386e --- /dev/null +++ b/common_components/heap/ark_collector/tests/mock_barrier_collector.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_MOCK_BARRIER_COLLECTOR_H +#define COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_MOCK_BARRIER_COLLECTOR_H + +#include "common_components/heap/collector/collector.h" + +namespace common { +class MockCollector : public Collector { +public: + void Init(const RuntimeParam& param) override {} + void RunGarbageCollection(uint64_t, GCReason, GCType) override {} + BaseObject* ForwardObject(BaseObject*) override + { + return nullptr; + } + bool ShouldIgnoreRequest(GCRequest&) override + { + return false; + } + bool IsFromObject(BaseObject*) const override + { + return false; + } + bool IsUnmovableFromObject(BaseObject*) const override + { + return false; + } + BaseObject* FindToVersion(BaseObject*) const override + { + return nullptr; + } + + bool TryUpdateRefField(BaseObject* obj, RefField<>& field, BaseObject*& toVersion) const override + { + toVersion = reinterpret_cast(field.GetFieldValue()); + return true; + } + + bool TryForwardRefField(BaseObject*, RefField<>&, BaseObject*&) const override + { + return false; + } + bool TryUntagRefField(BaseObject*, RefField<>&, BaseObject*&) const override + { + return false; + } + RefField<> GetAndTryTagRefField(BaseObject*) const override + { + return RefField<>(nullptr); + } + bool IsOldPointer(RefField<>&) const override + { + return false; + } + bool IsCurrentPointer(RefField<>&) const override + { + return false; + } + void AddRawPointerObject(BaseObject*) override {} + void RemoveRawPointerObject(BaseObject*) override {} +}; + +class MockCollectorForwardTest : public MockCollector { +public: + bool IsFromObject(BaseObject*) const override + { + return true; + } + bool TryForwardRefField(BaseObject*, RefField<>&, BaseObject*&) const override + { + static bool isForward = false; + if (!isForward) { + isForward = true; + return false; + } + + return true; + } +}; +} // namespace common + +#endif // COMMON_COMPONENTS_HEAP_ARK_COLLECTOR_MOCK_BARRIER_COLLECTOR_H \ No newline at end of file 
diff --git a/common_components/heap/ark_collector/tests/post_marking_barrier_test.cpp b/common_components/heap/ark_collector/tests/post_marking_barrier_test.cpp new file mode 100755 index 0000000000000000000000000000000000000000..140515f1ebbb3a792706cafcdfe88f95d08e89ed --- /dev/null +++ b/common_components/heap/ark_collector/tests/post_marking_barrier_test.cpp @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/heap/ark_collector/post_marking_barrier.h" +#include "common_components/heap/ark_collector/tests/mock_barrier_collector.h" +#include "common_components/mutator/mutator_manager.h" +#include "common_components/tests/test_helper.h" +#include "common_components/heap/heap_manager.h" +#include "common_interfaces/base_runtime.h" + +using namespace common; + +namespace common::test { +class PostMarkingBarrierTest : public ::testing::Test { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() {} + + void SetUp() override + { + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::ARK_PROCESSOR); + } + + void TearDown() override + { + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::ARK_PROCESSOR); + } +}; + +HWTEST_F_L0(PostMarkingBarrierTest, ReadRefField_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + 
ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject *resultObj = postMarkingBarrier->ReadRefField(obj, field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(PostMarkingBarrierTest, ReadRefField_TEST2) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject *resultObj = postMarkingBarrier->ReadRefField(nullptr, field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(PostMarkingBarrierTest, ReadStaticRef_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject *resultObj = postMarkingBarrier->ReadStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(PostMarkingBarrierTest, ReadStringTableStaticRef_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + RefField field(nullptr); + + BaseObject* resultObj = postMarkingBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj == nullptr); +} + +HWTEST_F_L0(PostMarkingBarrierTest, ReadStringTableStaticRef_TEST2) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), 
AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RegionDesc *regionInfo = RegionDesc::GetRegionDescAt(addr); + regionInfo->SetRegionAllocPtr(addr - 1); + regionInfo->SetMarkingLine(); + RefField field(obj); + + BaseObject* resultObj = postMarkingBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(PostMarkingBarrierTest, ReadStringTableStaticRef_TEST3) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RegionDesc *regionInfo = RegionDesc::GetRegionDescAt(addr); + regionInfo->SetRegionType(RegionDesc::RegionType::ALIVE_REGION_FIRST); + RefField field(obj); + + BaseObject* resultObj = postMarkingBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj == nullptr); +} + +HWTEST_F_L0(PostMarkingBarrierTest, ReadStringTableStaticRef_TEST4) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + Heap::GetHeap().SetGCReason(GC_REASON_YOUNG); + BaseObject* resultObj = postMarkingBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(PostMarkingBarrierTest, WriteRefField_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(addr); + RefField field(oldObj); + MAddress oldAddr = field.GetFieldValue(); + + HeapAddress newObjAddr = 
HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newObjAddr); + postMarkingBarrier->WriteRefField(oldObj, field, newObj); + MAddress newAddr = field.GetFieldValue(); + + EXPECT_NE(oldAddr, newAddr); + EXPECT_EQ(newAddr, reinterpret_cast(newObj)); +} + +HWTEST_F_L0(PostMarkingBarrierTest, WriteBarrier_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField<> field(obj); + postMarkingBarrier->WriteBarrier(obj, field, obj); + EXPECT_TRUE(obj != nullptr); +} + +HWTEST_F_L0(PostMarkingBarrierTest, WriteBarrier_TEST2) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField<> field(obj); + Heap::GetHeap().SetGCReason(GC_REASON_YOUNG); + postMarkingBarrier->WriteBarrier(obj, field, obj); + EXPECT_TRUE(obj != nullptr); +} + +HWTEST_F_L0(PostMarkingBarrierTest, ReadStruct_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + 
dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + postMarkingBarrier->ReadStruct(dst, obj, src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(PostMarkingBarrierTest, WriteStruct_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + postMarkingBarrier->WriteStruct(obj, dst, sizeof(BaseObject), src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(PostMarkingBarrierTest, AtomicReadRefField_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject* result = postMarkingBarrier->AtomicReadRefField(obj, field, std::memory_order_seq_cst); + EXPECT_EQ(result, obj); +} + +HWTEST_F_L0(PostMarkingBarrierTest, AtomicWriteRefField_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), 
AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + + postMarkingBarrier->AtomicWriteRefField(oldObj, oldField, newObj, std::memory_order_relaxed); + + EXPECT_NE(oldField.GetFieldValue(), oldAddress); + EXPECT_EQ(oldField.GetFieldValue(), reinterpret_cast(newObj)); +} + +HWTEST_F_L0(PostMarkingBarrierTest, AtomicWriteRefField_TEST2) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + + postMarkingBarrier->AtomicWriteRefField(nullptr, oldField, newObj, std::memory_order_relaxed); + + EXPECT_NE(oldField.GetFieldValue(), oldAddress); + EXPECT_EQ(oldField.GetFieldValue(), reinterpret_cast(newObj)); +} + +HWTEST_F_L0(PostMarkingBarrierTest, AtomicSwapRefField_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + 
ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + RefField field(oldObj); + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + BaseObject* result = postMarkingBarrier->AtomicSwapRefField( + oldObj, field, newObj, std::memory_order_relaxed); + EXPECT_EQ(result, oldObj); + EXPECT_EQ(field.GetFieldValue(), reinterpret_cast(newObj)); +} + +HWTEST_F_L0(PostMarkingBarrierTest, CompareAndSwapRefField_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + bool result = postMarkingBarrier->CompareAndSwapRefField( + oldObj, oldField, oldObj, newObj, std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); + EXPECT_EQ(oldField.GetFieldValue(), neWAddress); +} + +HWTEST_F_L0(PostMarkingBarrierTest, CompareAndSwapRefField_TEST2) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress oldAddr = 
HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + + MAddress initialAddress = oldField.GetFieldValue(); + + bool result = postMarkingBarrier->CompareAndSwapRefField( + oldObj, oldField, oldObj, oldObj, std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); + EXPECT_EQ(oldField.GetFieldValue(), initialAddress); +} + +HWTEST_F_L0(PostMarkingBarrierTest, CompareAndSwapRefField_TEST3) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + bool result = postMarkingBarrier->CompareAndSwapRefField(oldObj, newField, oldObj, newObj, + std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_FALSE(result); +} + +HWTEST_F_L0(PostMarkingBarrierTest, CopyStructArray_TEST1) +{ + MockCollector collector; + auto postMarkingBarrier = std::make_unique(collector); + ASSERT_TRUE(postMarkingBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* 
obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + postMarkingBarrier->CopyStructArray(obj, dst, sizeof(BaseObject), obj, src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} +} // namespace common::test diff --git a/common_components/heap/ark_collector/tests/preforward_barrier_test.cpp b/common_components/heap/ark_collector/tests/preforward_barrier_test.cpp new file mode 100755 index 0000000000000000000000000000000000000000..11851db59c9f7f4dbacc0a40ecd409b64b4f076b --- /dev/null +++ b/common_components/heap/ark_collector/tests/preforward_barrier_test.cpp @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/heap/ark_collector/preforward_barrier.h" +#include "common_components/heap/ark_collector/tests/mock_barrier_collector.h" +#include "common_components/mutator/mutator_manager.h" +#include "common_components/tests/test_helper.h" +#include "common_components/heap/heap_manager.h" +#include "common_interfaces/base_runtime.h" + +using namespace common; + +namespace common::test { +class PreforwardBarrierTest : public BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() {} + + void SetUp() override + { + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::ARK_PROCESSOR); + } + + void TearDown() override + { + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::ARK_PROCESSOR); + } +}; + +HWTEST_F_L0(PreforwardBarrierTest, ReadRefField_TEST1) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject* resultObj = preforwardBarrier->ReadRefField(obj, field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(PreforwardBarrierTest, ReadRefField_TEST2) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject* resultObj = preforwardBarrier->ReadRefField(nullptr, field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(PreforwardBarrierTest, ReadRefField_TEST3) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + 
ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj, true); + + BaseObject* resultObj = preforwardBarrier->ReadRefField(obj, field); + ASSERT_TRUE(resultObj != nullptr); + constexpr uint64_t TAG_WEAK = 0x01ULL; + BaseObject* newObj = reinterpret_cast(reinterpret_cast(obj) | TAG_WEAK); + EXPECT_EQ(resultObj, newObj); +} + +HWTEST_F_L0(PreforwardBarrierTest, ReadRefField_TEST4) +{ + MockCollectorForwardTest collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(nullptr); + + BaseObject* resultObj = preforwardBarrier->ReadRefField(obj, field); + ASSERT_TRUE(resultObj == nullptr); +} + +HWTEST_F_L0(PreforwardBarrierTest, ReadRefField_TEST5) +{ + MockCollectorForwardTest collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj, true); + + BaseObject* resultObj = preforwardBarrier->ReadRefField(obj, field); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(PreforwardBarrierTest, ReadStaticRef_TEST1) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject* resultObj = preforwardBarrier->ReadStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(PreforwardBarrierTest, 
ReadStringTableStaticRef_TEST1) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + RefField field(nullptr); + + BaseObject* resultObj = preforwardBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj == nullptr); +} + +HWTEST_F_L0(PreforwardBarrierTest, ReadStringTableStaticRef_TEST2) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RegionDesc *regionInfo = RegionDesc::GetRegionDescAt(addr); + regionInfo->SetRegionAllocPtr(addr - 1); + regionInfo->SetMarkingLine(); + RefField field(obj); + + BaseObject* resultObj = preforwardBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(PreforwardBarrierTest, ReadStringTableStaticRef_TEST3) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RegionDesc *regionInfo = RegionDesc::GetRegionDescAt(addr); + regionInfo->SetRegionType(RegionDesc::RegionType::ALIVE_REGION_FIRST); + RefField field(obj); + + BaseObject* resultObj = preforwardBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj == nullptr); +} + +HWTEST_F_L0(PreforwardBarrierTest, ReadStringTableStaticRef_TEST4) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + Heap::GetHeap().SetGCReason(GC_REASON_YOUNG); + BaseObject* 
resultObj = preforwardBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(PreforwardBarrierTest, ReadStruct_TEST1) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + preforwardBarrier->ReadStruct(dst, obj, src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(PreforwardBarrierTest, ReadStruct_TEST2) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + preforwardBarrier->ReadStruct(dst, nullptr, src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(PreforwardBarrierTest, AtomicReadRefField_TEST1) +{ + MockCollector collector; + auto preforwardBarrier = 
std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + constexpr size_t size = 100; + obj->SetSizeForwarded(size); + EXPECT_EQ(obj->GetSizeForwarded(), size); + RefField field(obj); + + BaseObject* resultObj = nullptr; + resultObj = preforwardBarrier->AtomicReadRefField(obj, field, std::memory_order_seq_cst); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(PreforwardBarrierTest, AtomicWriteRefField_TEST1) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + preforwardBarrier->AtomicWriteRefField(oldObj, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), neWAddress); +} + +HWTEST_F_L0(PreforwardBarrierTest, AtomicWriteRefField_TEST2) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + 
oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + preforwardBarrier->AtomicWriteRefField(nullptr, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), neWAddress); +} + +HWTEST_F_L0(PreforwardBarrierTest, CompareAndSwapRefField_TEST1) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + bool result = preforwardBarrier->CompareAndSwapRefField(oldObj, oldField, oldObj, newObj, + std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); +} + +HWTEST_F_L0(PreforwardBarrierTest, CompareAndSwapRefField_TEST2) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + 
ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + + bool result = preforwardBarrier->CompareAndSwapRefField(oldObj, oldField, oldObj, oldObj, + std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); +} + +HWTEST_F_L0(PreforwardBarrierTest, CompareAndSwapRefField_TEST3) +{ + MockCollector collector; + auto preforwardBarrier = std::make_unique(collector); + ASSERT_TRUE(preforwardBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + bool result = preforwardBarrier->CompareAndSwapRefField(oldObj, newField, oldObj, newObj, + std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_FALSE(result); +} +} // namespace common::test diff --git a/common_components/heap/ark_collector/tests/remark_barrier_test.cpp b/common_components/heap/ark_collector/tests/remark_barrier_test.cpp new file mode 100755 index 0000000000000000000000000000000000000000..0aaea4d50cb0caf9a4ee88ef538f6795ca9603cf --- /dev/null +++ 
b/common_components/heap/ark_collector/tests/remark_barrier_test.cpp @@ -0,0 +1,502 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/heap/ark_collector/remark_barrier.h" +#include "common_components/heap/ark_collector/tests/mock_barrier_collector.h" +#include "common_components/mutator/mutator_manager.h" +#include "common_components/tests/test_helper.h" +#include "common_components/heap/heap_manager.h" +#include "common_interfaces/base_runtime.h" + +using namespace common; + +namespace common::test { +class RemarkBarrierTest : public BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() {} + + void SetUp() override + { + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::ARK_PROCESSOR); + } + + void TearDown() override + { + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::ARK_PROCESSOR); + } +}; + +HWTEST_F_L0(RemarkBarrierTest, ReadRefField_TEST1) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject *resultObj = remarkBarrier->ReadRefField(obj, field); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(RemarkBarrierTest, 
ReadRefField_TEST2) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject *resultObj = remarkBarrier->ReadRefField(nullptr, field); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(RemarkBarrierTest, ReadStaticRef_TEST1) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + BaseObject *resultObj = remarkBarrier->ReadStaticRef(field); + EXPECT_EQ(resultObj, obj); +} + +HWTEST_F_L0(RemarkBarrierTest, ReadStringTableStaticRef_TEST1) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + RefField field(nullptr); + + BaseObject* resultObj = remarkBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj == nullptr); +} + +HWTEST_F_L0(RemarkBarrierTest, ReadStringTableStaticRef_TEST2) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RegionDesc *regionInfo = RegionDesc::GetRegionDescAt(addr); + regionInfo->SetRegionAllocPtr(addr - 1); + regionInfo->SetMarkingLine(); + RefField field(obj); + + BaseObject* resultObj = remarkBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(RemarkBarrierTest, ReadStringTableStaticRef_TEST3) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + 
HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RegionDesc *regionInfo = RegionDesc::GetRegionDescAt(addr); + regionInfo->SetRegionType(RegionDesc::RegionType::ALIVE_REGION_FIRST); + RefField field(obj); + + BaseObject* resultObj = remarkBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj == nullptr); +} + +HWTEST_F_L0(RemarkBarrierTest, ReadStringTableStaticRef_TEST4) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField field(obj); + + Heap::GetHeap().SetGCReason(GC_REASON_YOUNG); + BaseObject* resultObj = remarkBarrier->ReadStringTableStaticRef(field); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(RemarkBarrierTest, ReadStruct_TEST1) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + remarkBarrier->ReadStruct(dst, obj, src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(RemarkBarrierTest, WriteRefField_TEST1) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); 
+ ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + + RefField field(oldObj); + BaseObject *target = field.GetTargetObject(); + EXPECT_TRUE(target != nullptr); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + + remarkBarrier->WriteRefField(oldObj, field, newObj); + + MAddress newAddress = field.GetFieldValue(); + MAddress expectedAddress = RefField<>(newObj).GetFieldValue(); + EXPECT_EQ(newAddress, expectedAddress); +} + +HWTEST_F_L0(RemarkBarrierTest, WriteRefField_TEST2) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + + RefField field(MAddress(0)); + BaseObject *target = field.GetTargetObject(); + EXPECT_TRUE(target == nullptr); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + + remarkBarrier->WriteRefField(oldObj, field, newObj); + + MAddress newAddress = field.GetFieldValue(); + MAddress expectedAddress = RefField<>(newObj).GetFieldValue(); + EXPECT_EQ(newAddress, expectedAddress); +} + 
+HWTEST_F_L0(RemarkBarrierTest, WriteBarrier_TEST1) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + +#ifndef ARK_USE_SATB_BARRIER + constexpr uint64_t TAG_BITS_SHIFT = 48; + constexpr uint64_t TAG_MARK = 0xFFFFULL << TAG_BITS_SHIFT; + constexpr uint64_t TAG_SPECIAL = 0x02ULL; + constexpr uint64_t TAG_BOOLEAN = 0x04ULL; + constexpr uint64_t TAG_HEAP_OBJECT_MASK = TAG_MARK | TAG_SPECIAL | TAG_BOOLEAN; + + RefField<> field(MAddress(0)); + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + addr |= TAG_HEAP_OBJECT_MASK; + BaseObject* obj = reinterpret_cast(addr); + remarkBarrier->WriteBarrier(obj, field, obj); + EXPECT_TRUE(obj != nullptr); +#endif +} + +HWTEST_F_L0(RemarkBarrierTest, WriteBarrier_TEST2) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + +#ifdef ARK_USE_SATB_BARRIER + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField normalField(obj); + remarkBarrier->WriteBarrier(obj, normalField, obj); + EXPECT_TRUE(obj != nullptr); + + BaseObject weakObj; + RefField weakField(MAddress(0)); + remarkBarrier->WriteBarrier(&weakObj, weakField, &weakObj); + EXPECT_TRUE(weakObj != nullptr); + + BaseObject nonTaggedObj; + RefField nonTaggedField(&nonTaggedObj); + remarkBarrier->WriteBarrier(nullptr, nonTaggedField, &nonTaggedObj); + EXPECT_TRUE(nonTaggedObj != nullptr); +#endif +} + +HWTEST_F_L0(RemarkBarrierTest, WriteBarrier_TEST3) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + +#ifndef ARK_USE_SATB_BARRIER + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + RefField<> field(obj); + 
remarkBarrier->WriteBarrier(obj, field, obj); + EXPECT_TRUE(obj != nullptr); +#endif +} + +HWTEST_F_L0(RemarkBarrierTest, WriteStruct_TEST1) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + remarkBarrier->WriteStruct(obj, dst, sizeof(BaseObject), src, sizeof(BaseObject)); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(RemarkBarrierTest, WriteStruct_TEST2) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + HeapAddress src = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* srcObj = reinterpret_cast(src); + srcObj->SetForwardState(BaseStateWord::ForwardState::FORWARDING); + HeapAddress dst = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* dstObj = reinterpret_cast(dst); + dstObj->SetForwardState(BaseStateWord::ForwardState::FORWARDED); + EXPECT_NE(dstObj->IsForwarding(), srcObj->IsForwarding()); + + auto mutator = ThreadLocal::GetMutator(); + ThreadLocal::SetMutator(nullptr); + remarkBarrier->WriteStruct(obj, dst, sizeof(BaseObject), src, 
sizeof(BaseObject)); + ThreadLocal::SetMutator(mutator); + EXPECT_EQ(dstObj->IsForwarding(), srcObj->IsForwarding()); +} + +HWTEST_F_L0(RemarkBarrierTest, AtomicReadRefField_TEST1) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* obj = reinterpret_cast(addr); + constexpr size_t size = 100; + obj->SetSizeForwarded(size); + EXPECT_EQ(obj->GetSizeForwarded(), size); + RefField field(obj); + + BaseObject *resultObj = nullptr; + resultObj = remarkBarrier->AtomicReadRefField(obj, field, std::memory_order_seq_cst); + ASSERT_TRUE(resultObj != nullptr); +} + +HWTEST_F_L0(RemarkBarrierTest, AtomicWriteRefField_TEST1) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + remarkBarrier->AtomicWriteRefField(oldObj, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), neWAddress); +} + +HWTEST_F_L0(RemarkBarrierTest, AtomicWriteRefField_TEST2) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); 
+ + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + remarkBarrier->AtomicWriteRefField(nullptr, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), neWAddress); +} + +HWTEST_F_L0(RemarkBarrierTest, AtomicSwapRefField_TEST1) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + BaseObject *resultObj = nullptr; + resultObj = remarkBarrier->AtomicSwapRefField(oldObj, oldField, newObj, std::memory_order_relaxed); + EXPECT_EQ(oldField.GetFieldValue(), 
newField.GetFieldValue()); +} + +HWTEST_F_L0(RemarkBarrierTest, CompareAndSwapRefField_TEST1) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + bool result = remarkBarrier->CompareAndSwapRefField( + oldObj, oldField, oldObj, newObj, std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); +} + +HWTEST_F_L0(RemarkBarrierTest, CompareAndSwapRefField_TEST2) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + + bool result = remarkBarrier->CompareAndSwapRefField( + oldObj, oldField, oldObj, oldObj, std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_TRUE(result); +} + +HWTEST_F_L0(RemarkBarrierTest, CompareAndSwapRefField_TEST3) +{ + MockCollector collector; + auto remarkBarrier = std::make_unique(collector); + ASSERT_TRUE(remarkBarrier != nullptr); + + 
HeapAddress oldAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* oldObj = reinterpret_cast(oldAddr); + constexpr size_t oldSize = 100; + oldObj->SetSizeForwarded(oldSize); + EXPECT_EQ(oldObj->GetSizeForwarded(), oldSize); + RefField oldField(oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + + HeapAddress newAddr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject* newObj = reinterpret_cast(newAddr); + constexpr size_t newSize = 200; + newObj->SetSizeForwarded(newSize); + EXPECT_EQ(newObj->GetSizeForwarded(), newSize); + RefField newField(newObj); + MAddress neWAddress = newField.GetFieldValue(); + EXPECT_NE(oldAddress, neWAddress); + + bool result = remarkBarrier->CompareAndSwapRefField( + oldObj, newField, oldObj, newObj, std::memory_order_seq_cst, std::memory_order_seq_cst); + ASSERT_FALSE(result); +} +} // namespace common::test diff --git a/common_components/heap/barrier/barrier.cpp b/common_components/heap/barrier/barrier.cpp index 2d7d02983684b3ceeecc630ac6d8fdf79c288359..64dbf856385da26d254985eadace264246b401f9 100755 --- a/common_components/heap/barrier/barrier.cpp +++ b/common_components/heap/barrier/barrier.cpp @@ -36,6 +36,11 @@ void Barrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObject* DLOG(BARRIER, "write obj %p ref-field@%p: %p => %p", obj, &field, field.GetTargetObject(), ref); } +void Barrier::WriteRoot(BaseObject *obj) const +{ + DLOG(BARRIER, "write root obj %p", obj); +} + void Barrier::WriteStruct(BaseObject* obj, HeapAddress dst, size_t dstLen, HeapAddress src, size_t srcLen) const { LOGF_CHECK(memcpy_s(reinterpret_cast(dst), dstLen, reinterpret_cast(src), srcLen) == EOK) << diff --git a/common_components/heap/barrier/barrier.h b/common_components/heap/barrier/barrier.h index 84e78ecc557a162160dc5ebb7d3251a0be43b3b3..db753a32cf248828b0fa95d7396bf33a85b2a6b0 100755 --- a/common_components/heap/barrier/barrier.h +++ 
b/common_components/heap/barrier/barrier.h @@ -35,6 +35,7 @@ public: virtual void ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, size_t size) const; + virtual void WriteRoot(BaseObject* obj) const; virtual void WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const; virtual void WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const; diff --git a/common_components/heap/barrier/tests/barrier_test.cpp b/common_components/heap/barrier/tests/barrier_test.cpp index d78a62b39bed28db45bf1ebd3b6dc962bd630ef7..f5f361144535d0724322d91bcb465f7180c6dc9f 100644 --- a/common_components/heap/barrier/tests/barrier_test.cpp +++ b/common_components/heap/barrier/tests/barrier_test.cpp @@ -41,7 +41,7 @@ public: class MockCollector : public Collector { public: void Init(const RuntimeParam& param) override {} - void RunGarbageCollection(uint64_t, GCReason) override {} + void RunGarbageCollection(uint64_t, GCReason, GCType) override {} BaseObject* ForwardObject(BaseObject*) override { return nullptr; @@ -106,7 +106,6 @@ protected: } }; - HWTEST_F_L0(BarrierTest, ReadRefField_ReturnsExpectedValue) { uint64_t value = reinterpret_cast(dummyObj.get()); RefField field(value); @@ -115,7 +114,6 @@ HWTEST_F_L0(BarrierTest, ReadRefField_ReturnsExpectedValue) { EXPECT_EQ(result, dummyObj.get()); } - HWTEST_F_L0(BarrierTest, WriteRefField_SetsTargetObject) { uint64_t initValue = 0; RefField field(initValue); @@ -125,7 +123,6 @@ HWTEST_F_L0(BarrierTest, WriteRefField_SetsTargetObject) { EXPECT_EQ(field.GetTargetObject(), newRef); } - HWTEST_F_L0(BarrierTest, WriteStaticRef_SetsTargetObject) { uint64_t initValue = 0; RefField field(initValue); @@ -135,7 +132,6 @@ HWTEST_F_L0(BarrierTest, WriteStaticRef_SetsTargetObject) { EXPECT_EQ(field.GetTargetObject(), newRef); } - HWTEST_F_L0(BarrierTest, AtomicWriteRefField_UpdatesWithMemoryOrder) { uint64_t initValue = 0; RefField field(initValue); @@ -145,7 +141,6 @@ HWTEST_F_L0(BarrierTest, 
AtomicWriteRefField_UpdatesWithMemoryOrder) { EXPECT_EQ(field.GetTargetObject(std::memory_order_relaxed), newRef); } - HWTEST_F_L0(BarrierTest, CompareAndSwapRefField_WorksWithSuccessAndFailure) { uint64_t initValue = 0; RefField field(initValue); @@ -156,4 +151,117 @@ HWTEST_F_L0(BarrierTest, CompareAndSwapRefField_WorksWithSuccessAndFailure) { std::memory_order_seq_cst, std::memory_order_relaxed); EXPECT_TRUE(result); EXPECT_EQ(field.GetTargetObject(std::memory_order_relaxed), newRef); +} + +HWTEST_F_L0(BarrierTest, WriteStruct_HandlesDifferentLengths) { + size_t srcBufferSize = 512; + size_t dstBufferSize = 1024; + char* srcBuffer = new char[srcBufferSize]; + char* dstBuffer = new char[dstBufferSize]; + + for (size_t i = 0; i < srcBufferSize; ++i) { + srcBuffer[i] = static_cast(i % 256); + } + + barrier.WriteStruct(nullptr, reinterpret_cast(dstBuffer), dstBufferSize, + reinterpret_cast(srcBuffer), srcBufferSize); + + EXPECT_EQ(memcmp(dstBuffer, srcBuffer, srcBufferSize), 0); + + for (size_t i = srcBufferSize; i < dstBufferSize; ++i) { + EXPECT_EQ(dstBuffer[i], 0); + } + + delete[] srcBuffer; + delete[] dstBuffer; +} + +HWTEST_F_L0(BarrierTest, ReadStaticRef_ReturnsExpectedValue) { + uint64_t value = reinterpret_cast(dummyObj.get()); + RefField field(value); + + BaseObject* result = barrier.ReadStaticRef(field); + EXPECT_EQ(result, dummyObj.get()); +} + +HWTEST_F_L0(BarrierTest, AtomicSwapRefField_ExchangesCorrectly) { + uint64_t initValue = reinterpret_cast(dummyObj.get()); + RefField field(initValue); + BaseObject* newRef = reinterpret_cast(0x1234567890); + BaseObject* oldValue = barrier.AtomicSwapRefField(nullptr, field, newRef, std::memory_order_seq_cst); + + EXPECT_EQ(oldValue, dummyObj.get()); + EXPECT_EQ(field.GetTargetObject(std::memory_order_relaxed), newRef); +} + +HWTEST_F_L0(BarrierTest, AtomicReadRefField_ReadsCorrectly) { + uint64_t initValue = reinterpret_cast(dummyObj.get()); + RefField field(initValue); + + BaseObject* value = 
barrier.AtomicReadRefField(nullptr, field, std::memory_order_seq_cst); + + EXPECT_EQ(value, dummyObj.get()); +} + +HWTEST_F_L0(BarrierTest, CopyStructArray_CopiesDataCorrectly) { + constexpr size_t arraySize = 100; + char* srcBuffer = new char[arraySize]; + char* dstBuffer = new char[arraySize]; + + for (size_t i = 0; i < arraySize; ++i) { + srcBuffer[i] = static_cast(i % 256); + } + + BaseObject srcObj; + BaseObject dstObj; + + HeapAddress srcFieldAddr = reinterpret_cast(srcBuffer); + HeapAddress dstFieldAddr = reinterpret_cast(dstBuffer); + + barrier.CopyStructArray(&dstObj, dstFieldAddr, arraySize, &srcObj, srcFieldAddr, arraySize); + + EXPECT_EQ(memcmp(dstBuffer, srcBuffer, arraySize), 0); + + delete[] srcBuffer; + delete[] dstBuffer; +} + +HWTEST_F_L0(BarrierTest, ReadStruct_ReadsCorrectly) { + struct TestStruct { + int a; + double b; + }; + + TestStruct* initValue = new TestStruct{42, 3.14}; + RefField field(reinterpret_cast(initValue)); + + char dstBuffer[sizeof(TestStruct)]; + HeapAddress dstAddr = reinterpret_cast(dstBuffer); + + BaseObject dummyObj; + HeapAddress srcAddr = reinterpret_cast(field.GetTargetObject()); + + barrier.ReadStruct(dstAddr, &dummyObj, srcAddr, sizeof(TestStruct)); + + TestStruct* result = reinterpret_cast(dstBuffer); + EXPECT_EQ(result->a, initValue->a); + EXPECT_EQ(result->b, initValue->b); + + delete initValue; +} + +HWTEST_F_L0(BarrierTest, AtomicWriteStaticRef_NonConcurrent) +{ + DummyObject* targetObj = new DummyObject(); + DummyObject* initialObj = new DummyObject(); + + RefField field(reinterpret_cast(0x1)); + field.SetTargetObject(initialObj); + + MockCollector collector; + Barrier barrier(collector); + + barrier.AtomicWriteRefField(nullptr, field, targetObj, std::memory_order_relaxed); + + EXPECT_EQ(field.GetTargetObject(std::memory_order_relaxed), targetObj); } \ No newline at end of file diff --git a/common_components/heap/collector/collector.cpp b/common_components/heap/collector/collector.cpp index 
bc0652ebd281032a217be10167dbd5abdfd71dfd..fd6ee9f6c452414cefaa6e26080bd3f612bcbe83 100755 --- a/common_components/heap/collector/collector.cpp +++ b/common_components/heap/collector/collector.cpp @@ -36,10 +36,10 @@ const char* Collector::GetGCPhaseName(GCPhase phase) "stub phase", // reserved "start phase", // GC_PHASE_START "enum phase", // GC_PHASE_ENUM - "trace phase", // GC_PHASE_MARK + "marking phase", // GC_PHASE_MARK "remark-satb phase", // GC_PHASE_REMARK_SATB "final-mark phase", // GC_PHASE_FINAL_MARK - "post-trace phase", // GC_PHASE_POST_MARK + "post-marking phase", // GC_PHASE_POST_MARK "pre-copy phase", // GC_PHASE_PRECOPY "copy phase", // GC_PHASE_COPY "fix phase", // GC_PHASE_FIX @@ -51,9 +51,9 @@ Collector::Collector() {} const char* Collector::GetCollectorName() const { return COLLECTOR_NAME[collectorType_]; } -void Collector::RequestGC(GCReason reason, bool async) +void Collector::RequestGC(GCReason reason, bool async, GCType gcType) { - RequestGCInternal(reason, async); + RequestGCInternal(reason, async, gcType); return; } } // namespace common. diff --git a/common_components/heap/collector/collector.h b/common_components/heap/collector/collector.h index 519c3d10e09c5f5c1930b9a259ce168fb7ec9ec3..fe65398193a5b0ac6cfbb3aab5ecf5f4cba4cafb 100755 --- a/common_components/heap/collector/collector.h +++ b/common_components/heap/collector/collector.h @@ -55,7 +55,7 @@ public: // reason: Reason for GC. // async: Trigger from unsafe context, e.g., holding a lock, in the middle of an allocation. // In order to prevent deadlocks, async trigger only add one async gc task and will not block. 
- void RequestGC(GCReason reason, bool async); + void RequestGC(GCReason reason, bool async, GCType gcType); virtual GCPhase GetGCPhase() const { return gcPhase_.load(std::memory_order_acquire); } @@ -63,7 +63,7 @@ public: virtual void FixObjectRefFields(BaseObject*) const {} - virtual void RunGarbageCollection(uint64_t, GCReason) = 0; + virtual void RunGarbageCollection(uint64_t, GCReason, GCType) = 0; virtual GCStats& GetGCStats() { @@ -100,7 +100,7 @@ public: }; protected: - virtual void RequestGCInternal(GCReason, bool) + virtual void RequestGCInternal(GCReason, bool, GCType) { LOG_COMMON(FATAL) << "Unresolved fatal"; UNREACHABLE_CC(); diff --git a/common_components/heap/collector/collector_proxy.cpp b/common_components/heap/collector/collector_proxy.cpp index 27476aab7c036cd41cbe36549d65d3b6624f578b..89720ead1f635f6d6f66938740de02deeb77156c 100755 --- a/common_components/heap/collector/collector_proxy.cpp +++ b/common_components/heap/collector/collector_proxy.cpp @@ -17,32 +17,32 @@ namespace common { void CollectorProxy::Init(const RuntimeParam& param) { - wCollector_.Init(param); + arkCollector_.Init(param); if (currentCollector_ == nullptr) { - currentCollector_ = &wCollector_; + currentCollector_ = &arkCollector_; } } -void CollectorProxy::Fini() { wCollector_.Fini(); } +void CollectorProxy::Fini() { arkCollector_.Fini(); } -void CollectorProxy::RunGarbageCollection(uint64_t gcIndex, GCReason reason) +void CollectorProxy::RunGarbageCollection(uint64_t gcIndex, GCReason reason, GCType gcType) { switch (reason) { case GC_REASON_HEU: case GC_REASON_YOUNG: case GC_REASON_BACKUP: - currentCollector_ = &wCollector_; + currentCollector_ = &arkCollector_; break; case GC_REASON_OOM: case GC_REASON_FORCE: - currentCollector_ = &wCollector_; + currentCollector_ = &arkCollector_; break; default: - currentCollector_ = &wCollector_; + currentCollector_ = &arkCollector_; break; } - currentCollector_->SetGcStarted(true); - currentCollector_->RunGarbageCollection(gcIndex, 
reason); + currentCollector_->MarkGCStart(); + currentCollector_->RunGarbageCollection(gcIndex, reason, gcType); } } // namespace common diff --git a/common_components/heap/collector/collector_proxy.h b/common_components/heap/collector/collector_proxy.h index 317bca8bd9aea84fb036b6bfa8851ff1a92c60bc..cab99bbf034e617b5e8c6bc2dc66e2d0aa305e5c 100755 --- a/common_components/heap/collector/collector_proxy.h +++ b/common_components/heap/collector/collector_proxy.h @@ -16,9 +16,9 @@ #ifndef COMMON_COMPONENTS_HEAP_COLLECTOR_COLLECTOR_PROXY_H #define COMMON_COMPONENTS_HEAP_COLLECTOR_COLLECTOR_PROXY_H +#include "common_components/heap/ark_collector/ark_collector.h" #include "common_components/heap/collector/collector.h" #include "common_components/heap/collector/collector_resources.h" -#include "common_components/heap/w_collector/w_collector.h" namespace common { // CollectorProxy is a special kind of collector, it is derived from Base class Collector, thus behaves like a real @@ -28,7 +28,7 @@ namespace common { class CollectorProxy : public Collector { public: explicit CollectorProxy(Allocator& allocator, CollectorResources& resources) - : wCollector_(allocator, resources) + : arkCollector_(allocator, resources) { collectorType_ = CollectorType::PROXY_COLLECTOR; } @@ -43,11 +43,11 @@ public: void SetGCPhase(const GCPhase phase) override { currentCollector_->SetGCPhase(phase); } // dispatch garbage collection to the right collector - PUBLIC_API void RunGarbageCollection(uint64_t gcIndex, GCReason reason) override; + PUBLIC_API void RunGarbageCollection(uint64_t gcIndex, GCReason reason, GCType gcType) override; bool ShouldIgnoreRequest(GCRequest& request) override { return currentCollector_->ShouldIgnoreRequest(request); } - TraceCollector& GetCurrentCollector() const { return *currentCollector_; } + MarkingCollector& GetCurrentCollector() const { return *currentCollector_; } BaseObject* FindToVersion(BaseObject* obj) const override { return 
currentCollector_->FindToVersion(obj); } @@ -87,8 +87,8 @@ public: private: // supported collector set - TraceCollector* currentCollector_ = nullptr; - WCollector wCollector_; + ArkCollector* currentCollector_ = nullptr; + ArkCollector arkCollector_; }; } // namespace common diff --git a/common_components/heap/collector/collector_resources.cpp b/common_components/heap/collector/collector_resources.cpp index 34adfa649bf95c506d3cd963dbc36b057111f766..e73d163b718c2d0f7dd845231f0c815172cf0a91 100755 --- a/common_components/heap/collector/collector_resources.cpp +++ b/common_components/heap/collector/collector_resources.cpp @@ -146,21 +146,21 @@ void CollectorResources::PostIgnoredGcRequest(GCReason reason) } } -void CollectorResources::RequestAsyncGC(GCReason reason) +void CollectorResources::RequestAsyncGC(GCReason reason, GCType gcType) { // The gc request must be none blocked ASSERT_LOGF(!g_gcRequests[reason].IsSyncGC(), "trigger from unsafe context must be none blocked"); - GCRunner gcTask(GCTask::GCTaskType::GC_TASK_INVOKE_GC, reason); + GCRunner gcTask(GCTask::GCTaskType::GC_TASK_INVOKE_GC, reason, gcType); // we use async enqueue because this doesn't have locks, lowering the risk // of timeouts when entering safe region due to thread scheduling taskQueue_->EnqueueAsync(gcTask); } -void CollectorResources::RequestGCAndWait(GCReason reason) +void CollectorResources::RequestGCAndWait(GCReason reason, GCType gcType) { // Enter saferegion since current thread may blocked by locks. 
ScopedEnterSaferegion enterSaferegion(false); - GCRunner gcTask(GCTask::GCTaskType::GC_TASK_INVOKE_GC, reason); + GCRunner gcTask(GCTask::GCTaskType::GC_TASK_INVOKE_GC, reason, gcType); GCTaskQueue::GCTaskFilter filter = [](GCRunner& oldTask, GCRunner& newTask) { return oldTask.GetGCReason() == newTask.GetGCReason(); @@ -184,7 +184,7 @@ void CollectorResources::RequestGCAndWait(GCReason reason) gcFinishedCondVar_.wait(lock, pred); } -void CollectorResources::RequestGC(GCReason reason, bool async) +void CollectorResources::RequestGC(GCReason reason, bool async, GCType gcType) { if (!IsGCActive()) { return; @@ -201,9 +201,9 @@ void CollectorResources::RequestGC(GCReason reason, bool async) if (reason == GCReason::GC_REASON_NATIVE) { SetIsNativeGCInvoked(true); } - RequestAsyncGC(reason); + RequestAsyncGC(reason, gcType); } else { - RequestGCAndWait(reason); + RequestGCAndWait(reason, gcType); } } @@ -218,6 +218,25 @@ void CollectorResources::NotifyGCFinished(uint64_t gcIndex) BroadcastGCFinished(); } +void CollectorResources::MarkGCStart() +{ + std::unique_lock lock(gcFinishedCondMutex_); + + // Wait for any existing GC to finish - inline the wait logic + std::function pred = [this] { + return !IsGcStarted(); + }; + gcFinishedCondVar_.wait(lock, pred); + + // Now claim GC ownership + SetGcStarted(true); +} + +void CollectorResources::MarkGCFinish(uint64_t gcIndex) +{ + NotifyGCFinished(gcIndex); +} + void CollectorResources::WaitForGCFinish() { uint64_t startTime = TimeUtil::MicroSeconds(); diff --git a/common_components/heap/collector/collector_resources.h b/common_components/heap/collector/collector_resources.h index 12e4a5c28f89ea00e49a86a2099bfa698226c60f..ebfd7e582d33dd8fddc30844929a7cdca6fcec04 100755 --- a/common_components/heap/collector/collector_resources.h +++ b/common_components/heap/collector/collector_resources.h @@ -36,13 +36,10 @@ public: void Init(); void Fini(); void StopGCWork(); - void RequestGC(GCReason reason, bool async); + void 
RequestGC(GCReason reason, bool async, GCType gcType); void WaitForGCFinish(); // gc main loop void RunTaskLoop(); - // Notify that GC has finished. - // Must be called by gc thread only - void NotifyGCFinished(uint64_t gcIndex); uint32_t GetGCThreadCount(const bool isConcurrent) const; Taskpool *GetThreadPool() const { return gcThreadPool_; } @@ -71,15 +68,21 @@ public: void StartRuntimeThreads(); void StopRuntimeThreads(); + void MarkGCStart(); + void MarkGCFinish(uint64_t gcIndex = 0); + private: + // Notify that GC has finished. + void NotifyGCFinished(uint64_t gcIndex); + void StartGCThreads(); void TerminateGCTask(); void StopGCThreads(); // Notify the GC thread to start GC, and wait. // Called by mutator. // reason: The reason for this GC. - void RequestAsyncGC(GCReason reason); - void RequestGCAndWait(GCReason reason); + void RequestAsyncGC(GCReason reason, GCType gcType); + void RequestGCAndWait(GCReason reason, GCType gcType); void PostIgnoredGcRequest(GCReason reason); // the thread pool for parallel tracing. diff --git a/common_components/heap/collector/copy_data_manager.cpp b/common_components/heap/collector/copy_data_manager.cpp index 7d7ce4583486f2c8ef2edb4264294236f54d35fe..4aeec4197d115dc8095c1b2b18e64bd7ec169ec5 100755 --- a/common_components/heap/collector/copy_data_manager.cpp +++ b/common_components/heap/collector/copy_data_manager.cpp @@ -12,6 +12,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +#include "base/common.h" #ifdef _WIN64 #include #include @@ -31,25 +32,25 @@ HeapBitmapManager& HeapBitmapManager::GetHeapBitmapManager() { return *forwardDa void HeapBitmapManager::InitializeHeapBitmap() { + DCHECK_CC(!initialized); size_t maxHeapBytes = Heap::GetHeap().GetMaxCapacity(); size_t heapBitmapSize = RoundUp(GetHeapBitmapSize(maxHeapBytes), COMMON_PAGE_SIZE); allHeapBitmapSize_ = heapBitmapSize; #ifdef _WIN64 void* startAddress = VirtualAlloc(NULL, allHeapBitmapSize_, MEM_RESERVE, PAGE_READWRITE); - if (startAddress == NULL) { + if (startAddress == NULL) { //LCOV_EXCL_BR_LINE LOG_COMMON(FATAL) << "failed to initialize HeapBitmapManager"; UNREACHABLE_CC(); } #else void* startAddress = mmap(nullptr, allHeapBitmapSize_, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (startAddress == MAP_FAILED) { + if (startAddress == MAP_FAILED) { //LCOV_EXCL_BR_LINE LOG_COMMON(FATAL) << "failed to initialize HeapBitmapManager"; UNREACHABLE_CC(); } else { #ifndef __APPLE__ (void)madvise(startAddress, allHeapBitmapSize_, MADV_NOHUGEPAGE); - COMMON_PRCTL(startAddress, allHeapBitmapSize_, "forward_data"); #endif } #endif @@ -58,5 +59,21 @@ void HeapBitmapManager::InitializeHeapBitmap() heapBitmap_[0].InitializeMemory(heapBitmapStart_, heapBitmapSize, regionUnitCount_); os::PrctlSetVMA(startAddress, allHeapBitmapSize_, "ArkTS Heap CMCGC HeapBitMap"); + initialized = true; } + +void HeapBitmapManager::DestroyHeapBitmap() +{ +#ifdef _WIN64 + if (!VirtualFree(reinterpret_cast(heapBitmapStart_), 0, MEM_RELEASE)) { + LOG_COMMON(ERROR) << "VirtualFree error for HeapBitmapManager"; + } +#else + if (munmap(reinterpret_cast(heapBitmapStart_), allHeapBitmapSize_) != 0) { + LOG_COMMON(ERROR) << "munmap error for HeapBitmapManager"; + } +#endif + initialized = false; +} + } // namespace common diff --git a/common_components/heap/collector/copy_data_manager.h b/common_components/heap/collector/copy_data_manager.h index 
2165073761a32627b28bc1a069c04857ac6ec065..74f3e508ea6238d4dd7ab1fa87b0832507b6eab2 100755 --- a/common_components/heap/collector/copy_data_manager.h +++ b/common_components/heap/collector/copy_data_manager.h @@ -16,6 +16,7 @@ #ifndef COMMON_COMPONENTS_HEAP_COLLECTOR_COPY_DATA_MANAGER_H #define COMMON_COMPONENTS_HEAP_COLLECTOR_COPY_DATA_MANAGER_H +#include "base/common.h" #include "common_components/base/immortal_wrapper.h" #include "common_components/heap/heap.h" #if defined(__linux__) || defined(PANDA_TARGET_OHOS) || defined(__APPLE__) @@ -43,7 +44,6 @@ class HeapBitmapManager { public: struct Zone { enum ZoneType : size_t { - LIVE_INFO, BIT_MAP, ZONE_TYPE_CNT, }; @@ -60,12 +60,6 @@ class HeapBitmapManager { void InitZones(size_t unitCount) { uintptr_t start = startAddress_; - allocZone_[Zone::ZoneType::LIVE_INFO].zoneStartAddress = start; - allocZone_[Zone::ZoneType::LIVE_INFO].zonePosition = start; -#if defined(_WIN64) - lastCommitEndAddr[Zone::ZoneType::LIVE_INFO].store(start); -#endif - start += unitCount * sizeof(RegionLiveDesc); allocZone_[Zone::ZoneType::BIT_MAP].zoneStartAddress = start; allocZone_[Zone::ZoneType::BIT_MAP].zonePosition = start; #if defined(_WIN64) @@ -90,7 +84,9 @@ class HeapBitmapManager { allocSpinLock.Unlock(); return startAddr; #else - return allocZone_[type].zonePosition.fetch_add(sz); + auto address = allocZone_[type].zonePosition.fetch_add(sz); + MemorySet(address, sz, 0, sz); + return address; #endif } void ReleaseMemory() @@ -99,10 +95,8 @@ class HeapBitmapManager { LOGE_IF(UNLIKELY_CC(!VirtualFree(reinterpret_cast(startAddress_), size_, MEM_DECOMMIT))) << "VirtualFree failed in ReturnPage, errno: " << GetLastError(); #elif defined(__APPLE__) - MemorySet(startAddress_, size_, 0, size_); (void)madvise(reinterpret_cast(startAddress_), size_, MADV_DONTNEED); #else - MemorySet(startAddress_, size_, 0, size_); DLOG(REGION, "clear copy-data @[%#zx+%zu, %#zx)", startAddress_, size_, startAddress_ + size_); if 
(madvise(reinterpret_cast(startAddress_), size_, MADV_DONTNEED) == 0) { DLOG(REGION, "release copy-data @[%#zx+%zu, %#zx)", startAddress_, size_, startAddress_ + size_); @@ -130,20 +124,13 @@ public: HeapBitmapManager() = default; ~HeapBitmapManager() { -#ifdef _WIN64 - if (!VirtualFree(reinterpret_cast(heapBitmapStart_), 0, MEM_RELEASE)) { - LOG_COMMON(ERROR) << "VirtualFree error for HeapBitmapManager"; - } -#else - if (munmap(reinterpret_cast(heapBitmapStart_), allHeapBitmapSize_) != 0) { - LOG_COMMON(ERROR) << "munmap error for HeapBitmapManager"; - } -#endif + CHECK_CC(!initialized); } static HeapBitmapManager& GetHeapBitmapManager(); void InitializeHeapBitmap(); + void DestroyHeapBitmap(); void ClearHeapBitmap() { heapBitmap_[0].ReleaseMemory(); } @@ -157,12 +144,6 @@ public: return bitmap; } - RegionLiveDesc* AllocateRegionLiveDesc() - { - return reinterpret_cast( - heapBitmap_[0].Allocate(Memory::Zone::ZoneType::LIVE_INFO, sizeof(RegionLiveDesc))); - } - private: size_t GetHeapBitmapSize(size_t heapSize) { @@ -174,14 +155,14 @@ private: constexpr uint8_t bitMarksSize = 64; // 3 bitmaps for each region: markBitmap,resurrectBitmap, enqueueBitmap. 
constexpr uint8_t nBitmapTypes = 3; - return unitCnt * sizeof(RegionLiveDesc) + - unitCnt * (sizeof(RegionBitmap) + (REGION_UNIT_SIZE / bitMarksSize)) * nBitmapTypes; + return unitCnt * (sizeof(RegionBitmap) + (REGION_UNIT_SIZE / bitMarksSize)) * nBitmapTypes; } Memory heapBitmap_[1]; size_t regionUnitCount_ = 0; uintptr_t heapBitmapStart_ = 0; size_t allHeapBitmapSize_ = 0; + bool initialized = false; }; } // namespace common #endif // COMMON_COMPONENTS_HEAP_COLLECTOR_COPY_DATA_MANAGER_H diff --git a/common_components/heap/collector/gc_infos.h b/common_components/heap/collector/gc_infos.h index a3ef4cddc7940d9e77f78ccd9c6873fa3c984b6f..c8d9f72d985b6c61c789a4ab5c017a7d3b2f3a81 100755 --- a/common_components/heap/collector/gc_infos.h +++ b/common_components/heap/collector/gc_infos.h @@ -37,7 +37,7 @@ public: REG_ROOT, SLOT_ROOT, }; - static GCInfoNode BuildNodeForTrace(uintptr_t startIP, uintptr_t ip, FrameAddress* fa) + static GCInfoNode BuildNodeForMarking(uintptr_t startIP, uintptr_t ip, FrameAddress* fa) { CString time = TimeUtil::GetTimestamp(); @@ -151,15 +151,15 @@ private: class CurrentGCInfo { public: ~CurrentGCInfo(){}; - void PushFrameInfoForTrace(const GCInfoNode& frameGCInfo) { gcInfosForTrace.push_back(frameGCInfo); } - void PushFrameInfoForTrace(const GCInfoNode&& frameGCInfo) { gcInfosForTrace.push_back(frameGCInfo); } + void PushFrameInfoForMarking(const GCInfoNode& frameGCInfo) { gcInfosForMarking.push_back(frameGCInfo); } + void PushFrameInfoForMarking(const GCInfoNode&& frameGCInfo) { gcInfosForMarking.push_back(frameGCInfo); } void PushFrameInfoForFix(const GCInfoNodeForFix& frameGCInfo) { gcInfosForFix.push_back(frameGCInfo); } void PushFrameInfoForFix(const GCInfoNodeForFix&& frameGCInfo) { gcInfosForFix.push_back(frameGCInfo); } void Clear() { - gcInfosForTrace.clear(); - std::vector().swap(gcInfosForTrace); + gcInfosForMarking.clear(); + std::vector().swap(gcInfosForMarking); gcInfosForFix.clear(); std::vector().swap(gcInfosForFix); } @@ 
-168,13 +168,13 @@ public: DLOG(ENUM, " fix roots info:"); std::for_each(gcInfosForFix.begin(), gcInfosForFix.end(), [](const GCInfoNodeForFix& info) { info.DumpFrameGCInfo(); }); - DLOG(ENUM, " trace roots info:"); - std::for_each(gcInfosForTrace.begin(), gcInfosForTrace.end(), + DLOG(ENUM, " marking roots info:"); + std::for_each(gcInfosForMarking.begin(), gcInfosForMarking.end(), [](const GCInfoNode& info) { info.DumpFrameGCInfo(); }); } private: - std::vector gcInfosForTrace; + std::vector gcInfosForMarking; std::vector gcInfosForFix; }; class GCInfos { @@ -191,10 +191,15 @@ public: } gcInfos.push_back(CurrentGCInfo()); } - void PushFrameInfoForTrace(const GCInfoNode& frameGCInfo) { GetCurrentGCInfo().PushFrameInfoForTrace(frameGCInfo); } - void PushFrameInfoForTrace(const GCInfoNode&& frameGCInfo) + + void PushFrameInfoForMarking(const GCInfoNode& frameGCInfo) + { + GetCurrentGCInfo().PushFrameInfoForMarking(frameGCInfo); + } + + void PushFrameInfoForMarking(const GCInfoNode&& frameGCInfo) { - GetCurrentGCInfo().PushFrameInfoForTrace(frameGCInfo); + GetCurrentGCInfo().PushFrameInfoForMarking(frameGCInfo); } void PushFrameInfoForFix(const GCInfoNodeForFix& infoNodeFoxFix) diff --git a/common_components/heap/collector/gc_request.cpp b/common_components/heap/collector/gc_request.cpp index 0083af47cd17607b7e5a0c7ce3b789a37a9a2f4a..8b41abc3af9b1f8b994411edc5df1cb20cdabebd 100755 --- a/common_components/heap/collector/gc_request.cpp +++ b/common_components/heap/collector/gc_request.cpp @@ -34,8 +34,8 @@ inline bool GCRequest::IsFrequentGC() const inline bool GCRequest::IsFrequentAsyncGC() const { - int64_t now = static_cast(TimeUtil::NanoSeconds()); - return static_cast(now - GCStats::GetPrevGCFinishTime()) < minIntervelNs; + uint64_t now = TimeUtil::NanoSeconds(); + return (now - GCStats::GetPrevGCFinishTime()) < minIntervelNs; } // heuristic gc is triggered by object allocation, @@ -55,6 +55,7 @@ bool GCRequest::ShouldBeIgnored() const case GC_REASON_OOM: case 
GC_REASON_FORCE: return IsFrequentGC(); + // All other reasons (e.g. GC_REASON_XREF) are never ignored. + default: return false; } @@ -74,5 +75,6 @@ GCRequest g_gcRequests[] = { { GC_REASON_BACKGROUND, "backgound", false, true, LONG_MIN_HEU_GC_INTERVAL_NS, g_initHeuTriggerTimestamp }, { GC_REASON_HINT, "hint", false, true, LONG_MIN_HEU_GC_INTERVAL_NS, g_initHeuTriggerTimestamp }, { GC_REASON_XREF, "force_xref", true, false, 0, 0 }, + { GC_REASON_IDLE, "idle", false, true, LONG_MIN_HEU_GC_INTERVAL_NS, g_initHeuTriggerTimestamp } }; } // namespace common diff --git a/common_components/heap/collector/gc_request.h b/common_components/heap/collector/gc_request.h index 670faa1b7782025ba2ed2388ef23183cb65b9a29..b3269ee6ce0166d1faed894593a3d7c37c6020c9 100755 --- a/common_components/heap/collector/gc_request.h +++ b/common_components/heap/collector/gc_request.h @@ -19,35 +19,13 @@ #include #include "common_components/base/globals.h" +#include "common_interfaces/base_runtime.h" namespace common { // Minimum time between async GC (heuristic, native). constexpr uint64_t MIN_ASYNC_GC_INTERVAL_NS = SECOND_TO_NANO_SECOND; constexpr uint64_t LONG_MIN_HEU_GC_INTERVAL_NS = 200 * MILLI_SECOND_TO_NANO_SECOND; -// Used by Collector::RequestGC. -// It tells why GC is triggered. -// -// sync: Caller of Collector::RequestGC will wait until GC completes. -// async: Collector::RequestGC returns immediately and caller continues to run. -enum GCReason : uint32_t { - GC_REASON_USER = 0, // Triggered by user explicitly. - GC_REASON_OOM, // Out of memory. Failed to allocate object. - GC_REASON_BACKUP, // backup gc is triggered if no other reason triggers gc for a long time. - GC_REASON_HEU, // Statistics show it is worth doing GC. Does not have to be immediate. - GC_REASON_YOUNG, // Statistics show it is worth doing Young GC. Does not have to be immediate. - GC_REASON_NATIVE, // Native-Allocation-Registry shows it's worth doing GC. - GC_REASON_HEU_SYNC, // Just wait one gc request to reduce heap fragmentation. 
- GC_REASON_NATIVE_SYNC, // Just wait one gc request to reduce native heap consumption. - GC_REASON_FORCE, // force gc is triggered when runtime triggers gc actively. - GC_REASON_APPSPAWN, // appspawn gc is triggered when prefork. - GC_REASON_BACKGROUND, // trigger gc caused by switching to background. - GC_REASON_HINT, // trigger gc caused by hint gc. - GC_REASON_XREF, // force gc the whole heap include XRef. - GC_REASON_MAX, - GC_REASON_INVALID = std::numeric_limits::max(), -}; - struct GCRequest { const GCReason reason; const char* name; // Human-readable names of GC reasons. diff --git a/common_components/heap/collector/gc_stats.cpp b/common_components/heap/collector/gc_stats.cpp index 4c0280745622c144213b40492317ed851d383ab3..0f17a0afbe5be028bee11e26feba77e57c883bfa 100755 --- a/common_components/heap/collector/gc_stats.cpp +++ b/common_components/heap/collector/gc_stats.cpp @@ -34,18 +34,20 @@ void GCStats::Init() { isConcurrentMark = false; async = false; - stw1Time = 0; - stw2Time = 0; gcStartTime = TimeUtil::NanoSeconds(); gcEndTime = TimeUtil::NanoSeconds(); + + totalSTWTime = 0; + maxSTWTime = 0; + collectedObjects = 0; collectedBytes = 0; fromSpaceSize = 0; smallGarbageSize = 0; - pinnedSpaceSize = 0; - pinnedGarbageSize = 0; + nonMovableSpaceSize = 0; + nonMovableGarbageSize = 0; largeSpaceSize = 0; largeGarbageSize = 0; @@ -80,10 +82,11 @@ void GCStats::Dump() const std::string totalGCTime = PrettyOrderMathNano(gcEndTime - gcStartTime, "s"); std::ostringstream oss; oss << - "GC for " << g_gcRequests[reason].name << ": " << (async ? "async:" : "sync:") << " collected objects: " << - collectedObjects << "(" << collectedBytes << "->" << PrettyOrderInfo(collectedBytes, "B") << "), " << + "GC for " << g_gcRequests[reason].name << ": " << (async ? 
"async:" : "sync: ") << + "gcType: " << GCTypeToString(gcType) << ", collected bytes: " << + collectedBytes << "->" << PrettyOrderInfo(collectedBytes, "B") << ", " << "->" << PrettyOrderInfo(liveSize, "B") << "/" << heapSize << "->" << - PrettyOrderInfo(heapSize, "B") << "), max pause: " << MaxSTWTime() << + PrettyOrderInfo(heapSize, "B") << ", max pause: " << MaxSTWTime() << "->" << maxSTWTime << ", total pause: " << TotalSTWTime() << "->" << totalSTWTime << ", total GC time: " << (gcEndTime - gcStartTime) << "->" << totalGCTime; VLOG(INFO, oss.str().c_str()); diff --git a/common_components/heap/collector/gc_stats.h b/common_components/heap/collector/gc_stats.h index 69b016c89e5995df12f4282f316f691f3bc6159d..ceb21991b8dfa3789c51b3abcffba27366fd873e 100755 --- a/common_components/heap/collector/gc_stats.h +++ b/common_components/heap/collector/gc_stats.h @@ -25,6 +25,7 @@ #include "common_components/base/immortal_wrapper.h" #include "common_components/heap/collector/gc_request.h" #include "common_components/log/log.h" +#include "common_interfaces/base_runtime.h" namespace common { // statistics for previous gc. 
@@ -37,22 +38,14 @@ public: size_t GetThreshold() const { return heapThreshold; } - inline uint64_t TotalSTWTime() const - { - if (isConcurrentMark) { - return stw1Time + stw2Time; - } else { - return stw1Time; - } - } + inline uint64_t TotalSTWTime() const { return totalSTWTime; } - inline uint64_t MaxSTWTime() const + inline uint64_t MaxSTWTime() const { return maxSTWTime; } + + void recordSTWTime(uint64_t time) { - if (isConcurrentMark) { - return std::max(stw1Time, stw2Time); - } else { - return stw1Time; - } + totalSTWTime += time; + maxSTWTime = std::max(maxSTWTime, time); } void Dump() const; @@ -73,22 +66,24 @@ public: static uint64_t prevGcFinishTime; GCReason reason; + GCType gcType; bool isConcurrentMark; bool async; uint64_t gcStartTime; - uint64_t stw1Time; - uint64_t stw2Time; uint64_t gcEndTime; + uint64_t totalSTWTime; // total stw time(microseconds) + uint64_t maxSTWTime; // max stw time(microseconds) + size_t liveBytesBeforeGC; size_t liveBytesAfterGC; size_t fromSpaceSize; size_t smallGarbageSize; - size_t pinnedSpaceSize; - size_t pinnedGarbageSize; + size_t nonMovableSpaceSize; + size_t nonMovableGarbageSize; size_t largeSpaceSize; size_t largeGarbageSize; diff --git a/common_components/heap/collector/heuristic_gc_policy.cpp b/common_components/heap/collector/heuristic_gc_policy.cpp index 7001f340586f64f399792700c314eba68f55fcfa..51686fb115d0c943cc2abda30ec807f06a23b21f 100644 --- a/common_components/heap/collector/heuristic_gc_policy.cpp +++ b/common_components/heap/collector/heuristic_gc_policy.cpp @@ -28,44 +28,108 @@ void StartupStatusManager::OnAppStartup() Taskpool *threadPool = common::Taskpool::GetCurrentTaskpool(); threadPool->PostDelayedTask( std::make_unique(0, threadPool, STARTUP_DURATION_MS), STARTUP_DURATION_MS); + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, + "SmartGC: app startup just finished, CMC FinishGCRestrainTask create", ""); } void HeuristicGCPolicy::Init() { HeapParam &heapParam = BaseRuntime::GetInstance()->GetHeapParam(); 
heapSize_ = heapParam.heapSize * KB; +#ifndef PANDA_TARGET_32 + // 2: only half heapSize used allocate + heapSize_ = heapSize_ / 2; +#endif } -bool HeuristicGCPolicy::ShouldRestrainGCOnStartup() +bool HeuristicGCPolicy::ShouldRestrainGCOnStartupOrSensitive() { + // Startup + size_t allocated = Heap::GetHeap().GetAllocator().GetAllocatedBytes(); StartupStatus currentStatus = StartupStatusManager::GetStartupStatus(); - if (UNLIKELY_CC(currentStatus == StartupStatus::COLD_STARTUP)) { + if (UNLIKELY_CC(currentStatus == StartupStatus::COLD_STARTUP && + allocated < heapSize_ * COLD_STARTUP_PHASE1_GC_THRESHOLD_RATIO)) { return true; } - size_t allocated = Heap::GetHeap().GetAllocator().GetAllocatedBytes(); if (currentStatus == StartupStatus::COLD_STARTUP_PARTIALLY_FINISH && - allocated < heapSize_ * COLD_STARTUP_GC_THRESHOLD_RATIO) { + allocated < heapSize_ * COLD_STARTUP_PHASE2_GC_THRESHOLD_RATIO) { return true; } - return false; + // Sensitive + return ShouldRestrainGCInSensitive(allocated); +} + +StartupStatus HeuristicGCPolicy::GetStartupStatus() const +{ + return StartupStatusManager::GetStartupStatus(); } void HeuristicGCPolicy::TryHeuristicGC() { - if (UNLIKELY_CC(ShouldRestrainGCOnStartup())) { + if (UNLIKELY_CC(ShouldRestrainGCOnStartupOrSensitive())) { return; } + Collector& collector = Heap::GetHeap().GetCollector(); size_t threshold = collector.GetGCStats().GetThreshold(); size_t allocated = Heap::GetHeap().GetAllocator().GetAllocatedBytes(); if (allocated >= threshold) { if (collector.GetGCStats().shouldRequestYoung) { DLOG(ALLOC, "request heu gc: young %zu, threshold %zu", allocated, threshold); - collector.RequestGC(GC_REASON_YOUNG, true); + collector.RequestGC(GC_REASON_YOUNG, true, GC_TYPE_YOUNG); } else { DLOG(ALLOC, "request heu gc: allocated %zu, threshold %zu", allocated, threshold); - collector.RequestGC(GC_REASON_HEU, true); + collector.RequestGC(GC_REASON_HEU, true, GC_TYPE_FULL); + } + } +} + +void HeuristicGCPolicy::TryIdleGC() +{ + if 
(UNLIKELY_CC(ShouldRestrainGCOnStartupOrSensitive())) { + return; + } + + if (aliveSizeAfterGC_ == 0) { + return; + } + size_t allocated = Heap::GetHeap().GetAllocator().GetAllocatedBytes(); + size_t expectHeapSize = std::max(static_cast(aliveSizeAfterGC_ * IDLE_SPACE_SIZE_MIN_INC_RATIO), + aliveSizeAfterGC_ + IDLE_SPACE_SIZE_MIN_INC_STEP_FULL); + if (allocated >= expectHeapSize) { + DLOG(ALLOC, "request idle gc: allocated %zu, expectHeapSize %zu, aliveSizeAfterGC %zu", allocated, + expectHeapSize, aliveSizeAfterGC_); + Heap::GetHeap().GetCollector().RequestGC(GC_REASON_IDLE, true, GC_TYPE_FULL); + } +} + +bool HeuristicGCPolicy::ShouldRestrainGCInSensitive(size_t currentSize) +{ + AppSensitiveStatus current = GetSensitiveStatus(); + switch (current) { + case AppSensitiveStatus::NORMAL_SCENE: + return false; + case AppSensitiveStatus::ENTER_HIGH_SENSITIVE: { + if (GetRecordHeapObjectSizeBeforeSensitive() == 0) { + SetRecordHeapObjectSizeBeforeSensitive(currentSize); + } + if (Heap::GetHeap().GetCollector().GetGCStats().shouldRequestYoung) { + return false; + } + if (currentSize < (GetRecordHeapObjectSizeBeforeSensitive() + INC_OBJ_SIZE_IN_SENSITIVE)) { + return true; + } + return false; + } + case AppSensitiveStatus::EXIT_HIGH_SENSITIVE: { + if (CASSensitiveStatus(current, AppSensitiveStatus::NORMAL_SCENE)) { + // Set record heap obj size 0 after exit high sensitive + SetRecordHeapObjectSizeBeforeSensitive(0); + } + return false; + } + default: + return false; } } @@ -85,10 +149,10 @@ void HeuristicGCPolicy::CheckGCForNative() if (currentNativeSize > currentThreshold) { if (currentNativeSize > URGENCY_NATIVE_LIMIT) { // Native binding size is too large, should wait a sync finished. 
- Heap::GetHeap().GetCollector().RequestGC(GC_REASON_NATIVE_SYNC, false); + Heap::GetHeap().GetCollector().RequestGC(GC_REASON_NATIVE_SYNC, false, GC_TYPE_FULL); return; } - Heap::GetHeap().GetCollector().RequestGC(GC_REASON_NATIVE, true); + Heap::GetHeap().GetCollector().RequestGC(GC_REASON_NATIVE, true, GC_TYPE_FULL); } } void HeuristicGCPolicy::NotifyNativeFree(size_t bytes) @@ -137,17 +201,20 @@ void HeuristicGCPolicy::ChangeGCParams(bool isBackground) size_t allocated = Heap::GetHeap().GetAllocator().GetAllocatedBytes(); if (allocated > aliveSizeAfterGC_ && (allocated - aliveSizeAfterGC_) > BACKGROUND_LIMIT && allocated > MIN_BACKGROUND_GC_SIZE) { - Heap::GetHeap().GetCollector().RequestGC(GC_REASON_BACKGROUND, true); + Heap::GetHeap().GetCollector().RequestGC(GC_REASON_BACKGROUND, true, GC_TYPE_FULL); } common::Taskpool::GetCurrentTaskpool()->SetThreadPriority(common::PriorityMode::BACKGROUND); + BaseRuntime::GetInstance()->GetGCParam().multiplier = 1; } else { common::Taskpool::GetCurrentTaskpool()->SetThreadPriority(common::PriorityMode::FOREGROUND); + // 3: The front-end application waterline is 3 times + BaseRuntime::GetInstance()->GetGCParam().multiplier = 3; } } bool HeuristicGCPolicy::CheckAndTriggerHintGC(MemoryReduceDegree degree) { - if (UNLIKELY_CC(ShouldRestrainGCOnStartup())) { + if (UNLIKELY_CC(ShouldRestrainGCOnStartupOrSensitive())) { return false; } size_t allocated = Heap::GetHeap().GetAllocator().GetAllocatedBytes(); @@ -166,7 +233,7 @@ bool HeuristicGCPolicy::CheckAndTriggerHintGC(MemoryReduceDegree degree) if (expectHeapSize < allocated) { DLOG(ALLOC, "request heu gc by hint: allocated %zu, expectHeapSize %zu, aliveSizeAfterGC %zu", allocated, expectHeapSize, aliveSizeAfterGC_); - Heap::GetHeap().GetCollector().RequestGC(GC_REASON_HINT, true); + Heap::GetHeap().GetCollector().RequestGC(GC_REASON_HINT, true, GC_TYPE_FULL); return true; } return false; diff --git a/common_components/heap/collector/heuristic_gc_policy.h 
b/common_components/heap/collector/heuristic_gc_policy.h index b7816cde2dfcf428960f24e74d94c03885ec0181..eb80f71777ddab0feea541f35dc646f3d0218939 100644 --- a/common_components/heap/collector/heuristic_gc_policy.h +++ b/common_components/heap/collector/heuristic_gc_policy.h @@ -29,6 +29,12 @@ enum class StartupStatus: uint8_t { COLD_STARTUP_FINISH, }; +enum AppSensitiveStatus : uint8_t { + NORMAL_SCENE, + ENTER_HIGH_SENSITIVE, + EXIT_HIGH_SENSITIVE, +}; + class StartupStatusManager { public: static std::atomic startupStatus_; @@ -88,21 +94,73 @@ static constexpr size_t NOTIFY_NATIVE_INTERVAL = 32; #else static constexpr size_t NATIVE_INIT_THRESHOLD = 200 * MB; static constexpr size_t MAX_GLOBAL_NATIVE_LIMIT = 2 * GB; - static constexpr size_t MAX_NATIVE_STEP = 300 * MB; + static constexpr size_t MAX_NATIVE_STEP = 200 * MB; static constexpr size_t MAX_NATIVE_SIZE_INC = 1 * GB; static constexpr size_t NATIVE_IMMEDIATE_THRESHOLD = 2 * MB; #endif -static constexpr size_t URGENCY_NATIVE_LIMIT = (MAX_NATIVE_SIZE_INC + MAX_GLOBAL_NATIVE_LIMIT) / 2; +static constexpr size_t URGENCY_NATIVE_LIMIT = MAX_NATIVE_SIZE_INC + MAX_NATIVE_STEP * 2; // 2 is double. class HeuristicGCPolicy { public: - static constexpr double COLD_STARTUP_GC_THRESHOLD_RATIO = 0.25; + static constexpr double COLD_STARTUP_PHASE1_GC_THRESHOLD_RATIO = 0.3; + static constexpr double COLD_STARTUP_PHASE2_GC_THRESHOLD_RATIO = 0.125; void Init(); - bool ShouldRestrainGCOnStartup(); + bool ShouldRestrainGCOnStartupOrSensitive(); void TryHeuristicGC(); + void TryIdleGC(); + + bool ShouldRestrainGCInSensitive(size_t currentSize); + + void NotifyHighSensitive(bool isStart) + { + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "SmartGC: set high sensitive status: ", + std::to_string(isStart).c_str()); + isStart ? 
SetSensitiveStatus(AppSensitiveStatus::ENTER_HIGH_SENSITIVE) + : SetSensitiveStatus(AppSensitiveStatus::EXIT_HIGH_SENSITIVE); + LOG_COMMON(INFO) << "SmartGC: set high sensitive status: " << isStart; + } + + bool InSensitiveStatus() const + { + return GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE; + } + + bool OnStartupEvent() const + { + return StartupStatusManager::GetStartupStatus() == StartupStatus::COLD_STARTUP || + StartupStatusManager::GetStartupStatus() == StartupStatus::COLD_STARTUP_PARTIALLY_FINISH; + } + + StartupStatus GetStartupStatus() const; + + void SetSensitiveStatus(AppSensitiveStatus status) + { + sensitiveStatus_.store(status, std::memory_order_release); + } + + AppSensitiveStatus GetSensitiveStatus() const + { + return sensitiveStatus_.load(std::memory_order_acquire); + } + + bool CASSensitiveStatus(AppSensitiveStatus expect, AppSensitiveStatus status) + { + return sensitiveStatus_.compare_exchange_strong(expect, status, std::memory_order_seq_cst); + } + + void SetRecordHeapObjectSizeBeforeSensitive(size_t objSize) + { + recordSizeBeforeSensitive_.store(objSize, std::memory_order_release); + } + + size_t GetRecordHeapObjectSizeBeforeSensitive() const + { + return recordSizeBeforeSensitive_.load(std::memory_order_acquire); + } + void NotifyNativeAllocation(size_t bytes); void NotifyNativeFree(size_t bytes); @@ -121,12 +179,21 @@ public: bool CheckAndTriggerHintGC(MemoryReduceDegree degree); +#if defined(PANDA_TARGET_32) + static constexpr size_t INC_OBJ_SIZE_IN_SENSITIVE = 40 * MB; +#else + static constexpr size_t INC_OBJ_SIZE_IN_SENSITIVE = 80 * MB; +#endif static constexpr size_t BACKGROUND_LIMIT = 2 * MB; static constexpr size_t MIN_BACKGROUND_GC_SIZE = 30 * MB; static constexpr double IDLE_MIN_INC_RATIO = 1.1f; static constexpr size_t LOW_DEGREE_STEP_IN_IDLE = 5 * MB; static constexpr size_t HIGH_DEGREE_STEP_IN_IDLE = 1 * MB; + + static constexpr double IDLE_SPACE_SIZE_MIN_INC_RATIO = 1.1f; + static constexpr size_t 
IDLE_SPACE_SIZE_MIN_INC_STEP_FULL = 1 * MB; + private: void CheckGCForNative(); @@ -136,6 +203,9 @@ private: std::atomic notifiedNativeSize_ = 0; std::atomic nativeHeapThreshold_ = NATIVE_INIT_THRESHOLD; std::atomic nativeHeapObjects_ = 0; + + std::atomic sensitiveStatus_ {AppSensitiveStatus::NORMAL_SCENE}; + std::atomic recordSizeBeforeSensitive_ {0}; }; } // namespace common diff --git a/common_components/heap/collector/trace_collector.cpp b/common_components/heap/collector/marking_collector.cpp similarity index 54% rename from common_components/heap/collector/trace_collector.cpp rename to common_components/heap/collector/marking_collector.cpp index 1fae4a44d5237cabff7c9e89f92bece27a29919c..0cd7626dda21b00d48fead46ab95b7534c62c319 100755 --- a/common_components/heap/collector/trace_collector.cpp +++ b/common_components/heap/collector/marking_collector.cpp @@ -13,7 +13,7 @@ * limitations under the License. */ -#include "common_components/heap/collector/trace_collector.h" +#include "common_components/heap/collector/marking_collector.h" #include #include @@ -24,8 +24,8 @@ #include namespace common { -const size_t TraceCollector::MAX_MARKING_WORK_SIZE = 16; // fork task if bigger -const size_t TraceCollector::MIN_MARKING_WORK_SIZE = 8; // forbid forking task if smaller +const size_t MarkingCollector::MAX_MARKING_WORK_SIZE = 16; // fork task if bigger +const size_t MarkingCollector::MIN_MARKING_WORK_SIZE = 8; // forbid forking task if smaller void StaticRootTable::VisitRoots(const RefFieldVisitor& visitor) { @@ -47,74 +47,51 @@ void StaticRootTable::VisitRoots(const RefFieldVisitor& visitor) } } -class GlobalWorkStackQueue { +template +class ConcurrentMarkingTask : public common::Task { public: - GlobalWorkStackQueue() = default; - ~GlobalWorkStackQueue() = default; + ConcurrentMarkingTask(uint32_t id, MarkingCollector &tc, ParallelMarkingMonitor &monitor, + GlobalMarkStack &globalMarkStack) + : Task(id), collector_(tc), monitor_(monitor), 
globalMarkStack_(globalMarkStack) + {} - void AddWorkStack(TraceCollector::WorkStack &&stack) - { - DCHECK_CC(!stack.empty()); - std::lock_guard guard(mtx_); - workStacks_.push_back(std::move(stack)); - } + ~ConcurrentMarkingTask() override = default; - TraceCollector::WorkStack PopWorkStack() + // run concurrent marking task. + bool Run([[maybe_unused]] uint32_t threadIndex) override { - std::unique_lock lock(mtx_); - while (true) { - if (!workStacks_.empty()) { - TraceCollector::WorkStack stack(std::move(workStacks_.back())); - workStacks_.pop_back(); - return stack; - } - if (finished_) { - return TraceCollector::WorkStack(); + ParallelLocalMarkStack markStack(&globalMarkStack_, &monitor_); + do { + if (!monitor_.TryStartStep()) { + break; } - cv_.wait(lock); - } - } - - TraceCollector::WorkStack DrainAllWorkStack() - { - std::unique_lock lock(mtx_); - while (!workStacks_.empty()) { - TraceCollector::WorkStack stack(std::move(workStacks_.back())); - workStacks_.pop_back(); - return stack; - } - return TraceCollector::WorkStack(); + collector_.ProcessMarkStack(threadIndex, markStack); + monitor_.FinishStep(); + } while (monitor_.WaitNextStepOrFinished()); + monitor_.NotifyFinishOne(); + return true; } - void NotifyFinish() - { - std::lock_guard guard(mtx_); - DCHECK_CC(!finished_); - finished_ = true; - cv_.notify_all(); - } private: - bool finished_ {false}; - std::condition_variable cv_; - std::mutex mtx_; - std::vector workStacks_; + MarkingCollector &collector_; + ParallelMarkingMonitor &monitor_; + GlobalMarkStack &globalMarkStack_; }; -template -class ConcurrentMarkingTask : public common::Task { +class ClearWeakStackTask : public common::Task { public: - ConcurrentMarkingTask(uint32_t id, TraceCollector &tc, Taskpool *pool, TaskPackMonitor &monitor, - GlobalWorkStackQueue &globalQueue) + ClearWeakStackTask(uint32_t id, MarkingCollector &tc, Taskpool *pool, TaskPackMonitor &monitor, + GlobalWeakStackQueue &globalQueue) : Task(id), collector_(tc), 
threadPool_(pool), monitor_(monitor), globalQueue_(globalQueue) {} // single work task without thread pool - ConcurrentMarkingTask(uint32_t id, TraceCollector& tc, TaskPackMonitor &monitor, - GlobalWorkStackQueue &globalQueue) + ClearWeakStackTask(uint32_t id, MarkingCollector& tc, TaskPackMonitor &monitor, + GlobalWeakStackQueue &globalQueue) : Task(id), collector_(tc), threadPool_(nullptr), monitor_(monitor), globalQueue_(globalQueue) {} - ~ConcurrentMarkingTask() override + ~ClearWeakStackTask() override { threadPool_ = nullptr; } @@ -123,114 +100,133 @@ public: bool Run([[maybe_unused]] uint32_t threadIndex) override { while (true) { - TraceCollector::WorkStack workStack = globalQueue_.PopWorkStack(); - if (workStack.empty()) { + WeakStack weakStack = globalQueue_.PopWorkStack(); + if (weakStack.empty()) { break; } - collector_.ProcessMarkStack(threadIndex, threadPool_, workStack, globalQueue_); + collector_.ProcessWeakStack(weakStack); } monitor_.NotifyFinishOne(); return true; } private: - TraceCollector &collector_; + MarkingCollector &collector_; Taskpool *threadPool_; TaskPackMonitor &monitor_; - GlobalWorkStackQueue &globalQueue_; + GlobalWeakStackQueue &globalQueue_; }; -void TraceCollector::TryForkTask(Taskpool *threadPool, WorkStack &workStack, GlobalWorkStackQueue &globalQueue) +void MarkingCollector::ProcessWeakStack(WeakStack &weakStack) { - size_t size = workStack.size(); - if (size > MIN_MARKING_WORK_SIZE) { - bool doFork = false; - size_t newSize = 0; - if (size > MAX_MARKING_WORK_SIZE) { - newSize = size >> 1; // give 1/2 the stack to the thread pool as a new work task - doFork = true; - } else if (size > MIN_MARKING_WORK_SIZE) { - constexpr uint8_t shiftForEight = 3; - newSize = size >> shiftForEight; // give 1/8 the stack to the thread pool as a new work task - doFork = true; + while (!weakStack.empty()) { + auto [fieldPointer, offset] = *weakStack.back(); + weakStack.pop_back(); + ASSERT_LOGF(offset % sizeof(RefField<>) == 0, "offset is not 
aligned"); + + RefField<> &field = reinterpret_cast&>(*fieldPointer); + RefField<> oldField(field); + + if (!Heap::IsTaggedObject(oldField.GetFieldValue())) { + continue; + } + BaseObject* targetObj = oldField.GetTargetObject(); + DCHECK_CC(Heap::IsHeapAddress(targetObj)); + + auto targetRegion = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(targetObj)); + if (targetRegion->IsMarkedObject(targetObj) || targetRegion->IsNewObjectSinceMarking(targetObj)) { + continue; } - if (doFork) { - WorkStackBuf *hSplit = workStack.split(newSize); - globalQueue.AddWorkStack(WorkStack(hSplit)); + BaseObject* obj = reinterpret_cast(reinterpret_cast(&field) - offset); + if (RegionDesc::GetAliveRegionType(reinterpret_cast(obj)) == RegionDesc::RegionType::FROM_REGION) { + BaseObject* toObj = obj->GetForwardingPointer(); + + // Make sure even the object that contains the weak reference is trimmed before forwarding, the weak ref + // field is still within the object + if (toObj != nullptr && offset < obj->GetSizeForwarded()) { + RefField<>& toField = *reinterpret_cast*>(reinterpret_cast(toObj) + offset); + toField.ClearRef(oldField.GetFieldValue()); + } } + field.ClearRef(oldField.GetFieldValue()); } } template -void TraceCollector::ProcessMarkStack([[maybe_unused]] uint32_t threadIndex, Taskpool *threadPool, WorkStack &workStack, - GlobalWorkStackQueue &globalQueue) +void MarkingCollector::ProcessMarkStack([[maybe_unused]] uint32_t threadIndex, ParallelLocalMarkStack &markStack) { size_t nNewlyMarked = 0; WeakStack weakStack; - auto visitor = CreateTraceObjectRefFieldsVisitor(&workStack, &weakStack); - - TraceCollector::WorkStack remarkStack; - auto fetchFromSatbBuffer = [this, &workStack, &remarkStack]() { + auto visitor = CreateMarkingObjectRefFieldsVisitor(markStack, weakStack); + std::vector remarkStack; + auto fetchFromSatbBuffer = [this, &markStack, &remarkStack]() { SatbBuffer::Instance().TryFetchOneRetiredNode(remarkStack); + bool needProcess = false; while 
(!remarkStack.empty()) { BaseObject *obj = remarkStack.back(); remarkStack.pop_back(); if (Heap::IsHeapAddress(obj) && (!MarkObject(obj))) { - workStack.push_back(obj); + markStack.Push(obj); + needProcess = true; DLOG(TRACE, "tracing take from satb buffer: obj %p", obj); } } + return needProcess; }; size_t iterationCnt = 0; constexpr size_t maxIterationLoopNum = 1000; // loop until work stack empty. - do { - for (;;) { + while (true) { + BaseObject *object; + while (markStack.Pop(&object)) { ++nNewlyMarked; - if (workStack.empty()) { - break; - } - // get next object from work stack. - BaseObject *obj = workStack.back(); - workStack.pop_back(); - auto region = RegionDesc::GetRegionDescAt(reinterpret_cast((void *)obj)); - region->AddLiveByteCount(obj->GetSize()); - [[maybe_unused]] auto beforeSize = workStack.count(); - TraceObjectRefFields(obj, &visitor); -#ifdef PANDA_JS_ETS_HYBRID_MODE - if constexpr (ProcessXRef) { - TraceObjectXRef(obj, workStack); - } -#endif - DLOG(TRACE, "[tracing] visit finished, workstack size: before=%d, after=%d, newly added=%d", beforeSize, - workStack.count(), workStack.count() - beforeSize); - // try to fork new task if needed. - if (threadPool != nullptr) { - TryForkTask(threadPool, workStack, globalQueue); - } + auto region = RegionDesc::GetAliveRegionDescAt(static_cast(reinterpret_cast(object))); + region->AddLiveByteCount(object->GetSize()); + MarkingObjectRefFields(object, &visitor); } - // Try some task from satb buffer, bound the loop to make sure it converges in time - if (++iterationCnt < maxIterationLoopNum) { - fetchFromSatbBuffer(); - if (workStack.empty()) { - fetchFromSatbBuffer(); - } + if (++iterationCnt >= maxIterationLoopNum) { + break; + } + if (!fetchFromSatbBuffer()) { + break; } - } while (!workStack.empty()); + } + DCHECK_CC(markStack.IsEmpty()); // newly marked statistics. 
markedObjectCount_.fetch_add(nNewlyMarked, std::memory_order_relaxed); MergeWeakStack(weakStack); } -void TraceCollector::MergeWeakStack(WeakStack& weakStack) +void MarkingCollector::MergeWeakStack(WeakStack& weakStack) { std::lock_guard lock(weakStackLock_); - globalWeakStack_.insert(weakStack); + + // Preprocess the weak stack to minimize work during STW remark. + while (!weakStack.empty()) { + auto tuple = weakStack.back(); + weakStack.pop_back(); + + auto [weakFieldPointer, _] = *tuple; + RefField<> oldField(*weakFieldPointer); + + if (!Heap::IsTaggedObject(oldField.GetFieldValue())) { + continue; + } + auto obj = oldField.GetTargetObject(); + DCHECK_CC(Heap::IsHeapAddress(obj)); + + auto region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); + if (region->IsNewObjectSinceMarking(obj) || region->IsMarkedObject(obj)) { + continue; + } + + globalWeakStack_.push_back(tuple); + } } -void TraceCollector::EnumConcurrencyModelRoots(RootSet& rootSet) const +void MarkingCollector::EnumConcurrencyModelRoots(RootSet& rootSet) const { LOG_COMMON(FATAL) << "Unresolved fatal"; UNREACHABLE_CC(); @@ -257,187 +253,188 @@ private: bool worldStopped_; }; -void TraceCollector::MergeAllocBufferRoots(WorkStack& workStack) +void MarkingCollector::TracingImpl(GlobalMarkStack &globalMarkStack, bool parallel, bool Remark) { - // hold mutator list lock to freeze mutator liveness, otherwise may access dead mutator fatally - MergeMutatorRootsScope lockScope; - theAllocator_.VisitAllocBuffers([&workStack](AllocationBuffer &buffer) { - buffer.MarkStack([&workStack](BaseObject *o) { workStack.push_back(o); }); - }); -} - -void TraceCollector::TracingImpl(WorkStack& workStack, bool parallel) -{ - OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, ("CMCGC::TracingImpl_" + std::to_string(workStack.count())).c_str(), ""); - if (workStack.empty()) { - return; - } + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, ("CMCGC::TracingImpl_" + std::to_string(globalMarkStack.Count())).c_str(), + ""); // enable 
parallel marking if we have thread pool. Taskpool *threadPool = GetThreadPool(); ASSERT_LOGF(threadPool != nullptr, "thread pool is null"); if (parallel) { // parallel marking. - uint32_t parallelCount = GetGCThreadCount(true) - 1; + uint32_t parallelCount = 0; + // During the STW remark phase, Expect it to utilize all GC threads. + if (Remark) { + parallelCount = GetGCThreadCount(true); + } else { + parallelCount = GetGCThreadCount(true) - 1; + } uint32_t threadCount = parallelCount + 1; - TaskPackMonitor monitor(parallelCount, parallelCount); - GlobalWorkStackQueue globalQueue; + ParallelMarkingMonitor monitor(parallelCount, parallelCount); for (uint32_t i = 0; i < parallelCount; ++i) { -#ifdef PANDA_JS_ETS_HYBRID_MODE - if (gcReason_ == GCReason::GC_REASON_XREF) { - threadPool->PostTask(std::make_unique>(0, *this, threadPool, monitor, - globalQueue)); - } else { - threadPool->PostTask(std::make_unique>(0, *this, threadPool, monitor, - globalQueue)); - } -#else - threadPool->PostTask(std::make_unique>(0, *this, threadPool, monitor, - globalQueue)); -#endif + threadPool->PostTask(std::make_unique>(0, *this, monitor, globalMarkStack)); } - if (!AddConcurrentTracingWork(workStack, globalQueue, static_cast(threadCount))) { -#ifdef PANDA_JS_ETS_HYBRID_MODE - if (gcReason_ == GCReason::GC_REASON_XREF) { - ProcessMarkStack(0, threadPool, workStack, globalQueue); - } else { - ProcessMarkStack(0, threadPool, workStack, globalQueue); - } -#else - ProcessMarkStack(0, threadPool, workStack, globalQueue); -#endif - } - while (true) { - WorkStack stack = globalQueue.DrainAllWorkStack(); - if (stack.empty()) { + ParallelLocalMarkStack markStack(&globalMarkStack, &monitor); + do { + if (!monitor.TryStartStep()) { break; } -#ifdef PANDA_JS_ETS_HYBRID_MODE - if (gcReason_ == GCReason::GC_REASON_XREF) { - ProcessMarkStack(0, threadPool, stack, globalQueue); - } else { - ProcessMarkStack(0, threadPool, stack, globalQueue); - } -#else - ProcessMarkStack(0, threadPool, stack, 
globalQueue); -#endif - } - globalQueue.NotifyFinish(); + ProcessMarkStack(0, markStack); + monitor.FinishStep(); + } while (monitor.WaitNextStepOrFinished()); monitor.WaitAllFinished(); } else { // serial marking with a single mark task. - GlobalWorkStackQueue globalQueue; - WorkStack stack(std::move(workStack)); -#ifdef PANDA_JS_ETS_HYBRID_MODE - if (gcReason_ == GCReason::GC_REASON_XREF) { - ProcessMarkStack(0, nullptr, stack, globalQueue); - } else { - ProcessMarkStack(0, nullptr, stack, globalQueue); - } -#else - ProcessMarkStack(0, nullptr, stack, globalQueue); -#endif + // Fixme: this `ParallelLocalMarkStack` could be replaced with `SequentialLocalMarkStack`, and no need to + // use monitor, but this need to add template param to `ProcessMarkStack`. + // So for convenience just use a fake dummy parallel one. + ParallelMarkingMonitor dummyMonitor(0, 0); + ParallelLocalMarkStack markStack(&globalMarkStack, &dummyMonitor); + ProcessMarkStack(0, markStack); } } -bool TraceCollector::AddConcurrentTracingWork(WorkStack& workStack, GlobalWorkStackQueue &globalQueue, - size_t threadCount) +bool MarkingCollector::AddWeakStackClearWork(WeakStack &weakStack, + GlobalWeakStackQueue &globalQueue, + size_t threadCount) { - if (workStack.size() <= threadCount * MIN_MARKING_WORK_SIZE) { + if (weakStack.size() <= threadCount * MIN_MARKING_WORK_SIZE) { return false; // too less init tasks, which may lead to workload imbalance, add work rejected } DCHECK_CC(threadCount > 0); - const size_t chunkSize = std::min(workStack.size() / threadCount + 1, MIN_MARKING_WORK_SIZE); + const size_t chunkSize = std::min(weakStack.size() / threadCount + 1, MIN_MARKING_WORK_SIZE); // Split the current work stack into work tasks. 
- while (!workStack.empty()) { - WorkStackBuf *hSplit = workStack.split(chunkSize); - globalQueue.AddWorkStack(WorkStack(hSplit)); + while (!weakStack.empty()) { + WeakStackBuf *hSplit = weakStack.split(chunkSize); + globalQueue.AddWorkStack(WeakStack(hSplit)); } return true; } -bool TraceCollector::PushRootToWorkStack(RootSet *workStack, BaseObject *obj) +bool MarkingCollector::PushRootToWorkStack(LocalCollectStack &collectStack, BaseObject *obj) { - RegionDesc *regionInfo = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); - if (gcReason_ == GCReason::GC_REASON_YOUNG && regionInfo->IsInOldSpace()) { + RegionDesc *regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); + if (gcReason_ == GCReason::GC_REASON_YOUNG && !regionInfo->IsInYoungSpace()) { DLOG(ENUM, "enum: skip old object %p<%p>(%zu)", obj, obj->GetTypeInfo(), obj->GetSize()); return false; } + // inline MarkObject bool marked = regionInfo->MarkObject(obj); if (!marked) { ASSERT(!regionInfo->IsGarbageRegion()); - regionInfo->AddLiveByteCount(obj->GetSize()); DLOG(TRACE, "mark obj %p<%p>(%zu) in region %p(%u)@%#zx, live %u", obj, obj->GetTypeInfo(), obj->GetSize(), regionInfo, regionInfo->GetRegionType(), regionInfo->GetRegionStart(), regionInfo->GetLiveByteCount()); - } - if (marked) { + collectStack.Push(obj); + return true; + } else { return false; } - workStack->push_back(obj); - return true; } -void TraceCollector::PushRootsToWorkStack(RootSet *workStack, const CArrayList &collectedRoots) +void MarkingCollector::PushRootsToWorkStack(LocalCollectStack &collectStack, + const CArrayList &collectedRoots) { OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, ("CMCGC::PushRootsToWorkStack_" + std::to_string(collectedRoots.size())).c_str(), ""); for (BaseObject *obj : collectedRoots) { - PushRootToWorkStack(workStack, obj); + PushRootToWorkStack(collectStack, obj); } } -void TraceCollector::TraceRoots(const CArrayList &collectedRoots) +void MarkingCollector::MarkingRoots(const CArrayList &collectedRoots) 
{ - OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::TraceRoots", ""); + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::MarkingRoots", ""); + + GlobalMarkStack globalMarkStack; + + { + LocalCollectStack collectStack(&globalMarkStack); - WorkStack workStack = NewWorkStack(); - PushRootsToWorkStack(&workStack, collectedRoots); + PushRootsToWorkStack(collectStack, collectedRoots); - if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { - OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PushRootInRSet", ""); - auto func = [this, &workStack](BaseObject *object) { MarkRememberSetImpl(object, workStack); }; - RegionSpace &space = reinterpret_cast(Heap::GetHeap().GetAllocator()); - space.VisitRememberSet(func); + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PushRootInRSet", ""); + auto func = [this, &collectStack](BaseObject *object) { MarkRememberSetImpl(object, collectStack); }; + RegionalHeap &space = reinterpret_cast(Heap::GetHeap().GetAllocator()); + space.MarkRememberSet(func); + } + + collectStack.Publish(); } - COMMON_PHASE_TIMER("TraceRoots"); - VLOG(DEBUG, "roots size: %zu", workStack.size()); + COMMON_PHASE_TIMER("MarkingRoots"); ASSERT_LOGF(GetThreadPool() != nullptr, "null thread pool"); // use fewer threads and lower priority for concurrent mark. 
const uint32_t maxWorkers = GetGCThreadCount(true) - 1; - VLOG(DEBUG, "Concurrent mark with %u threads, workStack: %zu", (maxWorkers + 1), workStack.size()); { COMMON_PHASE_TIMER("Concurrent marking"); - TracingImpl(workStack, maxWorkers > 0); + TracingImpl(globalMarkStack, maxWorkers > 0, false); } } -void TraceCollector::Remark() +void MarkingCollector::Remark() { - WorkStack workStack = NewWorkStack(); + GlobalMarkStack globalMarkStack; const uint32_t maxWorkers = GetGCThreadCount(true) - 1; OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::Remark[STW]", ""); COMMON_PHASE_TIMER("STW re-marking"); - RemarkAndPreforwardStaticRoots(workStack); - ConcurrentRemark(workStack, maxWorkers > 0); - TracingImpl(workStack, maxWorkers > 0); - MarkAwaitingJitFort(); - ProcessWeakReferences(); + RemarkAndPreforwardStaticRoots(globalMarkStack); + ConcurrentRemark(globalMarkStack, maxWorkers > 0); // Mark enqueue + TracingImpl(globalMarkStack, maxWorkers > 0, true); + MarkAwaitingJitFort(); // Mark awaiting + ClearWeakStack(maxWorkers > 0); - OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::TraceRoots END", + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::MarkingRoots END", ("mark obejects:" + std::to_string(markedObjectCount_.load(std::memory_order_relaxed))).c_str()); VLOG(DEBUG, "mark %zu objects", markedObjectCount_.load(std::memory_order_relaxed)); } -bool TraceCollector::MarkSatbBuffer(WorkStack& workStack) +void MarkingCollector::ClearWeakStack(bool parallel) +{ + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::ProcessGlobalWeakStack", ""); + { + if (gcReason_ == GC_REASON_YOUNG || globalWeakStack_.empty()) { + return; + } + Taskpool *threadPool = GetThreadPool(); + ASSERT_LOGF(threadPool != nullptr, "thread pool is null"); + if (parallel) { + uint32_t parallelCount = GetGCThreadCount(true); + uint32_t threadCount = parallelCount + 1; + TaskPackMonitor monitor(parallelCount, parallelCount); + GlobalWeakStackQueue globalQueue; + for (uint32_t i = 0; i < parallelCount; ++i) { + 
threadPool->PostTask(std::make_unique(0, *this, threadPool, monitor, globalQueue)); + } + if (!AddWeakStackClearWork(globalWeakStack_, globalQueue, static_cast(threadCount))) { + ProcessWeakStack(globalWeakStack_); + } + bool exitLoop = false; + while (!exitLoop) { + WeakStack stack = globalQueue.DrainAllWorkStack(); + if (stack.empty()) { + exitLoop = true; + } + ProcessWeakStack(stack); + } + globalQueue.NotifyFinish(); + monitor.WaitAllFinished(); + } else { + ProcessWeakStack(globalWeakStack_); + } + } +} + +bool MarkingCollector::MarkSatbBuffer(GlobalMarkStack &globalMarkStack) { OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::MarkSatbBuffer", ""); COMMON_PHASE_TIMER("MarkSatbBuffer"); - auto visitSatbObj = [this, &workStack]() { - WorkStack remarkStack; + auto visitSatbObj = [this, &globalMarkStack]() { + std::vector remarkStack; auto func = [&remarkStack](Mutator& mutator) { const SatbBuffer::TreapNode* node = mutator.GetSatbBufferNode(); if (node != nullptr) { @@ -447,55 +444,54 @@ bool TraceCollector::MarkSatbBuffer(WorkStack& workStack) MutatorManager::Instance().VisitAllMutators(func); SatbBuffer::Instance().GetRetiredObjects(remarkStack); + LocalCollectStack collectStack(&globalMarkStack); while (!remarkStack.empty()) { // LCOV_EXCL_BR_LINE BaseObject* obj = remarkStack.back(); remarkStack.pop_back(); - if (Heap::IsHeapAddress(obj)) { - if (!this->MarkObject(obj)) { - workStack.push_back(obj); - DLOG(TRACE, "satb buffer add obj %p", obj); - } + if (Heap::IsHeapAddress(obj) && !this->MarkObject(obj)) { + collectStack.Push(obj); + DLOG(TRACE, "satb buffer add obj %p", obj); } } + collectStack.Publish(); }; visitSatbObj(); return true; } -void TraceCollector::MarkRememberSetImpl(BaseObject* object, WorkStack& workStack) +void MarkingCollector::MarkRememberSetImpl(BaseObject* object, LocalCollectStack &collectStack) { - // in Young GC: maybe we can skip the object if it has no ref to young space - object->ForEachRefField([this, &workStack, 
&object](RefField<>& field) { + object->ForEachRefField([this, &collectStack, &object](RefField<>& field) { BaseObject* targetObj = field.GetTargetObject(); if (Heap::IsHeapAddress(targetObj)) { - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(targetObj)); + RegionDesc* region = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(targetObj)); if (region->IsInYoungSpace() && - !region->IsNewObjectSinceTrace(targetObj) && + !region->IsNewObjectSinceMarking(targetObj) && !this->MarkObject(targetObj)) { - workStack.push_back(targetObj); - DLOG(TRACE, "remember set trace obj: %p@%p, ref: %p", object, &field, targetObj); + collectStack.Push(targetObj); + DLOG(TRACE, "remember set marking obj: %p@%p, ref: %p", object, &field, targetObj); } } }); } -void TraceCollector::ConcurrentRemark(WorkStack& remarkStack, bool parallel) +void MarkingCollector::ConcurrentRemark(GlobalMarkStack &globalMarkStack, bool parallel) { - LOGF_CHECK(MarkSatbBuffer(remarkStack)) << "not cleared\n"; + LOGF_CHECK(MarkSatbBuffer(globalMarkStack)) << "not cleared\n"; } -void TraceCollector::MarkAwaitingJitFort() +void MarkingCollector::MarkAwaitingJitFort() { - reinterpret_cast(theAllocator_).MarkAwaitingJitFort(); + reinterpret_cast(theAllocator_).MarkAwaitingJitFort(); } -void TraceCollector::Init(const RuntimeParam& param) {} +void MarkingCollector::Init(const RuntimeParam& param) {} -void TraceCollector::Fini() { Collector::Fini(); } +void MarkingCollector::Fini() { Collector::Fini(); } #if defined(GCINFO_DEBUG) && GCINFO_DEBUG -void TraceCollector::DumpHeap(const CString& tag) +void MarkingCollector::DumpHeap(const CString& tag) { ASSERT_LOGF(MutatorManager::Instance().WorldStopped(), "Not In STW"); DLOG(FRAGMENT, "DumpHeap %s", tag.Str()); @@ -526,42 +522,51 @@ void TraceCollector::DumpHeap(const CString& tag) DLOG(FRAGMENT, "Dump Allocator"); } -void TraceCollector::DumpRoots(LogType logType) +void MarkingCollector::DumpRoots(LogType logType) { LOG_COMMON(FATAL) << 
"Unresolved fatal"; UNREACHABLE_CC(); } #endif -void TraceCollector::PreGarbageCollection(bool isConcurrent) +void MarkingCollector::PreGarbageCollection(bool isConcurrent) { // SatbBuffer should be initialized before concurrent enumeration. SatbBuffer::Instance().Init(); // prepare thread pool. - GetGCStats().reason = gcReason_; - GetGCStats().async = !g_gcRequests[gcReason_].IsSyncGC(); - GetGCStats().isConcurrentMark = isConcurrent; + GCStats& gcStats = GetGCStats(); + gcStats.reason = gcReason_; + gcStats.async = !g_gcRequests[gcReason_].IsSyncGC(); + gcStats.gcType = gcType_; + gcStats.isConcurrentMark = isConcurrent; + gcStats.collectedBytes = 0; + gcStats.smallGarbageSize = 0; + gcStats.nonMovableGarbageSize = 0; + gcStats.largeGarbageSize = 0; + gcStats.gcStartTime = TimeUtil::NanoSeconds(); + gcStats.totalSTWTime = 0; + gcStats.maxSTWTime = 0; #if defined(GCINFO_DEBUG) && GCINFO_DEBUG DumpBeforeGC(); #endif } -void TraceCollector::PostGarbageCollection(uint64_t gcIndex) +void MarkingCollector::PostGarbageCollection(uint64_t gcIndex) { SatbBuffer::Instance().ReclaimALLPages(); // release pages in PagePool PagePool::Instance().Trim(); - collectorResources_.NotifyGCFinished(gcIndex); + collectorResources_.MarkGCFinish(gcIndex); #if defined(GCINFO_DEBUG) && GCINFO_DEBUG DumpAfterGC(); #endif } -void TraceCollector::UpdateGCStats() +void MarkingCollector::UpdateGCStats() { - RegionSpace& space = reinterpret_cast(theAllocator_); + RegionalHeap& space = reinterpret_cast(theAllocator_); GCStats& gcStats = GetGCStats(); gcStats.Dump(); @@ -595,15 +600,18 @@ void TraceCollector::UpdateGCStats() size_t remainingBytes = recentBytes; remainingBytes = std::min(remainingBytes, gcParam.kMaxConcurrentRemainingBytes); remainingBytes = std::max(remainingBytes, gcParam.kMinConcurrentRemainingBytes); - if (UNLIKELY(remainingBytes > gcStats.targetFootprint)) { remainingBytes = std::min(gcParam.kMinConcurrentRemainingBytes, gcStats.targetFootprint); } gcStats.heapThreshold = 
std::max(gcStats.targetFootprint - remainingBytes, bytesAllocated); + gcStats.heapThreshold = std::max(gcStats.heapThreshold, 20 * MB); // 20 MB:set 20 MB as min heapThreshold gcStats.heapThreshold = std::min(gcStats.heapThreshold, gcParam.gcThreshold); UpdateNativeThreshold(gcParam); Heap::GetHeap().RecordAliveSizeAfterLastGC(bytesAllocated); + if (!gcStats.isYoungGC()) { + Heap::GetHeap().SetRecordHeapObjectSizeBeforeSensitive(bytesAllocated); + } if (!gcStats.isYoungGC()) { g_gcRequests[GC_REASON_HEU].SetMinInterval(gcParam.gcInterval); @@ -630,7 +638,7 @@ void TraceCollector::UpdateGCStats() ).c_str()); } -void TraceCollector::UpdateNativeThreshold(GCParam& gcParam) +void MarkingCollector::UpdateNativeThreshold(GCParam& gcParam) { size_t nativeHeapSize = Heap::GetHeap().GetNotifiedNativeSize(); size_t newNativeHeapThreshold = Heap::GetHeap().GetNotifiedNativeSize(); @@ -645,7 +653,7 @@ void TraceCollector::UpdateNativeThreshold(GCParam& gcParam) collectorResources_.SetIsNativeGCInvoked(false); } -void TraceCollector::CopyObject(const BaseObject& fromObj, BaseObject& toObj, size_t size) const +void MarkingCollector::CopyObject(const BaseObject& fromObj, BaseObject& toObj, size_t size) const { uintptr_t from = reinterpret_cast(&fromObj); uintptr_t to = reinterpret_cast(&toObj); @@ -656,16 +664,29 @@ void TraceCollector::CopyObject(const BaseObject& fromObj, BaseObject& toObj, si #endif } -void TraceCollector::RunGarbageCollection(uint64_t gcIndex, GCReason reason) +void MarkingCollector::ReclaimGarbageMemory(GCReason reason) +{ + if (reason == GC_REASON_OOM) { + Heap::GetHeap().GetAllocator().ReclaimGarbageMemory(true); + } else { + Heap::GetHeap().GetAllocator().ReclaimGarbageMemory(false); + } +} + +void MarkingCollector::RunGarbageCollection(uint64_t gcIndex, GCReason reason, GCType gcType) { gcReason_ = reason; + gcType_ = gcType; auto gcReasonName = std::string(g_gcRequests[gcReason_].name); auto currentAllocatedSize = Heap::GetHeap().GetAllocatedSize(); 
auto currentThreshold = Heap::GetHeap().GetCollector().GetGCStats().GetThreshold(); - VLOG(INFO, "Begin GC log. GCReason: %s, Current allocated %s, Current threshold %s, gcIndex=%llu", - gcReasonName.c_str(), Pretty(currentAllocatedSize).c_str(), Pretty(currentThreshold).c_str(), gcIndex); + VLOG(INFO, "Begin GC log. GCReason: %s, GCType: %s, Current allocated %s, Current threshold %s, gcIndex=%llu", + gcReasonName.c_str(), GCTypeToString(gcType), Pretty(currentAllocatedSize).c_str(), + Pretty(currentThreshold).c_str(), gcIndex); OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::RunGarbageCollection", ( - "GCReason:" + gcReasonName + ";Sensitive:0;IsInBackground:0;Startup:0" + + "GCReason:" + gcReasonName + ";GCType:" + GCTypeToString(gcType) + + ";Sensitive:" + std::to_string(static_cast(Heap::GetHeap().GetSensitiveStatus())) + + ";Startup:" + std::to_string(static_cast(Heap::GetHeap().GetStartupStatus())) + ";Current Allocated:" + Pretty(currentAllocatedSize) + ";Current Threshold:" + Pretty(currentThreshold) + ";Current Native:" + Pretty(Heap::GetHeap().GetNotifiedNativeSize()) + @@ -677,17 +698,12 @@ void TraceCollector::RunGarbageCollection(uint64_t gcIndex, GCReason reason) PreGarbageCollection(true); Heap::GetHeap().SetGCReason(reason); GCStats& gcStats = GetGCStats(); - gcStats.collectedBytes = 0; - gcStats.gcStartTime = TimeUtil::NanoSeconds(); DoGarbageCollection(); HeapBitmapManager::GetHeapBitmapManager().ClearHeapBitmap(); - reinterpret_cast(theAllocator_).DumpAllRegionStats("region statistics when gc ends"); - if (reason == GC_REASON_OOM) { - Heap::GetHeap().GetAllocator().ReclaimGarbageMemory(true); - } + ReclaimGarbageMemory(reason); PostGarbageCollection(gcIndex); MutatorManager::Instance().DestroyExpiredMutators(); @@ -718,25 +734,28 @@ void TraceCollector::RunGarbageCollection(uint64_t gcIndex, GCReason reason) } UpdateGCStats(); + + if (Heap::GetHeap().GetForceThrowOOM()) { + Heap::throwOOM(); + } } -void TraceCollector::CopyFromSpace() +void 
MarkingCollector::CopyFromSpace() { OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::CopyFromSpace", ""); TransitionToGCPhase(GCPhase::GC_PHASE_COPY, true); - ProcessStringTable(); - RegionSpace& space = reinterpret_cast(theAllocator_); + RegionalHeap& space = reinterpret_cast(theAllocator_); GCStats& stats = GetGCStats(); stats.liveBytesBeforeGC = space.GetAllocatedBytes(); stats.fromSpaceSize = space.FromSpaceSize(); space.CopyFromSpace(GetThreadPool()); - stats.smallGarbageSize = space.FromSpaceSize() - space.ToSpaceSize(); + stats.smallGarbageSize = space.FromRegionSize() - space.ToSpaceSize(); } -void TraceCollector::ExemptFromSpace() +void MarkingCollector::ExemptFromSpace() { - RegionSpace& space = reinterpret_cast(theAllocator_); + RegionalHeap& space = reinterpret_cast(theAllocator_); space.ExemptFromSpace(); } diff --git a/common_components/heap/collector/trace_collector.h b/common_components/heap/collector/marking_collector.h similarity index 59% rename from common_components/heap/collector/trace_collector.h rename to common_components/heap/collector/marking_collector.h index 4ea4705cb76dce60a5fa5a328576cf83822c0da0..dc2fef0fd8a3fe5466e315d88e403c693cb36d60 100755 --- a/common_components/heap/collector/trace_collector.h +++ b/common_components/heap/collector/marking_collector.h @@ -13,8 +13,8 @@ * limitations under the License. 
*/ -#ifndef COMMON_COMPONENTS_HEAP_COLLECTOR_TRACE_COLLECTOR_H -#define COMMON_COMPONENTS_HEAP_COLLECTOR_TRACE_COLLECTOR_H +#ifndef COMMON_COMPONENTS_HEAP_COLLECTOR_MARKING_COLLECTOR_H +#define COMMON_COMPONENTS_HEAP_COLLECTOR_MARKING_COLLECTOR_H #include #include @@ -22,7 +22,8 @@ #include "common_components/heap/collector/collector.h" #include "common_components/heap/collector/collector_resources.h" #include "common_components/common/mark_work_stack.h" -#include "common_components/heap/allocator/region_space.h" +#include "common_components/common/work_stack-inl.h" +#include "common_components/heap/allocator/regional_heap.h" #include "common_components/heap/collector/copy_data_manager.h" #include "common_components/mutator/mutator_manager.h" @@ -31,7 +32,8 @@ namespace common { template using CArrayList = std::vector; -class GlobalWorkStackQueue; +template +class GlobalStackQueue; // number of nanoseconds in a microsecond. constexpr uint64_t NS_PER_US = 1000; @@ -101,31 +103,48 @@ private: size_t totalRootsCount_; }; +class ParallelMarkingMonitor : public TaskPackMonitor { +public: + explicit ParallelMarkingMonitor(int posted, int capacity) : TaskPackMonitor(posted, capacity) {} + ~ParallelMarkingMonitor() override = default; + + void operator()() + { + WakeUpRunnerApproximately(); + } +}; + class MarkingWork; template class ConcurrentMarkingWork; - -class TraceCollector : public Collector { +using RootSet = MarkStack; +constexpr size_t LOCAL_MARK_STACK_CAPACITY = 128; +using GlobalMarkStack = StackList; +using ParallelLocalMarkStack = LocalStack; +using SequentialLocalMarkStack = LocalStack; +using LocalCollectStack = LocalStack; +using WorkStackBuf = MarkStackBuffer; +using WeakStack = MarkStack*, size_t>>>; +using WeakStackBuf = MarkStackBuffer*, size_t>>>; +using GlobalWeakStackQueue = GlobalStackQueue; + +class MarkingCollector : public Collector { friend MarkingWork; template friend class ConcurrentMarkingWork; public: - explicit TraceCollector(Allocator& 
allocator, CollectorResources& resources) + explicit MarkingCollector(Allocator& allocator, CollectorResources& resources) : Collector(), theAllocator_(allocator), collectorResources_(resources) {} - ~TraceCollector() override = default; + ~MarkingCollector() override = default; virtual void PreGarbageCollection(bool isConcurrent); virtual void PostGarbageCollection(uint64_t gcIndex); // Types, so that we don't confuse root sets and working stack. // The policy is: we simply `push_back` into root set, // but we use Enqueue to add into work stack. - using RootSet = MarkStack; - using WorkStack = MarkStack; - using WorkStackBuf = MarkStackBuffer; - using WeakStack = MarkStack*>; void Init(const RuntimeParam& param) override; void Fini() override; @@ -139,7 +158,8 @@ public: if (MutatorManager::Instance().WorldStopped()) { DumpHeap("before_gc"); } else { - ScopedStopTheWorld stw("dump-heap-before-gc"); + STWParam stwParam{"dump-heap-before-gc"}; + ScopedStopTheWorld stw(stwParam); DumpHeap("before_gc"); } } @@ -151,7 +171,8 @@ public: if (MutatorManager::Instance().WorldStopped()) { DumpHeap("after_gc"); } else { - ScopedStopTheWorld stw("dump-heap-after-gc"); + STWParam stwParam{"dump-heap-after-gc"}; + ScopedStopTheWorld stw(stwParam); DumpHeap("after_gc"); } } @@ -161,36 +182,29 @@ public: bool ShouldIgnoreRequest(GCRequest& request) override { return request.ShouldBeIgnored(); } template - void ProcessMarkStack(uint32_t threadIndex, Taskpool *threadPool, WorkStack &workStack, - GlobalWorkStackQueue &globalQueue); - - void TryForkTask(Taskpool *threadPool, WorkStack &workStack, GlobalWorkStackQueue &globalQueue); + void ProcessMarkStack(uint32_t threadIndex, ParallelLocalMarkStack &workStack); + void ProcessWeakStack(WeakStack &weakStack); // live but not resurrected object. 
- bool IsMarkedObject(const BaseObject* obj) const { return RegionSpace::IsMarkedObject(obj); } + bool IsMarkedObject(const BaseObject* obj) const { return RegionalHeap::IsMarkedObject(obj); } // live or resurrected object. inline bool IsSurvivedObject(const BaseObject* obj) const { - return RegionSpace::IsMarkedObject(obj) || RegionSpace::IsResurrectedObject(obj); + return RegionalHeap::IsMarkedObject(obj) || RegionalHeap::IsResurrectedObject(obj); } inline bool IsToObject(const BaseObject* obj) const { - return RegionDesc::GetRegionDescAt(reinterpret_cast(obj))->IsToRegion(); - } - - inline bool IsToVersion(const BaseObject* obj) const - { - return obj->IsToVersion(); + return RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj))->IsToRegion(); } - virtual bool MarkObject(BaseObject* obj, size_t cellCount = 0) const = 0; + virtual bool MarkObject(BaseObject* obj) const = 0; - // avoid std::function allocation for each object trace - class TraceRefFieldVisitor { + // avoid std::function allocation for each object marking + class MarkingRefFieldVisitor { public: - TraceRefFieldVisitor() : closure_(std::make_shared(nullptr)) {} + MarkingRefFieldVisitor() : closure_(std::make_shared(nullptr)) {} template void SetVisitor(Functor &&f) @@ -198,24 +212,27 @@ public: visitor_ = std::forward(f); } const auto &GetRefFieldVisitor() const { return visitor_; } - void SetTraceRefFieldArgs(BaseObject *obj) { *closure_ = obj; } + void SetMarkingRefFieldArgs(BaseObject *obj) { *closure_ = obj; } const auto &GetClosure() const { return closure_; } private: common::RefFieldVisitor visitor_; std::shared_ptr closure_; }; - virtual TraceRefFieldVisitor CreateTraceObjectRefFieldsVisitor(WorkStack *workStack, WeakStack *weakStack) = 0; - virtual void TraceObjectRefFields(BaseObject *obj, TraceRefFieldVisitor *data) = 0; -#ifdef PANDA_JS_ETS_HYBRID_MODE - virtual void TraceObjectXRef(BaseObject* obj, WorkStack& workStack) - { - LOG_COMMON(FATAL) << "Unresolved fatal"; - 
UNREACHABLE_CC(); - } -#endif + virtual MarkingRefFieldVisitor CreateMarkingObjectRefFieldsVisitor(ParallelLocalMarkStack &workStack, + WeakStack &weakStack) = 0; + virtual void MarkingObjectRefFields(BaseObject *obj, MarkingRefFieldVisitor *data) = 0; - inline bool IsResurrectedObject(const BaseObject* obj) const { return RegionSpace::IsResurrectedObject(obj); } + // UDAV: HAndle it +//#ifdef PANDA_JS_ETS_HYBRID_MODE +// virtual void TraceObjectXRef(BaseObject* obj, WorkStack& workStack) +// { +// LOG_COMMON(FATAL) << "Unresolved fatal"; +// UNREACHABLE_CC(); +// } +//#endif + + inline bool IsResurrectedObject(const BaseObject* obj) const { return RegionalHeap::IsResurrectedObject(obj); } Allocator& GetAllocator() const { return theAllocator_; } @@ -225,7 +242,15 @@ public: void SetGcStarted(bool val) { collectorResources_.SetGcStarted(val); } - void RunGarbageCollection(uint64_t, GCReason) override; + void MarkGCStart() { collectorResources_.MarkGCStart(); } + void MarkGCFinish(uint64_t gcIndex) + { + collectorResources_.MarkGCFinish(gcIndex); + } + + void RunGarbageCollection(uint64_t, GCReason, GCType) override; + + void ReclaimGarbageMemory(GCReason reason); void TransitionToGCPhase(const GCPhase phase, const bool) { @@ -248,7 +273,10 @@ protected: virtual void DoGarbageCollection() = 0; - void RequestGCInternal(GCReason reason, bool async) override { collectorResources_.RequestGC(reason, async); } + void RequestGCInternal(GCReason reason, bool async, GCType gcType) override + { + collectorResources_.RequestGC(reason, async, gcType); + } void MergeWeakStack(WeakStack& weakStack); void UpdateNativeThreshold(GCParam& gcParam); @@ -268,6 +296,8 @@ protected: // reason for current GC. GCReason gcReason_ = GC_REASON_USER; + GCType gcType_ = GC_TYPE_FULL; + // indicate whether to fix references (including global roots and reference fields). // this member field is useful for optimizing concurrent copying gc. 
bool fixReferences_ = false; @@ -276,7 +306,7 @@ protected: void ResetBitmap(bool heapMarked) { - // if heap is marked and tracing result will be used during next gc, we should not reset liveInfo. + // if heap is marked and tracing result will be used during next gc, we should not reset liveInfo_. } uint32_t GetGCThreadCount(const bool isConcurrent) const @@ -284,47 +314,96 @@ protected: return collectorResources_.GetGCThreadCount(isConcurrent); } - inline WorkStack NewWorkStack() const - { - WorkStack workStack = WorkStack(); - return workStack; - } - inline void SetGCReason(const GCReason reason) { gcReason_ = reason; } Taskpool *GetThreadPool() const { return collectorResources_.GetThreadPool(); } // let finalizerProcessor process finalizers, and mark resurrected if in stw gc - virtual void ProcessWeakReferences() {} + void ClearWeakStack(bool parallel); virtual void ProcessStringTable() {} virtual void ProcessFinalizers() {} - virtual void RemarkAndPreforwardStaticRoots(WorkStack& workStack) + virtual void RemarkAndPreforwardStaticRoots(GlobalMarkStack &globalMarkStack) { LOG_COMMON(FATAL) << "Unresolved fatal"; UNREACHABLE_CC(); } - void MergeAllocBufferRoots(WorkStack& workStack); - - bool PushRootToWorkStack(RootSet *workStack, BaseObject *obj); - void PushRootsToWorkStack(RootSet *workStack, const CArrayList &collectedRoots); - void TraceRoots(const CArrayList &collectedRoots); + bool PushRootToWorkStack(LocalCollectStack &markStack, BaseObject *obj); + void PushRootsToWorkStack(LocalCollectStack &markStack, const CArrayList &collectedRoots); + void MarkingRoots(const CArrayList &collectedRoots); void Remark(); - bool MarkSatbBuffer(WorkStack& workStack); + bool MarkSatbBuffer(GlobalMarkStack &globalMarkStack); // concurrent marking. 
- void TracingImpl(WorkStack& workStack, bool parallel); + void TracingImpl(GlobalMarkStack &globalMarkStack, bool parallel, bool Remark); - bool AddConcurrentTracingWork(WorkStack& workStack, GlobalWorkStackQueue &globalQueue, size_t threadCount); + bool AddWeakStackClearWork(WeakStack& workStack, GlobalWeakStackQueue &globalQueue, size_t threadCount); private: - void MarkRememberSetImpl(BaseObject* object, WorkStack& workStack); - void ConcurrentRemark(WorkStack& remarkStack, bool parallel); + void MarkRememberSetImpl(BaseObject* object, LocalCollectStack &markStack); + void ConcurrentRemark(GlobalMarkStack &globalMarkStack, bool parallel); void MarkAwaitingJitFort(); void EnumMutatorRoot(ObjectPtr& obj, RootSet& rootSet) const; void EnumConcurrencyModelRoots(RootSet& rootSet) const; }; + + +template +class GlobalStackQueue { +public: + GlobalStackQueue() = default; + ~GlobalStackQueue() = default; + + void AddWorkStack(StackType &&stack) + { + DCHECK_CC(!stack.empty()); + std::lock_guard guard(mtx_); + stacks_.push_back(std::move(stack)); + cv_.notify_one(); + } + + StackType PopWorkStack() + { + std::unique_lock lock(mtx_); + while (true) { + if (!stacks_.empty()) { + StackType stack(std::move(stacks_.back())); + stacks_.pop_back(); + return stack; + } + if (finished_) { + return StackType(); + } + cv_.wait(lock); + } + } + + StackType DrainAllWorkStack() + { + std::unique_lock lock(mtx_); + while (!stacks_.empty()) { + StackType stack(std::move(stacks_.back())); + stacks_.pop_back(); + return stack; + } + return StackType(); + } + + void NotifyFinish() + { + std::lock_guard guard(mtx_); + DCHECK_CC(!finished_); + finished_ = true; + cv_.notify_all(); + } +private: + bool finished_ {false}; + std::condition_variable cv_; + std::mutex mtx_; + std::vector stacks_; +}; + } // namespace common -#endif // COMMON_COMPONENTS_HEAP_COLLECTOR_TRACE_COLLECTOR_H +#endif // COMMON_COMPONENTS_HEAP_COLLECTOR_MARKING_COLLECTOR_H diff --git 
a/common_components/heap/collector/region_bitmap.h b/common_components/heap/collector/region_bitmap.h index 9a97d1073e4828965f4a316fd71da08285a892e7..0aeac4f6f4ed0a7e8a72ae17ac356b9ad4b2931b 100755 --- a/common_components/heap/collector/region_bitmap.h +++ b/common_components/heap/collector/region_bitmap.h @@ -28,7 +28,7 @@ namespace common { static constexpr size_t kBitsPerByte = 8; static constexpr size_t kMarkedBytesPerBit = 8; static constexpr size_t kBitsPerWord = sizeof(uint64_t) * kBitsPerByte; -class RegionDesc; +static constexpr size_t kBytesPerWord = sizeof(uint64_t) / sizeof(uint8_t); struct RegionBitmap { static constexpr uint8_t factor = 16; std::atomic partLiveBytes[factor]; @@ -41,6 +41,7 @@ struct RegionBitmap { static size_t GetRegionBitmapSize(size_t regionSize) { + CHECK_CC(regionSize % (kMarkedBytesPerBit * kBitsPerWord) == 0); return sizeof(RegionBitmap) + ((regionSize / (kMarkedBytesPerBit * kBitsPerWord)) * sizeof(uint64_t)); } @@ -71,13 +72,6 @@ struct RegionBitmap { return ret; } }; -struct RegionLiveDesc { - static constexpr HeapAddress TEMPORARY_PTR = 0x1234; - RegionDesc* relatedRegion = nullptr; - RegionBitmap* markBitmap = nullptr; - RegionBitmap* resurrectBitmap = nullptr; - RegionBitmap* enqueueBitmap = nullptr; -}; } // namespace common #endif // COMMON_COMPONENTS_HEAP_COLLECTOR_REGION_BITMAP_H diff --git a/common_components/heap/collector/region_rset.h b/common_components/heap/collector/region_rset.h index d5a9dce043049022c250efbc242390ff0403e131..89126c0de0b880bcc88df5968ef9bea41c656a37 100644 --- a/common_components/heap/collector/region_rset.h +++ b/common_components/heap/collector/region_rset.h @@ -12,99 +12,95 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + #ifndef COMMON_COMPONENTS_HEAP_COLLECTOR_REGION_RSET_H #define COMMON_COMPONENTS_HEAP_COLLECTOR_REGION_RSET_H + #include #include namespace common { -static constexpr size_t CARD_SIZE = 512; class RegionRSet { public: - explicit RegionRSet(size_t regionSize) : cardCnt(regionSize / CARD_SIZE) + static constexpr size_t CARD_SIZE = 512; + using CardElement = uint64_t; + static_assert(std::atomic::is_always_lock_free); + static constexpr size_t CARD_TABLE_DATA_OFFSET = AlignUp(sizeof(size_t), sizeof(CardElement)); + + static RegionRSet *CreateRegionRSet(size_t regionSize) { -#ifdef _WIN64 - void* startAddress = VirtualAlloc(NULL, cardCnt * sizeof(uint64_t), MEM_RESERVE, PAGE_READWRITE); - if (startAddress == NULL) { - LOG_COMMON(FATAL) << "failed to initialize HeapBitmapManager"; - UNREACHABLE_CC(); - } -#else - void* startAddress = mmap(nullptr, cardCnt * sizeof(uint64_t), - PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (startAddress == MAP_FAILED) { - LOG_COMMON(FATAL) << "failed to initialize HeapBitmapManager: " << errno; - UNREACHABLE_CC(); - } else { -#ifndef __APPLE__ - (void)madvise(startAddress, cardCnt * sizeof(uint64_t), MADV_NOHUGEPAGE); - COMMON_PRCTL(startAddress, cardCnt * sizeof(uint64_t), "forward_data"); -#endif + CHECK_CC(regionSize % CARD_SIZE == 0); + size_t cardCnt = regionSize / CARD_SIZE; + size_t cardSize = cardCnt * sizeof(CardElement); + void *ptr = malloc(CARD_TABLE_DATA_OFFSET + cardSize); + if (ptr == nullptr) { + LOG_COMMON(FATAL) << "malloc failed, regionSize=" << regionSize << ", cardSize=" << cardSize + << ", errnor=" << errno; + UNREACHABLE(); } -#endif - cardTable = reinterpret_cast*>(startAddress); + RegionRSet *rset = new (ptr) RegionRSet(cardCnt); + rset->ClearCardTable(); + return rset; } - ~RegionRSet() + static void DestroyRegionRSet(RegionRSet *rset) { -#ifdef _WIN64 - if (!VirtualFree(reinterpret_cast(cardTable), 0, MEM_RELEASE)) { - LOG_COMMON(ERROR) << "VirtualFree error for HeapBitmapManager"; - } 
-#else - if (munmap(reinterpret_cast(cardTable), cardCnt * sizeof(uint64_t)) != 0) { - LOG_COMMON(ERROR) << "munmap error for HeapBitmapManager: " << errno; - } -#endif + free(rset); } bool MarkCardTable(size_t offset) { size_t cardIdx = (offset / kMarkedBytesPerBit) / kBitsPerWord; size_t headMaskBitStart = (offset / kMarkedBytesPerBit) % kBitsPerWord; - uint64_t headMaskBits = static_cast(1) << headMaskBitStart; - uint64_t card = cardTable[cardIdx].load(); - bool isMarked = ((card & headMaskBits) != 0); + CardElement headMaskBits = static_cast(1ULL << headMaskBitStart); + std::atomic *card = reinterpret_cast *>(&GetCardTable()[cardIdx]); + bool isMarked = ((card->load(std::memory_order_relaxed) & headMaskBits) != 0); if (!isMarked) { - card = cardTable[cardIdx].fetch_or(headMaskBits); - isMarked = ((card & headMaskBits) != 0); + CardElement prev = card->fetch_or(headMaskBits, std::memory_order_relaxed); + isMarked = ((prev & headMaskBits) != 0); return isMarked; } - return isMarked; - } - - bool IsMarkedCard(size_t offset) - { - size_t cardIdx = (offset / kMarkedBytesPerBit) / kBitsPerWord; - size_t headMaskBitStart = (offset / kMarkedBytesPerBit) % kBitsPerWord; - uint64_t headMaskBits = static_cast(1) << headMaskBitStart; - return (cardTable[cardIdx].load() & headMaskBits) != 0; + return true; } void ClearCardTable() { - LOGF_CHECK(memset_s(cardTable, cardCnt * sizeof(uint64_t), 0, cardCnt * sizeof(uint64_t)) == EOK) - << "memset_s fail"; + LOGF_CHECK(memset_s(GetCardTable(), cardCnt_ * sizeof(CardElement), 0, cardCnt_ * sizeof(CardElement)) == EOK) + << "memset_s fail, cardCnt=" << cardCnt_; } - void VisitAllMarkedCard(const std::function& func, HeapAddress regionStart) + void VisitAllMarkedCardBefore(const std::function& func, + HeapAddress regionStart, HeapAddress end) { - for (size_t i = 0; i < cardCnt.load(); i++) { - uint64_t card = cardTable[i].load(); - for (size_t j = 0; j < kBitsPerWord; j++) { - uint64_t mask = static_cast(1) << j; - if ((card & 
mask) == 0) { - continue; + for (size_t i = 0; i < cardCnt_; i++) { + CardElement card = GetCardTable()[i]; + size_t index = kBitsPerWord; + while (card != 0) { + index = static_cast(__builtin_ctzll(card)); + ASSERT(index < kBitsPerWord); + HeapAddress offset = static_cast((i * kBitsPerWord) * kBitsPerByte + index * kBitsPerByte); + HeapAddress obj = regionStart + offset; + if (obj >= end) { + return; } - BaseObject* obj = reinterpret_cast(regionStart + - static_cast((i * kBitsPerWord) * kBitsPerByte + j * kBitsPerByte)); - func(obj); + func(reinterpret_cast(obj)); + card &= ~(static_cast(1ULL << index)); } } } private: - std::atomic cardCnt; - std::atomic* cardTable; + explicit RegionRSet(size_t cardCnt) : cardCnt_(cardCnt) {} + ~RegionRSet() = default; + + CardElement *GetCardTable() const + { + return reinterpret_cast(reinterpret_cast(this) + CARD_TABLE_DATA_OFFSET); + } + + size_t cardCnt_ {0}; }; + +static_assert(RegionRSet::CARD_TABLE_DATA_OFFSET == AlignUp(sizeof(RegionRSet), + sizeof(RegionRSet::CardElement))); } #endif // COMMON_COMPONENTS_HEAP_COLLECTOR_REGION_RSET_H diff --git a/common_components/heap/collector/task_queue.cpp b/common_components/heap/collector/task_queue.cpp index 83f60c613db7014606e9757eeb2c59289a765b67..dd49c291a168d023210e1d6b7ed02e56f0976b5e 100755 --- a/common_components/heap/collector/task_queue.cpp +++ b/common_components/heap/collector/task_queue.cpp @@ -29,22 +29,22 @@ bool GCRunner::Execute(void* owner) } case GCTask::GCTaskType::GC_TASK_INVOKE_GC: { GCStats::SetPrevGCStartTime(TimeUtil::NanoSeconds()); - collectorProxy->RunGarbageCollection(taskIndex_, gcReason_); + collectorProxy->RunGarbageCollection(taskIndex_, gcReason_, gcType_); GCStats::SetPrevGCFinishTime(TimeUtil::NanoSeconds()); break; } - case GCTask::GCTaskType::GC_TASK_DUMP_HEAP: { + case GCTask::GCTaskType::GC_TASK_DUMP_HEAP: { //LCOV_EXCL_BR_LINE LOG_COMMON(FATAL) << "Don't know how to dump heap"; UNREACHABLE_CC(); break; } - case 
GCTask::GCTaskType::GC_TASK_DUMP_HEAP_IDE: { + case GCTask::GCTaskType::GC_TASK_DUMP_HEAP_IDE: { //LCOV_EXCL_BR_LINE LOG_COMMON(FATAL) << "Don't know how to dump heap OOM"; UNREACHABLE_CC(); break; } - case GCTask::GCTaskType::GC_TASK_DUMP_HEAP_OOM: { + case GCTask::GCTaskType::GC_TASK_DUMP_HEAP_OOM: { //LCOV_EXCL_BR_LINE LOG_COMMON(FATAL) << "Don't know how to dump heap OOM"; UNREACHABLE_CC(); break; diff --git a/common_components/heap/collector/task_queue.h b/common_components/heap/collector/task_queue.h index 67c6eb0bcfa72f2adf7cc431601aca98faad81ec..736af0cf3313005b1b550730fe9cd80a680edd26 100755 --- a/common_components/heap/collector/task_queue.h +++ b/common_components/heap/collector/task_queue.h @@ -80,7 +80,7 @@ public: static constexpr uint32_t PRIO_TIMEOUT = 1; static constexpr uint32_t PRIO_INVOKE_GC = 2; - static_assert(PRIO_INVOKE_GC + static_cast(GC_REASON_MAX) <= std::numeric_limits::digits, + static_assert(PRIO_INVOKE_GC + static_cast(GC_REASON_END) < std::numeric_limits::digits, "task queue reached max capacity"); GCRunner() : GCTask(GCTaskType::GC_TASK_INVALID), gcReason_(GC_REASON_INVALID) {} @@ -90,9 +90,11 @@ public: ASSERT_LOGF(type != GCTaskType::GC_TASK_INVOKE_GC, "invalid gc task!"); } - GCRunner(GCTaskType type, GCReason reason) : GCTask(type), gcReason_(reason) + GCRunner(GCTaskType type, GCReason reason, GCType gcType = GC_TYPE_FULL) + : GCTask(type), gcReason_(reason), gcType_(gcType) { - ASSERT_LOGF(gcReason_ < GC_REASON_MAX, "invalid reason"); + ASSERT_LOGF(gcReason_ >= GC_REASON_BEGIN && gcReason_ <= GC_REASON_END, "invalid reason"); + ASSERT_LOGF(gcType_ >= GC_TYPE_BEGIN && gcType_ <= GC_TYPE_END, "invalid gc type"); } GCRunner(const GCRunner& task) = default; @@ -103,9 +105,11 @@ public: { if (prio == PRIO_TERMINATE) { return GCRunner(GCTaskType::GC_TASK_TERMINATE_GC); - } else if (prio - PRIO_INVOKE_GC < GC_REASON_MAX) { - return GCRunner(GCTaskType::GC_TASK_INVOKE_GC, static_cast(prio - PRIO_INVOKE_GC)); - } else { + } else if 
(prio - PRIO_INVOKE_GC <= GC_REASON_END) { + auto reason = static_cast(prio - PRIO_INVOKE_GC); + auto gcType = reason == GC_REASON_YOUNG ? GC_TYPE_YOUNG : GC_TYPE_FULL; + return GCRunner(GCTaskType::GC_TASK_INVOKE_GC, reason, gcType); + } else { //LCOV_EXCL_BR_LINE LOG_COMMON(FATAL) << "Invalid priority in GetGCRequestByPrio function"; UNREACHABLE_CC(); return GCRunner(); @@ -139,12 +143,17 @@ public: inline void SetGCReason(GCReason reason) { gcReason_ = reason; } + inline GCType GetGCType() const { return gcType_; } + + inline void SetGCType(GCType type) { gcType_ = type; } + bool NeedFilter() const override { return true; } bool Execute(void* owner) override; private: - GCReason gcReason_; + GCReason gcReason_ { GC_REASON_INVALID }; + GCType gcType_ { GC_TYPE_FULL }; }; // Lockless async task queue implementation. diff --git a/common_components/heap/collector/tests/BUILD.gn b/common_components/heap/collector/tests/BUILD.gn index 9c61f31ba0957fc6e383a34fc02e3f39293efd95..7ef6f84db27c334ca61f7438445a4a4f7ad4eef3 100755 --- a/common_components/heap/collector/tests/BUILD.gn +++ b/common_components/heap/collector/tests/BUILD.gn @@ -21,6 +21,29 @@ host_unittest_action("Collector_Test") { sources = [ # test file "collector_resources_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "bounds_checking_function:libsec_shared", + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Finalizer_Processor_Test") { + module_out_path = module_output_path + + sources = [ + # test file "finalizer_processor_test.cpp", ] @@ -31,6 +54,76 @@ host_unittest_action("Collector_Test") { deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + # hiviewdfx libraries + 
external_deps = [ + "bounds_checking_function:libsec_shared", + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Heuristic_Gc_Policy_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "heuristic_gc_policy_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "runtime_core:libarkassembler_static", + ] +} + +host_unittest_action("Collector_Proxy_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "collector_proxy_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "runtime_core:libarkassembler_static", + ] +} + +host_unittest_action("Marking_Collector_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "marking_collector_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + # hiviewdfx libraries external_deps = [ "icu:shared_icui18n", @@ -91,6 +184,10 @@ group("unittest") { # deps file deps = [ ":Collector_Test", + ":Finalizer_Processor_Test", + ":Heuristic_Gc_Policy_Test", + ":Collector_Proxy_Test", + ":Marking_Collector_Test", ":Task_Queue_Test", ":Gc_Request_Test", ] @@ -102,7 +199,12 @@ group("host_unittest") { # deps file deps = [ 
":Collector_TestAction", + ":Finalizer_Processor_TestAction", + ":Heuristic_Gc_Policy_TestAction", + ":Collector_Proxy_TestAction", + ":Marking_Collector_TestAction", ":Task_Queue_TestAction", ":Gc_Request_TestAction", ] } + diff --git a/common_components/heap/collector/tests/collector_proxy_test.cpp b/common_components/heap/collector/tests/collector_proxy_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2baca36eb1519c6ea39bd6eb5f2cbff4e4b2fac8 --- /dev/null +++ b/common_components/heap/collector/tests/collector_proxy_test.cpp @@ -0,0 +1,44 @@ +/* +* Copyright (c) 2025 Huawei Device Co., Ltd. +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +#include "common_components/heap/collector/collector_proxy.h" +#include "common_components/tests/test_helper.h" + +using namespace common; +namespace common::test { +class CollectorProxyTest : public common::test::BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() + { + BaseRuntime::GetInstance()->Fini(); + } + + void SetUp() override {} + void TearDown() override {} +}; + +HWTEST_F_L0(CollectorProxyTest, RunGarbageCollection) +{ + CollectorProxy collectorProxy(Heap::GetHeap().GetAllocator(), Heap::GetHeap().GetCollectorResources()); + Heap::GetHeap().SetGCReason(GCReason::GC_REASON_OOM); + collectorProxy.RunGarbageCollection(0, GCReason::GC_REASON_OOM, GCType::GC_TYPE_BEGIN); + ASSERT_TRUE(Heap::GetHeap().GetGCReason() == GCReason::GC_REASON_OOM); +} +} // namespace common::test \ No newline at end of file diff --git a/common_components/heap/collector/tests/collector_resources_test.cpp b/common_components/heap/collector/tests/collector_resources_test.cpp index c05a505ae9ecf5fac640edba6548a8b9c29ba3fc..490c776ecbbf5b1081494940d105238e4acba5bd 100755 --- a/common_components/heap/collector/tests/collector_resources_test.cpp +++ b/common_components/heap/collector/tests/collector_resources_test.cpp @@ -14,6 +14,7 @@ */ #include "common_components/heap/collector/collector_resources.h" +#include "common_components/heap/collector/gc_request.h" #include "common_components/mutator/mutator_manager.h" #include "common_components/tests/test_helper.h" #include "common_interfaces/base_runtime.h" @@ -49,4 +50,26 @@ HWTEST_F_L0(CollectorResourcesTest, RequestHeapDumpTest) { GCTask::GCTaskType::GC_TASK_INVALID); EXPECT_TRUE(Heap::GetHeap().IsGCEnabled()); } + +HWTEST_F_L0(CollectorResourcesTest, RequestGC) { + GCRequest gcRequests = { GC_REASON_BACKUP, "backup", true, false, 0, 0 }; + Heap::GetHeap().EnableGC(false); + 
EXPECT_TRUE(!Heap::GetHeap().GetCollectorResources().IsGCActive()); + GCReason reason = gcRequests.reason; + Heap::GetHeap().GetCollectorResources().RequestGC(reason, true, common::GC_TYPE_FULL); +} + +HWTEST_F_L0(CollectorResourcesTest, RequestGCAndWaitTest) { + GCRequest gcRequests = { GC_REASON_USER, "user", false, false, 0, 0 }; + GCReason reason = gcRequests.reason; + Heap::GetHeap().EnableGC(true); + EXPECT_TRUE(Heap::GetHeap().GetCollectorResources().IsGCActive()); + Heap::GetHeap().GetCollectorResources().RequestGC(reason, false, common::GC_TYPE_FULL); + EXPECT_TRUE(!gcRequests.IsSyncGC()); +} + +HWTEST_F_L0(CollectorResourcesTest, GetGCThreadCountTest) { + uint32_t res = Heap::GetHeap().GetCollectorResources().GetGCThreadCount(false); + EXPECT_EQ(res, 2u); +} } // namespace common::test \ No newline at end of file diff --git a/common_components/heap/collector/tests/finalizer_processor_test.cpp b/common_components/heap/collector/tests/finalizer_processor_test.cpp index a0d2d10ca97646535f25d5e4b8c75bd0b6489345..6dc55b486b27e7afb29b50fc9c36d05ad49b14ec 100755 --- a/common_components/heap/collector/tests/finalizer_processor_test.cpp +++ b/common_components/heap/collector/tests/finalizer_processor_test.cpp @@ -21,8 +21,12 @@ using namespace common; namespace common::test { +const uint32_t TWO_SECONDS = 2; +const uint32_t HUNDRED_MILLISECONDS = 100; +constexpr uint64_t TAG_BOOLEAN = 0x04ULL; + class FinalizerProcessorTest : public common::test::BaseTestWithScope { - protected: +protected: static void SetUpTestCase() { BaseRuntime::GetInstance()->Init(); @@ -32,12 +36,12 @@ class FinalizerProcessorTest : public common::test::BaseTestWithScope { void SetUp() override { - MutatorManager::Instance().CreateRuntimeMutator(ThreadType::GC_THREAD); + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::ARK_PROCESSOR); } void TearDown() override { - MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::GC_THREAD); + 
MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::ARK_PROCESSOR); } }; @@ -45,10 +49,10 @@ HWTEST_F_L0(FinalizerProcessorTest, RegisterFinalizer_TEST1) { FinalizerProcessor finalizerProcessor; HeapAddress addr = common::HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); - BaseObject *obj = reinterpret_cast(addr); + BaseObject *obj = reinterpret_cast(addr | TAG_BOOLEAN); new (obj) BaseObject(); // Construct BaseObject finalizerProcessor.RegisterFinalizer(obj); - bool flag = common::RegionSpace::IsMarkedObject(obj); + bool flag = common::RegionalHeap::IsMarkedObject(obj); EXPECT_FALSE(flag); } @@ -56,14 +60,86 @@ HWTEST_F_L0(FinalizerProcessorTest, EnqueueFinalizables_TEST1) { FinalizerProcessor finalizerProcessor; HeapAddress addr = common::HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); - BaseObject *obj = reinterpret_cast(addr); + BaseObject *obj = reinterpret_cast(addr | TAG_BOOLEAN); new (obj) BaseObject(); // Construct BaseObject finalizerProcessor.RegisterFinalizer(obj); std::function finalizable = [](BaseObject* obj) { - return !common::RegionSpace::IsMarkedObject(obj); + return !common::RegionalHeap::IsMarkedObject(obj); }; finalizerProcessor.EnqueueFinalizables(finalizable, 1); bool flag = finalizable(obj); EXPECT_TRUE(flag); } -} // namespace common::test \ No newline at end of file + +HWTEST_F_L0(FinalizerProcessorTest, EnqueueFinalizables_TEST2) +{ + FinalizerProcessor finalizerProcessor; + HeapAddress addr = common::HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject *obj = reinterpret_cast(addr | TAG_BOOLEAN); + new (obj) BaseObject(); + finalizerProcessor.RegisterFinalizer(obj); + RootVisitor visitor = [](ObjectRef&) { + return; + }; + std::function finalizable = [this](BaseObject* obj) { + return common::RegionalHeap::IsMarkedObject(obj); + }; + auto before = finalizerProcessor.VisitFinalizers(visitor); + 
finalizerProcessor.EnqueueFinalizables(finalizable, 1); + auto after = finalizerProcessor.VisitFinalizers(visitor); + bool flag = finalizable(obj); + EXPECT_FALSE(flag); + EXPECT_EQ(before, after); +} + +HWTEST_F_L0(FinalizerProcessorTest, EnqueueFinalizables_TEST3) +{ + FinalizerProcessor finalizerProcessor; + RootVisitor visitor = [](ObjectRef&) { + return; + }; + std::function finalizable = [this](BaseObject* obj) { + return common::RegionalHeap::IsMarkedObject(obj); + }; + auto num1 = finalizerProcessor.VisitFinalizers(visitor); + finalizerProcessor.EnqueueFinalizables(finalizable, 0); + EXPECT_EQ(num1, 0); + auto num2 = finalizerProcessor.VisitFinalizers(visitor); + finalizerProcessor.EnqueueFinalizables(finalizable, 1); + EXPECT_EQ(num2, 0); + HeapAddress addr = common::HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject *obj = reinterpret_cast(addr | TAG_BOOLEAN); + new (obj) BaseObject(); + finalizerProcessor.RegisterFinalizer(obj); + auto num3 = finalizerProcessor.VisitFinalizers(visitor); + finalizerProcessor.EnqueueFinalizables(finalizable, 0); + bool flag = finalizable(obj); + EXPECT_NE(num3, 0); + EXPECT_FALSE(flag); +} + +HWTEST_F_L0(FinalizerProcessorTest, Run_TEST1) +{ + FinalizerProcessor finalizerProcessor; + HeapAddress addr = common::HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject *obj = reinterpret_cast(addr | TAG_BOOLEAN); + new (obj) BaseObject(); + AllocationBuffer* buffer = new (std::nothrow) AllocationBuffer(); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + buffer->SetPreparedRegion(region); + Heap::GetHeap().GetAllocator().AddHungryBuffer(*buffer); + finalizerProcessor.RegisterFinalizer(obj); + finalizerProcessor.Start(); + std::thread notifier([&]() { + std::this_thread::sleep_for(std::chrono::milliseconds(HUNDRED_MILLISECONDS)); + finalizerProcessor.NotifyToFeedAllocBuffers(); + 
std::this_thread::sleep_for(std::chrono::milliseconds(HUNDRED_MILLISECONDS)); + finalizerProcessor.NotifyToReclaimGarbage(); + }); + notifier.join(); + std::this_thread::sleep_for(std::chrono::seconds(TWO_SECONDS)); + finalizerProcessor.Stop(); + EXPECT_NE(buffer->GetPreparedRegion(), nullptr); + delete buffer; +} +} // namespace common::test diff --git a/common_components/heap/collector/tests/gc_request_test.cpp b/common_components/heap/collector/tests/gc_request_test.cpp index fb393292d9e1e8eda27f92c0cd2cd07d1a3edc2a..5ceccb73318c16e9ba1dfd7562dea1a3bddf7882 100644 --- a/common_components/heap/collector/tests/gc_request_test.cpp +++ b/common_components/heap/collector/tests/gc_request_test.cpp @@ -21,141 +21,129 @@ using namespace common; -uint64_t fakeCurrentTime = 0; - namespace common { -uint64_t TimeUtil::NanoSeconds() -{ - return fakeCurrentTime; -} -} // namespace panda - class GCRequestTest : public common::test::BaseTestWithScope { -protected: - void SetUp() override - { - fakeCurrentTime = 0; - } - - void SetPrevRequestTime(GCRequest& req, uint64_t time) - { - req.SetPrevRequestTime(time); - } - - void SetMinInterval(GCRequest& req, uint64_t intervalNs) - { - req.SetMinInterval(intervalNs); - } }; -void SetLastGCFinishTime(uint64_t time) -{ - GCStats::SetPrevGCFinishTime(time); +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Heu_Test1) { + int64_t now = static_cast(TimeUtil::NanoSeconds()); + GCStats::SetPrevGCFinishTime(now); + GCRequest req = { GC_REASON_HEU, "heuristic", false, true, 0, 0 }; + req.SetMinInterval(now + 1000); + + bool result = req.ShouldBeIgnored(); + EXPECT_TRUE(result); } +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Heu_Test2) { + int64_t now = static_cast(TimeUtil::NanoSeconds()); + GCStats::SetPrevGCFinishTime(1000); + GCRequest req = { GC_REASON_HEU, "heuristic", false, true, 0, 0 }; + req.SetMinInterval(1000); -bool ShouldBeIgnoredWithReason(GCReason reason, uint64_t minIntervalNs, uint64_t prevReqTime, uint64_t now, - uint64_t 
lastGCFinishTime = 0) -{ - fakeCurrentTime = now; - SetLastGCFinishTime(lastGCFinishTime); + bool result = req.ShouldBeIgnored(); + EXPECT_FALSE(result); +} - GCRequest req = { - reason, - "", // name - false, // isSync - false, // isConcurrent - minIntervalNs, - prevReqTime - }; +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Young_Test1) { + int64_t now = static_cast(TimeUtil::NanoSeconds()); + GCStats::SetPrevGCFinishTime(now); + GCRequest req = { GC_REASON_YOUNG, "young", false, true, 0, 0 }; + req.SetMinInterval(now + 1000); - return req.ShouldBeIgnored(); + bool result = req.ShouldBeIgnored(); + EXPECT_TRUE(result); } +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Young_Test2) { + int64_t now = static_cast(TimeUtil::NanoSeconds()); + GCStats::SetPrevGCFinishTime(1000); + GCRequest req = { GC_REASON_YOUNG, "young", false, true, 0, 0 }; + req.SetMinInterval(1000); -HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Heu_ReturnsTrue_IfFrequent) { - bool result = ShouldBeIgnoredWithReason( - GC_REASON_HEU, - 1000, // minIntervalNs - fakeCurrentTime - 500, // prevReqTime < now - minInterval - fakeCurrentTime - ); - EXPECT_TRUE(result); + bool result = req.ShouldBeIgnored(); + EXPECT_FALSE(result); } -HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Native_ReturnsTrue_IfFrequent) { - bool result = ShouldBeIgnoredWithReason( - GC_REASON_NATIVE, - 1000, - fakeCurrentTime - 500, - fakeCurrentTime, - fakeCurrentTime - 500 // lastGCFinishTime - ); +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Native_Test1) { + int64_t now = static_cast(TimeUtil::NanoSeconds()); + GCStats::SetPrevGCFinishTime(now); + GCRequest req = { GC_REASON_NATIVE, "native", false, true, 0, 0 }; + req.SetMinInterval(now + 1000); + + bool result = req.ShouldBeIgnored(); EXPECT_TRUE(result); } -HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Native_ReturnsFalse_IfNotFrequent) { - bool result = ShouldBeIgnoredWithReason( - GC_REASON_NATIVE, - 1000, - fakeCurrentTime - 1500, - fakeCurrentTime, - fakeCurrentTime - 1500 - ); 
+HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Native_Test2) { + int64_t now = static_cast(TimeUtil::NanoSeconds()); + GCStats::SetPrevGCFinishTime(1000); + GCRequest req = { GC_REASON_NATIVE, "native", false, true, 0, 0 }; + req.SetMinInterval(1000); + + bool result = req.ShouldBeIgnored(); EXPECT_FALSE(result); } -HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_OOM_ReturnsTrue_IfFrequent) { - bool result = ShouldBeIgnoredWithReason( - GC_REASON_OOM, - 1000, - fakeCurrentTime - 500, - fakeCurrentTime - ); +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Oom_Test1) { + int64_t now = static_cast(TimeUtil::NanoSeconds()); + GCRequest req = { GC_REASON_OOM, "oom", false, true, 0, 0 }; + req.SetMinInterval(now + 1000); + req.SetPrevRequestTime(now); + + bool result = req.ShouldBeIgnored(); EXPECT_TRUE(result); } -HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_OOM_ReturnsFalse_IfNotFrequent) { - bool result = ShouldBeIgnoredWithReason( - GC_REASON_OOM, - 1000, - fakeCurrentTime - 1500, - fakeCurrentTime - ); +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Oom_Test2) { + GCRequest req = { GC_REASON_OOM, "oom", false, true, 0, 0 }; + req.SetMinInterval(0); + req.SetPrevRequestTime(1000); + + bool result = req.ShouldBeIgnored(); + EXPECT_FALSE(result); +} + +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Oom_Test3) { + GCRequest req = { GC_REASON_OOM, "oom", false, true, 0, 0 }; + req.SetMinInterval(1000); + req.SetPrevRequestTime(1000); + + bool result = req.ShouldBeIgnored(); EXPECT_FALSE(result); } -HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Force_ReturnsTrue_IfFrequent) { - bool result = ShouldBeIgnoredWithReason( - GC_REASON_FORCE, - 1000, - fakeCurrentTime - 500, - fakeCurrentTime - ); +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Force_Test1) { + int64_t now = static_cast(TimeUtil::NanoSeconds()); + GCRequest req = { GC_REASON_FORCE, "force", false, true, 0, 0 }; + req.SetMinInterval(now + 1000); + req.SetPrevRequestTime(now); + + bool result = req.ShouldBeIgnored(); EXPECT_TRUE(result); } 
-HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Force_ReturnsFalse_IfNotFrequent) { - bool result = ShouldBeIgnoredWithReason( - GC_REASON_FORCE, - 1000, - fakeCurrentTime - 1500, - fakeCurrentTime - ); +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Force_Test2) { + GCRequest req = { GC_REASON_FORCE, "force", false, true, 0, 0 }; + req.SetMinInterval(0); + req.SetPrevRequestTime(1000); + + bool result = req.ShouldBeIgnored(); EXPECT_FALSE(result); } -HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_User_ReturnsFalse) { - fakeCurrentTime = 1000; +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_Force_Test3) { + GCRequest req = { GC_REASON_FORCE, "force", false, true, 0, 0 }; + req.SetMinInterval(1000); + req.SetPrevRequestTime(1000); - GCRequest req = { - GC_REASON_USER, - "", - false, - false, - 0, - 0 - }; + bool result = req.ShouldBeIgnored(); + EXPECT_FALSE(result); +} - EXPECT_FALSE(req.ShouldBeIgnored()); -} \ No newline at end of file +HWTEST_F_L0(GCRequestTest, ShouldBeIgnored_User_Test1) { + GCRequest req = { GC_REASON_USER, "user", false, true, 0, 0 }; + bool result = req.ShouldBeIgnored(); + EXPECT_FALSE(result); +} +} // namespace common::test \ No newline at end of file diff --git a/common_components/heap/collector/tests/heuristic_gc_policy_test.cpp b/common_components/heap/collector/tests/heuristic_gc_policy_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a0961fab9e48614532e4b18e9700839d0d729ff5 --- /dev/null +++ b/common_components/heap/collector/tests/heuristic_gc_policy_test.cpp @@ -0,0 +1,290 @@ +/* +* Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/common_runtime/base_runtime_param.h" +#include "common_components/heap/allocator/allocator.h" +#include "common_components/heap/collector/collector_resources.h" +#include "common_components/heap/allocator/regional_heap.h" +#include "common_components/heap/collector/heuristic_gc_policy.h" +#include "common_components/heap/heap.h" +#include "common_components/heap/heap_manager.h" +#include "common_components/tests/test_helper.h" +#include "common_interfaces/heap/heap_allocator.h" + +using namespace common; +namespace common::test { +class HeuristicGCPolicyTest : public common::test::BaseTestWithScope { +protected: + static void SetUpTestCase() + { + RuntimeParam param = BaseRuntimeParam::DefaultRuntimeParam(); + param.gcParam.enableGC = false; + BaseRuntime::GetInstance()->Init(param); + } + + static void TearDownTestCase() + { + BaseRuntime::GetInstance()->Fini(); + } + + void SetUp() override + { + holder_ = ThreadHolder::CreateAndRegisterNewThreadHolder(nullptr); + scope_ = new ThreadHolder::TryBindMutatorScope(holder_); + } + + void TearDown() override + { + if (scope_ != nullptr) { + delete scope_; + scope_ = nullptr; + } + } + + ThreadHolder *holder_ {nullptr}; + ThreadHolder::TryBindMutatorScope *scope_ {nullptr}; +}; + +HWTEST_F_L0(HeuristicGCPolicyTest, ShouldRestrainGCOnStartupOrSensitive_Test1) +{ + HeuristicGCPolicy gcPolicy; + gcPolicy.Init(); + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP_FINISH); + gcPolicy.TryHeuristicGC(); + 
EXPECT_FALSE(gcPolicy.ShouldRestrainGCOnStartupOrSensitive()); +} + +HWTEST_F_L0(HeuristicGCPolicyTest, ShouldRestrainGCOnStartupOrSensitive_Test2) +{ + HeuristicGCPolicy gcPolicy; + gcPolicy.Init(); + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP); + EXPECT_TRUE(gcPolicy.ShouldRestrainGCOnStartupOrSensitive()); + + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + auto allocated = theAllocator.GetAllocatedBytes(); + auto param = BaseRuntime::GetInstance()->GetHeapParam(); + auto size = param.heapSize * KB * HeuristicGCPolicy::COLD_STARTUP_PHASE1_GC_THRESHOLD_RATIO; + for (int i = 0; allocated < size; i++) { + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + allocated = theAllocator.GetAllocatedBytes(); + } + + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP_FINISH); + EXPECT_FALSE(gcPolicy.ShouldRestrainGCOnStartupOrSensitive()); + + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP); + EXPECT_FALSE(gcPolicy.ShouldRestrainGCOnStartupOrSensitive()); + + theAllocator.GetOldSpace().AssembleRecentFull(); + auto& fromSpace = theAllocator.GetFromSpace(); + theAllocator.GetOldSpace().AssembleGarbageCandidates(fromSpace); + fromSpace.GetFromRegionList().ClearList(); + allocated = theAllocator.GetAllocatedBytes(); + EXPECT_EQ(allocated, 0); +} + +HWTEST_F_L0(HeuristicGCPolicyTest, ShouldRestrainGCOnStartupOrSensitive_Test3) +{ + HeuristicGCPolicy gcPolicy; + gcPolicy.Init(); + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP_PARTIALLY_FINISH); + EXPECT_TRUE(gcPolicy.ShouldRestrainGCOnStartupOrSensitive()); + + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + auto allocated = theAllocator.GetAllocatedBytes(); + auto param = BaseRuntime::GetInstance()->GetHeapParam(); + auto size = param.heapSize * KB * HeuristicGCPolicy::COLD_STARTUP_PHASE2_GC_THRESHOLD_RATIO; + for (int i = 0; allocated < size; i++) { + uintptr_t 
addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + allocated = theAllocator.GetAllocatedBytes(); + } + + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP_FINISH); + EXPECT_FALSE(gcPolicy.ShouldRestrainGCOnStartupOrSensitive()); + + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP_PARTIALLY_FINISH); + EXPECT_FALSE(gcPolicy.ShouldRestrainGCOnStartupOrSensitive()); + + theAllocator.GetOldSpace().AssembleRecentFull(); + auto& fromSpace = theAllocator.GetFromSpace(); + theAllocator.GetOldSpace().AssembleGarbageCandidates(fromSpace); + fromSpace.GetFromRegionList().ClearList(); + allocated = theAllocator.GetAllocatedBytes(); + EXPECT_EQ(allocated, 0); +} + +HWTEST_F_L0(HeuristicGCPolicyTest, NotifyNativeAllocation) +{ + HeuristicGCPolicy gcPolicy; + size_t initialNotified = gcPolicy.GetNotifiedNativeSize(); + size_t initialObjects = gcPolicy.GetNativeHeapThreshold(); + + gcPolicy.NotifyNativeAllocation(NATIVE_IMMEDIATE_THRESHOLD / 2); + + EXPECT_EQ(gcPolicy.GetNotifiedNativeSize(), initialNotified + NATIVE_IMMEDIATE_THRESHOLD / 2); + EXPECT_NE(gcPolicy.GetNativeHeapThreshold(), initialObjects + 1); +} + +HWTEST_F_L0(HeuristicGCPolicyTest, NotifyNativeAllocation_TriggerByBytes) +{ + HeuristicGCPolicy gcPolicy; + size_t initialNotified = gcPolicy.GetNotifiedNativeSize(); + size_t initialObjects = gcPolicy.GetNativeHeapThreshold(); + + gcPolicy.NotifyNativeAllocation(NATIVE_IMMEDIATE_THRESHOLD + 1); + + EXPECT_EQ(gcPolicy.GetNotifiedNativeSize(), initialNotified + NATIVE_IMMEDIATE_THRESHOLD + 1); + EXPECT_NE(gcPolicy.GetNativeHeapThreshold(), initialObjects + 1); +} + +HWTEST_F_L0(HeuristicGCPolicyTest, TryHeuristicGC) +{ + HeuristicGCPolicy gcPolicy; + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP_FINISH); + Heap::GetHeap().GetCollector().GetGCStats().heapThreshold = 0; + gcPolicy.TryHeuristicGC(); + Heap::GetHeap().GetCollector().GetGCStats().shouldRequestYoung = true; + gcPolicy.TryHeuristicGC(); + 
EXPECT_EQ(Heap::GetHeap().GetAllocator().GetAllocatedBytes(), + Heap::GetHeap().GetCollector().GetGCStats().GetThreshold()); +} + +HWTEST_F_L0(HeuristicGCPolicyTest, ChangeGCParams) +{ + HeuristicGCPolicy gcPolicy; + gcPolicy.RecordAliveSizeAfterLastGC(1); + gcPolicy.ChangeGCParams(true); + EXPECT_EQ(Heap::GetHeap().GetAllocator().GetAllocatedBytes(), 0); +} + +HWTEST_F_L0(HeuristicGCPolicyTest, CheckAndTriggerHintGC) +{ + HeuristicGCPolicy gcPolicy; + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP); + bool result = gcPolicy.CheckAndTriggerHintGC(MemoryReduceDegree::HIGH); + ASSERT_FALSE(result); + + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP_FINISH); + result = gcPolicy.CheckAndTriggerHintGC(MemoryReduceDegree::HIGH); + ASSERT_FALSE(result); + + gcPolicy.RecordAliveSizeAfterLastGC(1); + result = gcPolicy.CheckAndTriggerHintGC(MemoryReduceDegree::HIGH); + ASSERT_FALSE(result); + + result = gcPolicy.CheckAndTriggerHintGC(MemoryReduceDegree::LOW); + ASSERT_FALSE(result); +} + +HWTEST_F_L0(HeuristicGCPolicyTest, NotifyNativeAllocation_TriggerByBytes1) +{ + HeuristicGCPolicy gcPolicy; + size_t initialNotified = gcPolicy.GetNotifiedNativeSize(); + size_t initialObjects = gcPolicy.GetNativeHeapThreshold(); + + gcPolicy.SetNativeHeapThreshold(1); + gcPolicy.NotifyNativeAllocation(NATIVE_IMMEDIATE_THRESHOLD + 1); + + EXPECT_EQ(gcPolicy.GetNotifiedNativeSize(), initialNotified + NATIVE_IMMEDIATE_THRESHOLD + 1); + EXPECT_NE(gcPolicy.GetNativeHeapThreshold(), initialObjects + 1); +} + +HWTEST_F_L0(HeuristicGCPolicyTest, ShouldRestrainGCOnStartup) { + Heap& heap = Heap::GetHeap(); + HeuristicGCPolicy& heuristicGCPolicy = heap.GetHeuristicGCPolicy(); + heuristicGCPolicy.TryHeuristicGC(); + + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP); + bool result = heuristicGCPolicy.ShouldRestrainGCOnStartupOrSensitive(); + EXPECT_EQ(result, true); // cold Startup + + 
StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP_PARTIALLY_FINISH); + result = heuristicGCPolicy.ShouldRestrainGCOnStartupOrSensitive(); + EXPECT_EQ(result, true); // cold partially Startup + + StartupStatusManager::SetStartupStatus(StartupStatus::COLD_STARTUP_FINISH); + result = heuristicGCPolicy.ShouldRestrainGCOnStartupOrSensitive(); + EXPECT_EQ(result, false); // cold partially Startup +} + +HWTEST_F_L0(HeuristicGCPolicyTest, ShouldRestrainGCInSensitive) { + Heap& heap = Heap::GetHeap(); + HeuristicGCPolicy& heuristicGCPolicy = heap.GetHeuristicGCPolicy(); + heuristicGCPolicy.TryIdleGC(); + bool result = heuristicGCPolicy.ShouldRestrainGCInSensitive(1 * MB); + EXPECT_EQ(result, false); // normal scene + + heap.NotifyHighSensitive(true); + GCStats& gcStates = heap.GetCollectorResources().GetGCStats(); + gcStates.shouldRequestYoung = true; + result = heuristicGCPolicy.ShouldRestrainGCInSensitive(1 * MB); + EXPECT_EQ(result, false); // sensitive scene + + gcStates.shouldRequestYoung = false; + result = heuristicGCPolicy.ShouldRestrainGCInSensitive(1 * MB); + EXPECT_EQ(result, true); // sensitive scene for young gc + + result = heuristicGCPolicy.ShouldRestrainGCInSensitive(200 * MB); + EXPECT_EQ(result, false); // sensitive scene for size over threshold + + heap.NotifyHighSensitive(false); + EXPECT_EQ(result, false); // exit sensitive +} + +HWTEST_F_L0(HeuristicGCPolicyTest, CheckAndTriggerHintGCLow) { + Heap& heap = Heap::GetHeap(); + HeuristicGCPolicy& heuristicGCPolicy = heap.GetHeuristicGCPolicy(); + bool result = heuristicGCPolicy.CheckAndTriggerHintGC(MemoryReduceDegree::LOW); + EXPECT_EQ(result, false); + + heap.NotifyHighSensitive(false); + result = heuristicGCPolicy.CheckAndTriggerHintGC(MemoryReduceDegree::LOW); + EXPECT_EQ(result, false); + + auto obj = common::HeapAllocator::AllocateLargeRegion(7 * MB); + RegionDesc *regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); + HeapAddress start = 
regionInfo->GetRegionAllocPtr(); + HeapAddress end = regionInfo->GetRegionEnd(); + constexpr uint64_t TAG_SPECIAL = 0x02ULL; + for (HeapAddress current = start; current < end; current += sizeof(HeapAddress)) { + *(reinterpret_cast(current)) = TAG_SPECIAL; + } + + heap.RecordAliveSizeAfterLastGC(1); + result = heuristicGCPolicy.CheckAndTriggerHintGC(MemoryReduceDegree::LOW); + EXPECT_EQ(result, true); +} + +HWTEST_F_L0(HeuristicGCPolicyTest, CheckAndTriggerHintGCHigh) { + Heap& heap = Heap::GetHeap(); + HeuristicGCPolicy& heuristicGCPolicy = heap.GetHeuristicGCPolicy(); + + auto obj = common::HeapAllocator::AllocateLargeRegion(2 * MB); + RegionDesc *regionInfo = RegionDesc::GetAliveRegionDescAt(reinterpret_cast(obj)); + HeapAddress start = regionInfo->GetRegionAllocPtr(); + HeapAddress end = regionInfo->GetRegionEnd(); + constexpr uint64_t TAG_SPECIAL = 0x02ULL; + for (HeapAddress current = start; current < end; current += sizeof(HeapAddress)) { + *(reinterpret_cast(current)) = TAG_SPECIAL; + } + + heap.RecordAliveSizeAfterLastGC(1); + bool result = heuristicGCPolicy.CheckAndTriggerHintGC(MemoryReduceDegree::HIGH); + EXPECT_EQ(result, true); +} +} // namespace common::test \ No newline at end of file diff --git a/common_components/heap/collector/tests/marking_collector_test.cpp b/common_components/heap/collector/tests/marking_collector_test.cpp new file mode 100755 index 0000000000000000000000000000000000000000..f96ff418e6b8f9789e613a2d95dc8d8f842445fd --- /dev/null +++ b/common_components/heap/collector/tests/marking_collector_test.cpp @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/heap/collector/marking_collector.h" +#include "common_components/heap/heap_manager.h" +#include "common_components/heap/ark_collector/ark_collector.h" +#include "common_components/mutator/mutator_manager.h" +#include "common_components/tests/test_helper.h" +#include + +using namespace common; + +namespace common::test { +class MarkingCollectorTest : public common::test::BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() + { + BaseRuntime::GetInstance()->Fini(); + } + void SetUp() override + { + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::ARK_PROCESSOR); + } + + void TearDown() override + { + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::ARK_PROCESSOR); + } + + StaticRootTable rootTable_; + bool ContainsRoot(StaticRootTable& table, const StaticRootTable::StaticRootArray* array, uint32_t size) + { + bool found = false; + auto visitor = [&found, array, size](RefField<>& root) { + for (uint32_t i = 0; i < size; ++i) { + if (&root == array->content[i]) { + found = true; + return; + } + } + }; + table.VisitRoots(visitor); + return found; + } + class TableMarkingCollctor : public MarkingCollector { + public: + using MarkingCollector::SetGCReason; + using MarkingCollector::MarkingRoots; + using MarkingCollector::PushRootToWorkStack; + using MarkingCollector::UpdateNativeThreshold; + }; +}; + +HWTEST_F_L0(MarkingCollectorTest, RunGarbageCollection) +{ + MarkingCollector& collector = 
reinterpret_cast(Heap::GetHeap().GetCollector()); + Heap::GetHeap().SetGCReason(GCReason::GC_REASON_YOUNG); + collector.RunGarbageCollection(0, GCReason::GC_REASON_USER, common::GC_TYPE_FULL); + ASSERT_FALSE(Heap::GetHeap().GetCollector().GetGCStats().isYoungGC()); + + Heap::GetHeap().SetGCReason(GCReason::GC_REASON_BACKUP); + collector.RunGarbageCollection(0, GCReason::GC_REASON_OOM, common::GC_TYPE_FULL); + ASSERT_FALSE(Heap::GetHeap().GetCollector().GetGCStats().isYoungGC()); +} + +HWTEST_F_L0(MarkingCollectorTest, RunGarbageCollectionTest2) +{ + MarkingCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); + Heap::GetHeap().SetGCReason(GCReason::GC_REASON_YOUNG); + collector.RunGarbageCollection(0, GCReason::GC_REASON_YOUNG, common::GC_TYPE_FULL); + ASSERT_TRUE(Heap::GetHeap().GetCollector().GetGCStats().isYoungGC()); +} + +HWTEST_F_L0(MarkingCollectorTest, UpdateNativeThresholdTest) +{ + TableMarkingCollctor& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); + GCParam gcParam; + gcParam.minGrowBytes = 1024; + Heap::GetHeap().SetNativeHeapThreshold(512); + auto oldThreshold = Heap::GetHeap().GetNativeHeapThreshold(); + collector.UpdateNativeThreshold(gcParam); + auto newThreshold = Heap::GetHeap().GetNativeHeapThreshold(); + EXPECT_NE(newThreshold, oldThreshold); +} + +HWTEST_F_L0(MarkingCollectorTest, UpdateNativeThresholdTest2) +{ + TableMarkingCollctor& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); + Heap::GetHeap().NotifyNativeAllocation(1100 * MB); + + GCParam param; + collector.UpdateNativeThreshold(param); + ASSERT_TRUE(Heap::GetHeap().GetNotifiedNativeSize() > MAX_NATIVE_SIZE_INC); +} + +HWTEST_F_L0(MarkingCollectorTest, MarkingRootsTest) +{ + TableMarkingCollctor& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); + CArrayList roots; + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + 
BaseObject* obj = reinterpret_cast(addr); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::OLD_REGION); + Heap::GetHeap().SetGCReason(GC_REASON_YOUNG); + collector.SetGCReason(GC_REASON_YOUNG); + + roots.push_back(obj); + collector.MarkingRoots(roots); + ASSERT_TRUE(region->IsInOldSpace()); +} + +HWTEST_F_L0(MarkingCollectorTest, PushRootToWorkStackTest) +{ + TableMarkingCollctor& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + BaseObject* obj = reinterpret_cast(addr); + GlobalMarkStack globalMarkStack; + LocalCollectStack collectStack(&globalMarkStack); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::RECENT_LARGE_REGION); + collector.SetGCReason(GC_REASON_NATIVE); + region->MarkObject(obj); + bool result = collector.PushRootToWorkStack(collectStack, obj); + ASSERT_FALSE(result); + + region->SetRegionType(RegionDesc::RegionType::RECENT_LARGE_REGION); + collector.SetGCReason(GC_REASON_YOUNG); + result = collector.PushRootToWorkStack(collectStack, obj); + ASSERT_FALSE(result); + + region->SetRegionType(RegionDesc::RegionType::OLD_REGION); + collector.SetGCReason(GC_REASON_NATIVE); + result = collector.PushRootToWorkStack(collectStack, obj); + ASSERT_FALSE(result); +} +} \ No newline at end of file diff --git a/common_components/heap/collector/tests/task_queue_test.cpp b/common_components/heap/collector/tests/task_queue_test.cpp index 7b3692a74182395dc386d0a50993d99d6c31d7b9..f11f30e1a543b554f229e117d522bbdd70ef1c42 100644 --- a/common_components/heap/collector/tests/task_queue_test.cpp +++ b/common_components/heap/collector/tests/task_queue_test.cpp @@ -15,242 +15,100 @@ #include "common_components/heap/collector/task_queue.h" #include "common_components/heap/collector/collector_proxy.h" 
- +#include "common_components/heap/collector/gc_request.h" #include "common_components/tests/test_helper.h" -using namespace common; - -// ==================== Mock Classes ==================== - -class MockCollectorProxy : public CollectorProxy { -public: - explicit MockCollectorProxy(Allocator& allocator, CollectorResources& resources) - : CollectorProxy(allocator, resources), runCalled_(false), - lastGcIndex_(0), lastReason_(GC_REASON_INVALID) {} - - void RunGarbageCollection(uint64_t gcIndex, GCReason reason) override - { - runCalled_ = true; - lastGcIndex_ = gcIndex; - lastReason_ = reason; - } - - bool WasRunCalled() const - { - return runCalled_; - } - uint64_t GetLastGcIndex() const - { - return lastGcIndex_; - } - GCReason GetLastReason() const - { - return lastReason_; - } - - void Reset() - { - runCalled_ = false; - lastGcIndex_ = 0; - lastReason_ = GC_REASON_INVALID; - } - -private: - mutable bool runCalled_; - uint64_t lastGcIndex_; - GCReason lastReason_; -}; - -constexpr size_t DEFAULT_MAX_CAPACITY_SIZE = 10 * 1024 * 1024; -constexpr size_t DEFAULT_CAPACITY_SIZE = 5 * 1024 * 1024; - -class TestAllocator : public Allocator { +namespace common { +class StubAllocator : public Allocator { public: - HeapAddress Allocate(size_t size, AllocType type) override - { - return 0; - } - HeapAddress AllocateNoGC(size_t size, AllocType type) override - { - return 0; - } - bool ForEachObject(const std::function& callback, bool safe) const override - { - return true; - } - size_t ReclaimGarbageMemory(bool releaseAll) override - { - return 0; - } - void FeedHungryBuffers() override {} - size_t LargeObjectSize() const override - { - return 0; - } - size_t GetAllocatedBytes() const override - { - return 0; - } + HeapAddress Allocate(size_t size, AllocType allocType) override { return 0; } + HeapAddress AllocateNoGC(size_t size, AllocType allocType) override { return 0; } + bool ForEachObject(const std::function&, bool safe) const override { return true; } + size_t 
ReclaimGarbageMemory(bool releaseAll) override { return 0; } + size_t LargeObjectSize() const override { return 0; } + size_t GetAllocatedBytes() const override { return 0; } void Init(const RuntimeParam& param) override {} - - size_t GetMaxCapacity() const override - { - return DEFAULT_MAX_CAPACITY_SIZE; - } - size_t GetCurrentCapacity() const override - { - return DEFAULT_CAPACITY_SIZE; - } - size_t GetUsedPageSize() const override - { - return 0; - } - HeapAddress GetSpaceStartAddress() const override - { - return 0; - } - HeapAddress GetSpaceEndAddress() const override - { - return 0; - } + size_t GetMaxCapacity() const override { return 0; } + size_t GetCurrentCapacity() const override { return 0; } + size_t GetUsedPageSize() const override { return 0; } + HeapAddress GetSpaceStartAddress() const override { return 0; } + HeapAddress GetSpaceEndAddress() const override { return 0; } #ifndef NDEBUG - bool IsHeapObject(HeapAddress addr) const - { - return false; - } + bool IsHeapObject(HeapAddress) const override { return false; } #endif + void FeedHungryBuffers() override {} + size_t GetSurvivedSize() const override { return 0; } }; -class TestCollectorResources : public CollectorResources { +class StubCollectorProxy : public CollectorProxy { public: - explicit TestCollectorResources(CollectorProxy& proxy) : CollectorResources(proxy) {} - void Init() {} - void Fini() {} -}; + explicit StubCollectorProxy(Allocator& allocator, CollectorResources& resources) + : CollectorProxy(allocator, resources) {} -// ==================== Fix: Use correct GCTask interface ==================== + void RunGarbageCollection(uint64_t gcIndex, GCReason reason, GCType gcType) override {} +}; +} -class MockGCTask : public GCTask { +namespace common { +class DummyCollectorProxy : public CollectorProxy { public: - explicit MockGCTask(GCTaskType type, uint64_t index, GCReason reason) - : GCTask(type), taskIndex_(index), gcReason_(reason) {} - - uint64_t GetGCIndex() const - { - return 
taskIndex_; - } - GCReason GetGCReason() const - { - return gcReason_; - } - - bool Execute(void* owner) override - { - if (owner == nullptr) { - return false; - } - - auto* proxy = reinterpret_cast(owner); - switch (GetTaskType()) { - case GCTaskType::GC_TASK_TERMINATE_GC: - return false; - case GCTaskType::GC_TASK_TIMEOUT_GC: - // Simulate timeout behavior - return true; - case GCTaskType::GC_TASK_INVOKE_GC: - proxy->RunGarbageCollection(taskIndex_, gcReason_); - return true; - default: - return true; - } - } - -private: - uint64_t taskIndex_; - GCReason gcReason_; + explicit DummyCollectorProxy(Allocator& alloc, CollectorResources& res) + : CollectorProxy(alloc, res) {} + void RunGarbageCollection(uint64_t gcIndex, GCReason reason, GCType gcType) override {} }; -// ==================== Test Fixture ==================== +class DummyCollectorResources : public CollectorResources { +private: + DummyCollectorProxy proxy_; -// class GCRunnerTest : public BaseTestWithScope { +public: + explicit DummyCollectorResources(Allocator& alloc) + : CollectorResources(proxy_), + proxy_(alloc, *this) {} +}; +} namespace common::test { class GCRunnerTest : public common::test::BaseTestWithScope { protected: void SetUp() override { - resources_.reset(new TestCollectorResources(proxy_)); - mockProxy_ = new MockCollectorProxy(allocator_, *resources_); + allocator_ = std::make_unique(); + dummyResources_ = std::make_unique(*allocator_); + proxy_ = std::make_unique(*allocator_, *dummyResources_); + proxyStorage_ = std::make_unique(*allocator_, *dummyResources_); } void TearDown() override { - delete mockProxy_; - mockProxy_ = nullptr; - resources_.reset(); + proxyStorage_.reset(); + dummyResources_.reset(); + proxy_.reset(); + allocator_.reset(); } - TestAllocator allocator_; - std::unique_ptr resources_; - MockCollectorProxy* mockProxy_; - CollectorProxy proxy_{allocator_, *resources_}; + std::unique_ptr allocator_; + std::unique_ptr proxy_; + std::unique_ptr proxyStorage_; + 
std::unique_ptr dummyResources_; }; -// ==================== Test Cases ==================== - -/** - * @tc.name: GCRunner_Execute_Terminate - * @tc.desc: Test GC_TASK_TERMINATE_GC task type. - * @tc.type: FUNC - */ -HWTEST_F_L0(GCRunnerTest, Execute_Terminate) { - // Arrange - MockGCTask task(GCTask::GCTaskType::GC_TASK_TERMINATE_GC, 0, GC_REASON_INVALID); - - // Act - bool result = task.Execute(mockProxy_); - - // Assert - EXPECT_FALSE(result); // Should return false to terminate thread +HWTEST_F_L0(GCRunnerTest, Execute_TerminateGC) { + common::GCRunner runner(common::GCTask::GCTaskType::GC_TASK_TERMINATE_GC); + bool result = runner.Execute(proxyStorage_.get()); + EXPECT_FALSE(result); } -/** - * @tc.name: GCRunner_Execute_InvokeGC - * @tc.desc: Test GC_TASK_INVOKE_GC triggers RunGarbageCollection. - * @tc.type: FUNC - */ HWTEST_F_L0(GCRunnerTest, Execute_InvokeGC) { - // Arrange - mockProxy_->Reset(); - - MockGCTask task(GCTask::GCTaskType::GC_TASK_INVOKE_GC, 123, GC_REASON_FORCE); - - // Act - bool result = task.Execute(mockProxy_); - - // Assert - EXPECT_TRUE(result); // Thread should continue - EXPECT_TRUE(mockProxy_->WasRunCalled()); - EXPECT_EQ(mockProxy_->GetLastGcIndex(), 123U); - EXPECT_EQ(mockProxy_->GetLastReason(), GC_REASON_FORCE); + common::GCRunner runner(common::GCTask::GCTaskType::GC_TASK_INVOKE_GC, GC_REASON_BACKUP); + bool result = runner.Execute(proxyStorage_.get()); + EXPECT_TRUE(result); } -/** - * @tc.name: GCRunner_Execute_TimeoutGC_NoTrigger - * @tc.desc: Test GC_TASK_TIMEOUT_GC does not trigger GC when time not exceeded. 
- * @tc.type: FUNC - */ -HWTEST_F_L0(GCRunnerTest, Execute_TimeoutGC_NoTrigger) { - // Arrange - mockProxy_->Reset(); - - MockGCTask task(GCTask::GCTaskType::GC_TASK_TIMEOUT_GC, 0, GC_REASON_BACKUP); - - // Act - bool result = task.Execute(mockProxy_); - - // Assert +HWTEST_F_L0(GCRunnerTest, Execute_InvalidTaskType) { + common::GCRunner runner(static_cast( + static_cast(common::GCTask::GCTaskType::GC_TASK_DUMP_HEAP_IDE) + 1)); + bool result = runner.Execute(proxyStorage_.get()); EXPECT_TRUE(result); - EXPECT_FALSE(mockProxy_->WasRunCalled()); } -} \ No newline at end of file +} // namespace common::test diff --git a/common_components/heap/heap.cpp b/common_components/heap/heap.cpp index 9a3a5092c5a955cd1b93cd79b086c99565b0e29f..29d5aca830584c187ff0b590160da4550ed546fe 100644 --- a/common_components/heap/heap.cpp +++ b/common_components/heap/heap.cpp @@ -15,16 +15,15 @@ #include "common_components/heap/heap.h" +#include "common_components/heap/ark_collector/idle_barrier.h" +#include "common_components/heap/ark_collector/enum_barrier.h" +#include "common_components/heap/ark_collector/marking_barrier.h" +#include "common_components/heap/ark_collector/remark_barrier.h" +#include "common_components/heap/ark_collector/post_marking_barrier.h" +#include "common_components/heap/ark_collector/preforward_barrier.h" +#include "common_components/heap/ark_collector/copy_barrier.h" #include "common_components/heap/collector/collector_proxy.h" #include "common_components/heap/collector/collector_resources.h" -#include "common_components/heap/collector/heuristic_gc_policy.h" -#include "common_components/heap/w_collector/idle_barrier.h" -#include "common_components/heap/w_collector/enum_barrier.h" -#include "common_components/heap/w_collector/trace_barrier.h" -#include "common_components/heap/w_collector/remark_barrier.h" -#include "common_components/heap/w_collector/post_trace_barrier.h" -#include "common_components/heap/w_collector/preforward_barrier.h" -#include 
"common_components/heap/w_collector/copy_barrier.h" #include "common_components/mutator/mutator_manager.h" #if defined(_WIN64) @@ -36,6 +35,10 @@ #include #endif namespace common { +static_assert(Heap::NORMAL_UNIT_SIZE == RegionDesc::UNIT_SIZE); +static_assert(Heap::NORMAL_UNIT_HEADER_SIZE == RegionDesc::UNIT_HEADER_SIZE); +static_assert(Heap::NORMAL_UNIT_AVAILABLE_SIZE == RegionDesc::UNIT_AVAILABLE_SIZE); + std::atomic* Heap::currentBarrierPtr_ = nullptr; Barrier* Heap::stwBarrierPtr_ = nullptr; HeapAddress Heap::heapStartAddr_ = 0; @@ -46,8 +49,8 @@ public: HeapImpl() : theSpace_(Allocator::CreateAllocator()), collectorResources_(collectorProxy_), collectorProxy_(*theSpace_, collectorResources_), stwBarrier_(collectorProxy_), - idleBarrier_(collectorProxy_), enumBarrier_(collectorProxy_), traceBarrier_(collectorProxy_), - remarkBarrier_(collectorProxy_), postTraceBarrier_(collectorProxy_), preforwardBarrier_(collectorProxy_), + idleBarrier_(collectorProxy_), enumBarrier_(collectorProxy_), markingBarrier_(collectorProxy_), + remarkBarrier_(collectorProxy_), postMarkingBarrier_(collectorProxy_), preforwardBarrier_(collectorProxy_), copyBarrier_(collectorProxy_) { currentBarrier_.store(&stwBarrier_, std::memory_order_relaxed); @@ -64,13 +67,16 @@ public: bool IsSurvivedObject(const BaseObject* obj) const override { - return RegionSpace::IsMarkedObject(obj) || RegionSpace::IsResurrectedObject(obj); + return RegionalHeap::IsMarkedObject(obj) || RegionalHeap::IsResurrectedObject(obj); } bool IsGcStarted() const override { return collectorResources_.IsGcStarted(); } void WaitForGCFinish() override { return collectorResources_.WaitForGCFinish(); } + void MarkGCStart() override { return collectorResources_.MarkGCStart(); } + void MarkGCFinish() override { return collectorResources_.MarkGCFinish(); } + bool IsGCEnabled() const override { return isGCEnabled_.load(); } void EnableGC(bool val) override { return isGCEnabled_.store(val); } @@ -79,17 +85,26 @@ public: void 
SetGCReason(GCReason reason) override { gcReason_ = reason; } + bool InRecentSpace(const void *addr) override + { + RegionDesc *region = RegionDesc::GetRegionDescAt(reinterpret_cast(addr)); + return region->IsInRecentSpace(); + } + bool GetForceThrowOOM() const override { return isForceThrowOOM_; }; + void SetForceThrowOOM(bool val) override { isForceThrowOOM_ = val; }; + HeapAddress Allocate(size_t size, AllocType allocType, bool allowGC = true) override; GCPhase GetGCPhase() const override; void SetGCPhase(const GCPhase phase) override; Collector& GetCollector() override; Allocator& GetAllocator() override; - + HeuristicGCPolicy& GetHeuristicGCPolicy() override; size_t GetMaxCapacity() const override; size_t GetCurrentCapacity() const override; size_t GetUsedPageSize() const override; size_t GetAllocatedSize() const override; + size_t GetSurvivedSize() const override; size_t GetRemainHeapSize() const override; size_t GetAccumulatedAllocateSize() const override; size_t GetAccumulatedFreeSize() const override; @@ -101,8 +116,10 @@ public: FinalizerProcessor& GetFinalizerProcessor() override; CollectorResources& GetCollectorResources() override; void RegisterAllocBuffer(AllocationBuffer& buffer) override; + void UnregisterAllocBuffer(AllocationBuffer& buffer) override; void StopGCWork() override; void TryHeuristicGC() override; + void TryIdleGC() override; void NotifyNativeAllocation(size_t bytes) override; void NotifyNativeFree(size_t bytes) override; void NotifyNativeReset(size_t oldBytes, size_t newBytes) override; @@ -112,6 +129,11 @@ public: void ChangeGCParams(bool isBackground) override; void RecordAliveSizeAfterLastGC(size_t aliveBytes) override; bool CheckAndTriggerHintGC(MemoryReduceDegree degree) override; + void NotifyHighSensitive(bool isStart) override; + void SetRecordHeapObjectSizeBeforeSensitive(size_t objSize) override; + AppSensitiveStatus GetSensitiveStatus() override; + StartupStatus GetStartupStatus() override; + bool OnStartupEvent() const 
override; private: // allocator is actually a subspace in heap @@ -126,9 +148,9 @@ private: Barrier stwBarrier_; IdleBarrier idleBarrier_; EnumBarrier enumBarrier_; - TraceBarrier traceBarrier_; + MarkingBarrier markingBarrier_; RemarkBarrier remarkBarrier_; - PostTraceBarrier postTraceBarrier_; + PostMarkingBarrier postMarkingBarrier_; PreforwardBarrier preforwardBarrier_; CopyBarrier copyBarrier_; std::atomic currentBarrier_ = nullptr; @@ -139,6 +161,7 @@ private: std::atomic isGCEnabled_ = { true }; GCReason gcReason_ = GCReason::GC_REASON_INVALID; + bool isForceThrowOOM_ = { false }; }; // end class HeapImpl static ImmortalWrapper g_heapInstance; @@ -195,11 +218,21 @@ void HeapImpl::StopRuntimeThreads() collectorResources_.StopRuntimeThreads(); } +HeuristicGCPolicy& HeapImpl::GetHeuristicGCPolicy() +{ + return heuristicGCPolicy_; +} + void HeapImpl::TryHeuristicGC() { heuristicGCPolicy_.TryHeuristicGC(); } +void HeapImpl::TryIdleGC() +{ + heuristicGCPolicy_.TryIdleGC(); +} + void HeapImpl::NotifyNativeAllocation(size_t bytes) { heuristicGCPolicy_.NotifyNativeAllocation(bytes); @@ -246,6 +279,33 @@ bool HeapImpl::CheckAndTriggerHintGC(MemoryReduceDegree degree) return heuristicGCPolicy_.CheckAndTriggerHintGC(degree); } +void HeapImpl::NotifyHighSensitive(bool isStart) +{ + heuristicGCPolicy_.NotifyHighSensitive(isStart); +} + +void HeapImpl::SetRecordHeapObjectSizeBeforeSensitive(size_t objSize) +{ + if (heuristicGCPolicy_.InSensitiveStatus()) { + heuristicGCPolicy_.SetRecordHeapObjectSizeBeforeSensitive(objSize); + } +} + +AppSensitiveStatus HeapImpl::GetSensitiveStatus() +{ + return heuristicGCPolicy_.GetSensitiveStatus(); +} + +StartupStatus HeapImpl::GetStartupStatus() +{ + return heuristicGCPolicy_.GetStartupStatus(); +} + +bool HeapImpl::OnStartupEvent() const +{ + return heuristicGCPolicy_.OnStartupEvent(); +} + Collector& HeapImpl::GetCollector() { return collectorProxy_.GetCurrentCollector(); } Allocator& HeapImpl::GetAllocator() { return *theSpace_; } 
@@ -255,7 +315,7 @@ void HeapImpl::InstallBarrier(const GCPhase phase) if (phase == GCPhase::GC_PHASE_ENUM) { currentBarrier_.store(&enumBarrier_, std::memory_order_relaxed); } else if (phase == GCPhase::GC_PHASE_MARK) { - currentBarrier_.store(&traceBarrier_, std::memory_order_relaxed); + currentBarrier_.store(&markingBarrier_, std::memory_order_relaxed); } else if (phase == GCPhase::GC_PHASE_PRECOPY) { currentBarrier_.store(&preforwardBarrier_, std::memory_order_relaxed); } else if (phase == GCPhase::GC_PHASE_COPY || phase == GCPhase::GC_PHASE_FIX) { @@ -263,7 +323,7 @@ void HeapImpl::InstallBarrier(const GCPhase phase) } else if (phase == GCPhase::GC_PHASE_IDLE) { currentBarrier_.store(&idleBarrier_, std::memory_order_relaxed); } else if (phase == GCPhase::GC_PHASE_POST_MARK) { - currentBarrier_.store(&postTraceBarrier_, std::memory_order_relaxed); + currentBarrier_.store(&postMarkingBarrier_, std::memory_order_relaxed); } else if (phase == GCPhase::GC_PHASE_FINAL_MARK || phase == GCPhase::GC_PHASE_REMARK_SATB) { currentBarrier_ = &remarkBarrier_; @@ -283,11 +343,13 @@ size_t HeapImpl::GetUsedPageSize() const { return theSpace_->GetUsedPageSize(); size_t HeapImpl::GetAllocatedSize() const { return theSpace_->GetAllocatedBytes(); } -size_t HeapImpl::GetRemainHeapSize() const { return theSpace_->GetMaxCapacity() - theSpace_->GetUsedPageSize(); } +size_t HeapImpl::GetRemainHeapSize() const { return theSpace_->GetMaxCapacity() - theSpace_->GetAllocatedBytes(); } + +size_t HeapImpl::GetSurvivedSize() const { return theSpace_->GetSurvivedSize(); } size_t HeapImpl::GetAccumulatedAllocateSize() const { - return collectorResources_.GetGCStats().GetAccumulatedFreeSize() + theSpace_->GetUsedPageSize(); + return collectorResources_.GetGCStats().GetAccumulatedFreeSize() + theSpace_->GetAllocatedBytes(); } size_t HeapImpl::GetAccumulatedFreeSize() const { return collectorResources_.GetGCStats().GetAccumulatedFreeSize(); } @@ -307,4 +369,5 @@ CollectorResources& 
HeapImpl::GetCollectorResources() { return collectorResource void HeapImpl::StopGCWork() { collectorResources_.StopGCWork(); } void HeapImpl::RegisterAllocBuffer(AllocationBuffer& buffer) { GetAllocator().RegisterAllocBuffer(buffer); } +void HeapImpl::UnregisterAllocBuffer(AllocationBuffer& buffer) { GetAllocator().UnregisterAllocBuffer(buffer); } } // namespace common diff --git a/common_components/heap/heap.h b/common_components/heap/heap.h index 69ee31dffb555e6fafead82158f8ad19654c7358..e3b548e9df4723dc834a4519f3d9e051c7fed311 100755 --- a/common_components/heap/heap.h +++ b/common_components/heap/heap.h @@ -24,21 +24,47 @@ #include "common_components/common/type_def.h" #include "common_components/heap/barrier/barrier.h" #include "common_components/heap/collector/collector.h" +#include "common_components/heap/collector/heuristic_gc_policy.h" #include "common_interfaces/base/runtime_param.h" #include "common_interfaces/base_runtime.h" +#include "common_interfaces/profiler/heap_profiler_listener.h" namespace common { class Allocator; class AllocationBuffer; class FinalizerProcessor; class CollectorResources; +class HeuristicGCPolicy; using MemoryReduceDegree = common::MemoryReduceDegree; +using AppSensitiveStatus = common::AppSensitiveStatus; +using StartupStatus = common::StartupStatus; class Heap { public: + // These need to keep same with that in `RegionDesc` + static constexpr size_t NORMAL_UNIT_SIZE = 256 * 1024; + static constexpr size_t NORMAL_UNIT_HEADER_SIZE = AlignUp(2 * sizeof(void *) + sizeof(uint8_t), 8); + static constexpr size_t NORMAL_UNIT_AVAILABLE_SIZE = NORMAL_UNIT_SIZE - NORMAL_UNIT_HEADER_SIZE; + + static constexpr size_t GetNormalRegionSize() + { + return NORMAL_UNIT_SIZE; + } + + static constexpr size_t GetNormalRegionHeaderSize() + { + return NORMAL_UNIT_HEADER_SIZE; + } + + static constexpr size_t GetNormalRegionAvailableSize() + { + return NORMAL_UNIT_AVAILABLE_SIZE; + } + static void throwOOM() { // Maybe we need to add heapdump logic 
here + HeapProfilerListener::GetInstance().OnOutOfMemoryEventCb(); LOG_COMMON(FATAL) << "Out of Memory, abort."; UNREACHABLE_CC(); } @@ -60,6 +86,8 @@ public: virtual bool IsGcStarted() const = 0; virtual void WaitForGCFinish() = 0; + virtual void MarkGCStart() = 0; + virtual void MarkGCFinish() = 0; virtual bool IsGCEnabled() const = 0; virtual void EnableGC(bool val) = 0; @@ -68,7 +96,9 @@ public: virtual Collector& GetCollector() = 0; virtual Allocator& GetAllocator() = 0; + virtual HeuristicGCPolicy& GetHeuristicGCPolicy() = 0; virtual void TryHeuristicGC() = 0; + virtual void TryIdleGC() = 0; virtual void NotifyNativeAllocation(size_t bytes) = 0; virtual void NotifyNativeFree(size_t bytes) = 0; virtual void NotifyNativeReset(size_t oldBytes, size_t newBytes) = 0; @@ -78,6 +108,11 @@ public: virtual void ChangeGCParams(bool isBackground) = 0; virtual void RecordAliveSizeAfterLastGC(size_t aliveBytes) = 0; virtual bool CheckAndTriggerHintGC(MemoryReduceDegree degree) = 0; + virtual void NotifyHighSensitive(bool isStart) = 0; + virtual void SetRecordHeapObjectSizeBeforeSensitive(size_t objSize) = 0; + virtual AppSensitiveStatus GetSensitiveStatus() = 0; + virtual StartupStatus GetStartupStatus() = 0; + virtual bool OnStartupEvent() const = 0; /* to avoid misunderstanding, variant types of heap size are defined as followed: * |------------------------------ max capacity ---------------------------------| * |------------------------------ current capacity ------------------------| @@ -99,6 +134,8 @@ public: // total memory allocated for each allocation request, including memory fragment for alignment or padding. 
virtual size_t GetAllocatedSize() const = 0; + virtual size_t GetSurvivedSize() const = 0; + virtual size_t GetRemainHeapSize() const = 0; virtual size_t GetAccumulatedAllocateSize() const = 0; @@ -127,7 +164,7 @@ public: return false; } - static void MarkJitFortMemInstalled(void* obj); + static void MarkJitFortMemInstalled(void *thread, void *obj); static bool IsHeapAddress(const void* addr) { return IsHeapAddress(reinterpret_cast(addr)); } @@ -147,12 +184,18 @@ public: virtual void RegisterAllocBuffer(AllocationBuffer& buffer) = 0; + virtual void UnregisterAllocBuffer(AllocationBuffer& buffer) = 0; + virtual void StopGCWork() = 0; virtual GCReason GetGCReason() = 0; virtual void SetGCReason(GCReason reason) = 0; + virtual bool InRecentSpace(const void *addr) = 0; + virtual bool GetForceThrowOOM() const = 0; + virtual void SetForceThrowOOM(bool val) = 0; + static void OnHeapCreated(HeapAddress startAddr) { heapStartAddr_ = startAddr; diff --git a/common_components/heap/heap_allocator-inl.h b/common_components/heap/heap_allocator-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..d7cac051d0f49efbf5b19ca6d98595dabdc3f16e --- /dev/null +++ b/common_components/heap/heap_allocator-inl.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef COMMON_COMPONENTS_HEAP_HEAP_ALLOCATOR_INL_H +#define COMMON_COMPONENTS_HEAP_HEAP_ALLOCATOR_INL_H + +#include "heap/heap_allocator.h" + +namespace common { +Address AllocateYoungInAllocBuffer(uintptr_t buffer, size_t size); +Address AllocateOldInAllocBuffer(uintptr_t buffer, size_t size); +} // namespace common +#endif // COMMON_COMPONENTS_HEAP_HEAP_ALLOCATOR_INL_H diff --git a/common_components/heap/heap_allocator.cpp b/common_components/heap/heap_allocator.cpp index 2eace71088b2798c87c639b1ae42d4ce688db0bc..1e99ceef58e1ad2506cbce219044e972cfa0ede8 100755 --- a/common_components/heap/heap_allocator.cpp +++ b/common_components/heap/heap_allocator.cpp @@ -15,29 +15,44 @@ #include "common_interfaces/heap/heap_allocator.h" +#include "common_components/heap/heap_allocator-inl.h" #include "common_components/common/type_def.h" #include "common_components/heap/heap_manager.h" #include "common_components/heap/allocator/region_manager.h" -#include "common_components/heap/allocator/region_space.h" +#include "common_components/heap/allocator/regional_heap.h" namespace common { -Address HeapAllocator::Allocate(size_t size, LanguageType language) +Address AllocateYoungInAllocBuffer(uintptr_t buffer, size_t size) +{ + ASSERT(buffer != 0); + AllocationBuffer *allocBuffer = reinterpret_cast(buffer); + return allocBuffer->FastAllocateInTlab(size); +} + +Address AllocateOldInAllocBuffer(uintptr_t buffer, size_t size) +{ + ASSERT(buffer != 0); + AllocationBuffer *allocBuffer = reinterpret_cast(buffer); + return allocBuffer->FastAllocateInTlab(size); +} + +Address HeapAllocator::AllocateInYoungOrHuge(size_t size, LanguageType language) { auto address = HeapManager::Allocate(size); BaseObject::Cast(address)->SetLanguageType(language); return address; } -Address HeapAllocator::AllocateInNonmove(size_t size, LanguageType language) +Address HeapAllocator::AllocateInNonmoveOrHuge(size_t size, LanguageType language) { - auto address = HeapManager::Allocate(size, 
AllocType::PINNED_OBJECT); + auto address = HeapManager::Allocate(size, AllocType::NONMOVABLE_OBJECT); BaseObject::Cast(address)->SetLanguageType(language); return address; } -Address HeapAllocator::AllocateInOld(size_t size, LanguageType language) +Address HeapAllocator::AllocateInOldOrHuge(size_t size, LanguageType language) { - auto address = HeapManager::Allocate(size); + auto address = HeapManager::Allocate(size, AllocType::MOVEABLE_OLD_OBJECT); BaseObject::Cast(address)->SetLanguageType(language); return address; } @@ -58,7 +73,7 @@ Address HeapAllocator::AllocateInReadOnly(size_t size, LanguageType language) uintptr_t HeapAllocator::AllocateLargeJitFortRegion(size_t size, LanguageType language) { - RegionSpace& allocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + RegionalHeap& allocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); auto address = allocator.AllocJitFortRegion(size); BaseObject::Cast(address)->SetLanguageType(language); return address; @@ -70,26 +85,34 @@ Address HeapAllocator::AllocateNoGC(size_t size) return HeapManager::Allocate(size, AllocType::MOVEABLE_OBJECT, false); } -Address HeapAllocator::AllocatePinNoGC(size_t size) +Address HeapAllocator::AllocateOldOrLargeNoGC(size_t size) +{ + if (size >= RegionDesc::LARGE_OBJECT_DEFAULT_THRESHOLD) { + return AllocateLargeRegion(size); + } + return HeapManager::Allocate(size, AllocType::MOVEABLE_OLD_OBJECT, false); +} + +Address HeapAllocator::AllocateNonmoveNoGC(size_t size) { - return HeapManager::Allocate(size, AllocType::PINNED_OBJECT, false); + return HeapManager::Allocate(size, AllocType::NONMOVABLE_OBJECT, false); } -Address HeapAllocator::AllocateRegion() +Address HeapAllocator::AllocateOldRegion() { - RegionSpace& allocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - return allocator.AllocRegion(); + RegionalHeap& allocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + return allocator.AllocOldRegion(); } -Address HeapAllocator::AllocatePinnedRegion() 
+Address HeapAllocator::AllocateNonMovableRegion() { - RegionSpace& allocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - return allocator.AllocPinnedRegion(); + RegionalHeap& allocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + return allocator.AllocateNonMovableRegion(); } Address HeapAllocator::AllocateLargeRegion(size_t size) { - RegionSpace& allocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + RegionalHeap& allocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); return allocator.AllocLargeRegion(size); } diff --git a/common_components/heap/heap_manager.cpp b/common_components/heap/heap_manager.cpp index 4952073ddcc86de5447ed64c67efc7801674e9a4..86254137413b3c753a34f3687b64d86689eefff8 100755 --- a/common_components/heap/heap_manager.cpp +++ b/common_components/heap/heap_manager.cpp @@ -17,17 +17,17 @@ #include "common_components/heap/heap.h" #include "common_components/heap/collector/collector.h" #include "common_components/heap/allocator/region_manager.h" -#include "common_components/heap/allocator/region_space.h" +#include "common_components/heap/allocator/regional_heap.h" namespace common { HeapManager::HeapManager() {} -void HeapManager::RequestGC(GCReason reason, bool async) +void HeapManager::RequestGC(GCReason reason, bool async, GCType gcType) { if (!Heap::GetHeap().IsGCEnabled()) { return; } Collector& collector = Heap::GetHeap().GetCollector(); - collector.RequestGC(reason, async); + collector.RequestGC(reason, async, gcType); } HeapAddress HeapManager::Allocate(size_t allocSize, AllocType allocType, bool allowGC) @@ -43,26 +43,26 @@ void HeapManager::StartRuntimeThreads() { Heap::GetHeap().StartRuntimeThreads(); void HeapManager::StopRuntimeThreads() { Heap::GetHeap().StopRuntimeThreads(); } -void HeapManager::MarkJitFortMemInstalled(void* obj) +void HeapManager::MarkJitFortMemInstalled(void *vm, void *obj) { - RegionManager& manager = reinterpret_cast(Heap::GetHeap().GetAllocator()).GetRegionManager(); - 
manager.MarkJitFortMemInstalled(reinterpret_cast(obj)); + RegionalHeap& regionalHeap = reinterpret_cast(Heap::GetHeap().GetAllocator()); + regionalHeap.MarkJitFortMemInstalled(vm, reinterpret_cast(obj)); } void HeapManager::SetReadOnlyToROSpace() { - RegionManager& manager = reinterpret_cast(Heap::GetHeap().GetAllocator()).GetRegionManager(); - manager.SetReadOnlyToRORegionList(); + RegionalHeap& regionalHeap = reinterpret_cast(Heap::GetHeap().GetAllocator()); + regionalHeap.SetReadOnlyToROSpace(); } void HeapManager::ClearReadOnlyFromROSpace() { - RegionManager& manager = reinterpret_cast(Heap::GetHeap().GetAllocator()).GetRegionManager(); - manager.ClearReadOnlyFromRORegionList(); + RegionalHeap& regionalHeap = reinterpret_cast(Heap::GetHeap().GetAllocator()); + regionalHeap.ClearReadOnlyFromROSpace(); } bool HeapManager::IsInROSpace(BaseObject *obj) { - return RegionSpace::IsReadOnlyObject(obj); + return RegionalHeap::IsReadOnlyObject(obj); } } // namespace common diff --git a/common_components/heap/heap_manager.h b/common_components/heap/heap_manager.h index eba890b1519a5d6d98273bb6a4dac2ffbe7e55b2..cbb15c4b23c3f701e32aad8211360484273892dd 100755 --- a/common_components/heap/heap_manager.h +++ b/common_components/heap/heap_manager.h @@ -32,8 +32,8 @@ public: void Init(const RuntimeParam& param); void Fini(); - static void RequestGC(GCReason reason, bool async); - static void MarkJitFortMemInstalled(void* obj); + static void RequestGC(GCReason reason, bool async, GCType gcType); + static void MarkJitFortMemInstalled(void *vm, void *obj); // alloc return memory address, not "object" pointers, since they're not // initialized yet diff --git a/common_components/heap/heap_visitor.cpp b/common_components/heap/heap_visitor.cpp index f03639824f52281396ab20d619e37c229c4e7c70..37bccac1046b6a2a4a570cc75e3a291dce0716a9 100755 --- a/common_components/heap/heap_visitor.cpp +++ b/common_components/heap/heap_visitor.cpp @@ -15,7 +15,7 @@ #include 
"common_interfaces/heap/heap_visitor.h" -#include "common_components/base_runtime/hooks.h" +#include "common_components/common_runtime/hooks.h" #include "common_components/mutator/mutator.h" namespace common { UnmarkAllXRefsHookFunc g_unmarkAllXRefsHook = nullptr; @@ -44,6 +44,17 @@ void RegisterSweepStaticRootsHook(SweepStaticRootsHookFunc func) void VisitRoots(const RefFieldVisitor &visitor) +{ + VisitDynamicGlobalRoots(visitor); + VisitDynamicLocalRoots(visitor); + VisitDynamicConcurrentRoots(visitor); + VisitBaseRoots(visitor); + if (g_visitStaticRootsHook != nullptr) { + g_visitStaticRootsHook(visitor); + } +} + +void VisitSTWRoots(const RefFieldVisitor &visitor) { VisitDynamicGlobalRoots(visitor); VisitDynamicLocalRoots(visitor); @@ -63,9 +74,15 @@ void UpdateRoots(const RefFieldVisitor &visitor) } } +void VisitConcurrentRoots(const RefFieldVisitor &visitor) +{ + VisitDynamicConcurrentRoots(visitor); +} + void VisitWeakRoots(const WeakRefFieldVisitor &visitor) { VisitDynamicWeakGlobalRoots(visitor); + VisitDynamicWeakGlobalRootsOld(visitor); VisitDynamicWeakLocalRoots(visitor); if (g_updateStaticRootsHook != nullptr) { g_updateStaticRootsHook(visitor); @@ -96,12 +113,12 @@ void UpdateGlobalRoots(const RefFieldVisitor &visitor) void VisitWeakGlobalRoots(const WeakRefFieldVisitor &visitor) { VisitDynamicWeakGlobalRoots(visitor); - if (g_updateStaticRootsHook != nullptr) { - g_updateStaticRootsHook(visitor); - } - if (g_sweepStaticRootsHook != nullptr) { - g_sweepStaticRootsHook(visitor); - } + VisitDynamicWeakGlobalRootsOld(visitor); +} + +void VisitPreforwardRoots(const RefFieldVisitor &visitor) +{ + VisitDynamicPreforwardRoots(visitor); } // Visit specific mutator's root. 
@@ -160,4 +177,11 @@ void RemoveXRefFromRoots() RemoveXRefFromDynamicRoots(); g_removeXRefFromStaticRootsHook(); } + +void VisitMutatorPreforwardRoot(const RefFieldVisitor &visitor, Mutator &mutator) +{ + if (mutator.GetEcmaVMPtr()) { + VisitDynamicThreadPreforwardRoot(visitor, mutator.GetEcmaVMPtr()); + } +} } // namespace common diff --git a/common_components/heap/space/appspawn_space.h b/common_components/heap/space/appspawn_space.h new file mode 100644 index 0000000000000000000000000000000000000000..bbd75fc42ca4d43a82df5b427c4e0ce4ab484bfd --- /dev/null +++ b/common_components/heap/space/appspawn_space.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef COMMON_COMPONENTS_HEAP_SPACE_APPSPAWN_SPACE_H +#define COMMON_COMPONENTS_HEAP_SPACE_APPSPAWN_SPACE_H + +#include "common_components/heap/allocator/region_manager.h" +#include "common_components/heap/space/regional_space.h" +#include "common_components/heap/allocator/region_desc.h" + +namespace common { +class AppSpawnSpace : public RegionalSpace { +public: + AppSpawnSpace(RegionManager& regionManager) + : RegionalSpace(regionManager), + appSpawnRegionList_("appSpawn regions") {} + + size_t GetUsedUnitCount() const + { + return appSpawnRegionList_.GetUnitCount(); + } + + size_t GetAllocatedSize() const + { + return appSpawnRegionList_.GetAllocatedSize(); + } + + void ReassembleAppspawnSpace(RegionList& regionList) + { + appSpawnRegionList_.MergeRegionList(regionList, RegionDesc::RegionType::APPSPAWN_REGION); + } + + void ClearAllGCInfo() + { + ClearGCInfo(appSpawnRegionList_); + } + + void MarkRememberSet(const std::function& func) + { + auto visitFunc = [&func](RegionDesc* region) { + region->VisitRememberSetBeforeMarking(func); + }; + appSpawnRegionList_.VisitAllRegions(visitFunc); + } + + void ClearRSet() + { + appSpawnRegionList_.VisitAllRegions([](RegionDesc* region) { region->ClearRSet(); }); + } + + void CollectFixTasks(FixHeapTaskList& taskList) + { + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + FixHeapWorker::CollectFixHeapTasks(taskList, appSpawnRegionList_, FIX_OLD_REGION); + } else { + FixHeapWorker::CollectFixHeapTasks(taskList, appSpawnRegionList_, FIX_REGION); + } + } + + void DumpRegionStats() const + { + appSpawnRegionList_.DumpRegionSummary(); + } + +private: + RegionList appSpawnRegionList_; +}; +} // namespace common +#endif // COMMON_COMPONENTS_HEAP_SPACE_APPSPAWN_SPACE_H \ No newline at end of file diff --git a/common_components/heap/space/from_space.cpp b/common_components/heap/space/from_space.cpp index 66b354a66ad1a75a470e61fad8dc87b702c7d580..8f315570e738af045393a3461e57d363fc60fc50 100644 --- 
a/common_components/heap/space/from_space.cpp +++ b/common_components/heap/space/from_space.cpp @@ -13,7 +13,7 @@ * limitations under the License. */ -#include "common_components/heap/allocator/region_space.h" +#include "common_components/heap/allocator/regional_heap.h" #include "common_components/heap/space/from_space.h" #include "common_components/heap/space/old_space.h" #include "common_components/heap/collector/collector_resources.h" @@ -37,8 +37,8 @@ void FromSpace::DumpRegionStats() const size_t units = fromUnits + exemptedFromUnits; VLOG(DEBUG, "\tfrom space units: %zu (%zu B)", units, units * RegionDesc::UNIT_SIZE); - VLOG(DEBUG, "\tfrom-regions %zu: %zu units (%zu B, alloc %zu)", fromRegions, fromUnits, fromSize, allocFromSize); - VLOG(DEBUG, "\texempted from-regions %zu: %zu units (%zu B, alloc %zu)", + VLOG(DEBUG, "\t from-regions %zu: %zu units (%zu B, alloc %zu)", fromRegions, fromUnits, fromSize, allocFromSize); + VLOG(DEBUG, "\t exempted from-regions %zu: %zu units (%zu B, alloc %zu)", exemptedFromRegions, exemptedFromUnits, exemptedFromSize, allocExemptedFromSize); } @@ -124,7 +124,7 @@ void FromSpace::ParallelCopyFromRegions(RegionDesc *startRegion, size_t regionCn AllocationBuffer* allocBuffer = AllocationBuffer::GetAllocBuffer(); if (LIKELY_CC(allocBuffer != nullptr)) { - allocBuffer->ClearRegion(); // clear thread local region for gc threads. + allocBuffer->ClearRegions(); // clear thread local region for gc threads. 
} } @@ -143,7 +143,7 @@ void FromSpace::CopyFromRegions() AllocationBuffer* allocBuffer = AllocationBuffer::GetAllocBuffer(); if (LIKELY(allocBuffer != nullptr)) { - allocBuffer->ClearRegion(); // clear region for next GC + allocBuffer->ClearRegions(); // clear region for next GC } } diff --git a/common_components/heap/space/from_space.h b/common_components/heap/space/from_space.h index 2f38cd7a750717c1a2f2352bda0331a2bf2c0d5e..4997212b3cf17a1d38711132e70f30c3ae6e6c49 100644 --- a/common_components/heap/space/from_space.h +++ b/common_components/heap/space/from_space.h @@ -26,21 +26,23 @@ #include "common_components/heap/allocator/allocator.h" #include "common_components/heap/allocator/region_manager.h" #include "common_components/mutator/mutator.h" +#include "common_components/heap/allocator/fix_heap.h" +#include "common_components/heap/space/regional_space.h" #if defined(COMMON_SANITIZER_SUPPORT) #include "common_components/base/asan_interface.h" #endif namespace common { -class RegionSpace; +class RegionalHeap; class OldSpace; class Taskpool; // regions for small-sized movable objects, which may be moved during gc. 
class FromSpace : public RegionalSpace { public: - FromSpace(RegionManager& regionManager, RegionSpace& heap) : RegionalSpace(regionManager), + FromSpace(RegionManager& regionManager, RegionalHeap& heap) : RegionalSpace(regionManager), fromRegionList_("from-regions"), - exemptedFromRegionList_("exempted from-regions"), heap_(heap) {} + exemptedFromRegionList_("exempted from-regions"), heap_(heap), exemptedRegionThreshold_(0) {} void DumpRegionStats() const; @@ -51,10 +53,9 @@ public: void ExemptFromRegions(); - void FixAllRegions() + void CollectFixTasks(FixHeapTaskList &taskList) { - TraceCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); - RegionManager::FixRegionList(collector, exemptedFromRegionList_); + FixHeapWorker::CollectFixHeapTasks(taskList, exemptedFromRegionList_, FIX_REGION); } size_t GetUsedUnitCount() const @@ -63,6 +64,11 @@ public: } size_t GetAllocatedSize() const + { + return fromRegionList_.GetAllocatedSize() + exemptedFromRegionList_.GetAllocatedSize(); + } + + size_t GetFromRegionAllocatedSize() const { return fromRegionList_.GetAllocatedSize(); } @@ -89,7 +95,7 @@ public: return exemptedFromRegionList_.GetAllocatedSize(); } - RegionSpace& GetHeap() { return heap_; } + RegionalHeap& GetHeap() { return heap_; } void ParallelCopyFromRegions(RegionDesc* startRegion, size_t regionCnt); void CopyFromRegions(Taskpool* threadPool); @@ -120,7 +126,7 @@ private: RegionList tmp("temp region list"); list.CopyListTo(tmp); tmp.VisitAllRegions([](RegionDesc* region) { - region->ClearTraceCopyFixLine(); + region->ClearMarkingCopyLine(); region->ClearLiveInfo(); region->ResetMarkBit(); }); @@ -132,7 +138,7 @@ private: // regions exempted by ExemptFromRegions, which will not be moved during current GC. 
RegionList exemptedFromRegionList_; - RegionSpace& heap_; + RegionalHeap& heap_; double exemptedRegionThreshold_; }; diff --git a/common_components/heap/space/large_space.cpp b/common_components/heap/space/large_space.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5a951ed0ebf50e39941497c7e9bdf23be33aa5aa --- /dev/null +++ b/common_components/heap/space/large_space.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/heap/space/large_space.h" +#include "heap/allocator/region_manager.h" +#include "common_components/common_runtime/hooks.h" +#include "common_components/heap/collector/collector.h" +#include "common_components/heap/collector/marking_collector.h" +#if defined(COMMON_SANITIZER_SUPPORT) +#include "common_components/base/asan_interface.h" +#endif + +namespace common { +void LargeSpace::AssembleGarbageCandidates() +{ + largeRegionList_.MergeRegionList(recentLargeRegionList_, RegionDesc::RegionType::LARGE_REGION); +} + +void LargeSpace::CollectFixTasks(FixHeapTaskList& taskList) +{ + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + FixHeapWorker::CollectFixHeapTasks(taskList, largeRegionList_, FIX_OLD_REGION); + FixHeapWorker::CollectFixHeapTasks(taskList, recentLargeRegionList_, FIX_RECENT_OLD_REGION); + } else { + FixHeapWorker::CollectFixHeapTasks(taskList, largeRegionList_, FIX_REGION); + FixHeapWorker::CollectFixHeapTasks(taskList, recentLargeRegionList_, FIX_RECENT_REGION); + } +} + +size_t LargeSpace::CollectLargeGarbage() +{ + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::CollectLargeGarbage", ""); + size_t garbageSize = 0; + MarkingCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); + RegionDesc* region = largeRegionList_.GetHeadRegion(); + while (region != nullptr) { + HeapAddress addr = region->GetRegionStart(); + BaseObject *obj = reinterpret_cast(addr); + + if (region->IsJitFortAwaitInstallFlag()) { + region = region->GetNextRegion(); + continue; + } + if (!collector.IsSurvivedObject(obj) && !region->IsNewObjectSinceMarking(obj)) { + DLOG(REGION, "reclaim large region %p@0x%zx+%zu type %u", region, region->GetRegionStart(), + region->GetRegionAllocatedSize(), region->GetRegionType()); + + RegionDesc* del = region; + region = region->GetNextRegion(); + largeRegionList_.DeleteRegion(del); + if (IsMachineCodeObject(reinterpret_cast(obj))) { + JitFortUnProt(del->GetRegionBaseSize(), 
reinterpret_cast(del->GetRegionBaseFast())); + } + if (del->GetRegionSize() > RegionDesc::LARGE_OBJECT_RELEASE_THRESHOLD) { + garbageSize += regionManager_.ReleaseRegion(del); + } else { + garbageSize += regionManager_.CollectRegion(del); + } + } else { + DLOG(REGION, "clear mark-bit for large region %p@0x%zx+%zu type %u", region, region->GetRegionStart(), + region->GetRegionAllocatedSize(), region->GetRegionType()); + region = region->GetNextRegion(); + } + } + + region = recentLargeRegionList_.GetHeadRegion(); + while (region != nullptr) { + region = region->GetNextRegion(); + } + + return garbageSize; +} + +uintptr_t LargeSpace::Alloc(size_t size, bool allowGC) +{ + size_t alignedSize = AlignUp(size + RegionDesc::UNIT_HEADER_SIZE, RegionDesc::UNIT_SIZE); + size_t regionCount = alignedSize / RegionDesc::UNIT_SIZE; + RegionDesc* region = regionManager_.TakeRegion(regionCount, RegionDesc::UnitRole::LARGE_SIZED_UNITS, + false, allowGC); + if (region == nullptr) { + return 0; + } + InitRegionPhaseLine(region); + DLOG(REGION, "alloc large region @0x%zx+%zu type %u", region->GetRegionStart(), + region->GetRegionSize(), region->GetRegionType()); + uintptr_t addr = region->Alloc(size); + ASSERT(addr > 0); + recentLargeRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_LARGE_REGION); + return addr; +} + +} // namespace common \ No newline at end of file diff --git a/common_components/heap/space/large_space.h b/common_components/heap/space/large_space.h new file mode 100644 index 0000000000000000000000000000000000000000..a8786890c7929d676adda59028b6a5b8d99418c4 --- /dev/null +++ b/common_components/heap/space/large_space.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef COMMON_COMPONENTS_HEAP_SPACE_LARGE_SPACE_H +#define COMMON_COMPONENTS_HEAP_SPACE_LARGE_SPACE_H + +#include "base/common.h" +#include "common_components/heap/allocator/region_manager.h" +#include "common_components/heap/space/regional_space.h" + +namespace common { + +// regions for large-sized objects. +class LargeSpace : public RegionalSpace { +public: + LargeSpace(RegionManager& regionManager) + : RegionalSpace(regionManager), + largeRegionList_("large regions"), + recentLargeRegionList_("recent large regions") {} + + size_t GetRecentAllocatedSize() const + { + return recentLargeRegionList_.GetAllocatedSize(); + } + + size_t GetSurvivedSize() const + { + return largeRegionList_.GetAllocatedSize(); + } + + void CollectFixTasks(FixHeapTaskList& taskList); + + uintptr_t Alloc(size_t size, bool allowGC = true); + + void AssembleGarbageCandidates(); + size_t CollectLargeGarbage(); + + size_t GetUsedUnitCount() const + { + return largeRegionList_.GetUnitCount() + recentLargeRegionList_.GetUnitCount(); + } + + size_t GetAllocatedSize() const + { + return largeRegionList_.GetAllocatedSize() + recentLargeRegionList_.GetAllocatedSize(); + } + + void ClearAllGCInfo() + { + ClearGCInfo(largeRegionList_); + ClearGCInfo(recentLargeRegionList_); + } + + void MarkRememberSet(const std::function& func) + { + auto visitFunc = [&func](RegionDesc* region) { + region->VisitRememberSetBeforeMarking(func); + }; + recentLargeRegionList_.VisitAllRegions(visitFunc); + largeRegionList_.VisitAllRegions(visitFunc); + } + + void ClearRSet() + { + auto clearFunc 
= [](RegionDesc* region) { + region->ClearRSet(); + }; + recentLargeRegionList_.VisitAllRegions(clearFunc); + largeRegionList_.VisitAllRegions(clearFunc); + } + + void DumpRegionStats() const + { + largeRegionList_.DumpRegionSummary(); + recentLargeRegionList_.DumpRegionSummary(); + } + +private: + // large region which allocated before last GC + RegionList largeRegionList_; + + // large regions which allocated since last GC beginning. + // record large regions in here first and move those to largeRegionList_ when gc starts. + RegionList recentLargeRegionList_; +}; +} // namespace common +#endif // COMMON_COMPONENTS_HEAP_SPACE_LARGE_SPACE_H \ No newline at end of file diff --git a/common_components/heap/space/nonmovable_space.cpp b/common_components/heap/space/nonmovable_space.cpp new file mode 100644 index 0000000000000000000000000000000000000000..647e4bf4b955f9db2207b4a9b0f2092990b15f66 --- /dev/null +++ b/common_components/heap/space/nonmovable_space.cpp @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/common_runtime/hooks.h" +#include "common_components/heap/allocator/region_desc.h" +#include "common_components/heap/allocator/region_list.h" +#include "common_components/heap/allocator/regional_heap.h" +#include "common_components/heap/collector/collector.h" +#include "common_components/heap/collector/marking_collector.h" +#include "common_components/common/base_object.h" +#include "common_components/heap/allocator/fix_heap.h" +#include "common_components/heap/allocator/region_manager.h" +#include "common_components/heap/space/nonmovable_space.h" + +#if defined(COMMON_TSAN_SUPPORT) +#include "common_components/sanitizer/sanitizer_interface.h" +#endif +#include "common_components/log/log.h" +#include "common_components/taskpool/taskpool.h" +#include "common_interfaces/base_runtime.h" + +#if defined(_WIN64) +#include +#endif + +namespace common { + +void NonMovableSpace::AssembleGarbageCandidates() +{ + polySizeRegionList_.MergeRegionListWithoutHead(recentPolySizeRegionList_, + RegionDesc::RegionType::FULL_POLYSIZE_NONMOVABLE_REGION); + RegionDesc* region = polySizeRegionList_.GetHeadRegion(); + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + monoSizeRegionList_[i]->MergeRegionListWithoutHead(*recentMonoSizeRegionList_[i], + RegionDesc::RegionType::FULL_MONOSIZE_NONMOVABLE_REGION); + } +} + +void CollectFixHeapTaskForFullRegion(MarkingCollector &collector, RegionList &list, + FixHeapTaskList &taskList) +{ + RegionDesc *region = list.GetHeadRegion(); + while (region != nullptr) { + auto liveBytes = region->GetLiveByteCount(); + if (liveBytes == 0) { + PostFixHeapWorker::AddEmptyRegionToCollectDuringPostFix(&list, region); + region = region->GetNextRegion(); + continue; + } + taskList.push_back({region, FIX_REGION}); + region = region->GetNextRegion(); + } +} + +void NonMovableSpace::CollectFixTasks(FixHeapTaskList &taskList) +{ + // fix all objects. 
+ if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + FixHeapWorker::CollectFixHeapTasks(taskList, recentPolySizeRegionList_, FIX_RECENT_OLD_REGION); + FixHeapWorker::CollectFixHeapTasks(taskList, polySizeRegionList_, FIX_OLD_REGION); + + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + FixHeapWorker::CollectFixHeapTasks(taskList, *recentMonoSizeRegionList_[i], FIX_RECENT_OLD_REGION); + FixHeapWorker::CollectFixHeapTasks(taskList, *monoSizeRegionList_[i], FIX_OLD_REGION); + } + } else { + FixHeapWorker::CollectFixHeapTasks(taskList, recentPolySizeRegionList_, FIX_RECENT_REGION); + MarkingCollector &collector = reinterpret_cast(Heap::GetHeap().GetCollector()); + CollectFixHeapTaskForFullRegion(collector, polySizeRegionList_, taskList); + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + FixHeapWorker::CollectFixHeapTasks(taskList, *recentMonoSizeRegionList_[i], FIX_RECENT_REGION); + CollectFixHeapTaskForFullRegion(collector, *monoSizeRegionList_[i], taskList); + } + } +} + +void NonMovableSpace::DumpRegionStats() const +{ + polySizeRegionList_.DumpRegionSummary(); + recentPolySizeRegionList_.DumpRegionSummary(); +} + +uintptr_t NonMovableSpace::AllocInMonoSizeList(size_t cellCount) +{ + GCPhase mutatorPhase = Mutator::GetMutator()->GetMutatorPhase(); + // workaround: make sure collector doesn't fix newly allocated incomplete objects + if (mutatorPhase == GC_PHASE_MARK || mutatorPhase == GC_PHASE_FIX) { + return 0; + } + + RegionList* list = monoSizeRegionList_[cellCount]; + std::lock_guard lock(list->GetListMutex()); + uintptr_t allocPtr = list->AllocFromFreeListInLock(); + // For making bitmap comform with live object count, do not mark object repeated. + if (allocPtr == 0 || mutatorPhase == GCPhase::GC_PHASE_IDLE) { + return allocPtr; + } + + // Mark new allocated non-movable object. 
+ RegionDesc* regionDesc = RegionDesc::GetRegionDescAt(allocPtr); + BaseObject* object = reinterpret_cast(allocPtr); + regionDesc->MarkObject(object); + size_t size = (cellCount + 1) * sizeof(uint64_t); + regionDesc->AddLiveByteCount(size); + return allocPtr; +} + +uintptr_t NonMovableSpace::Alloc(size_t size, bool allowGC) +{ + uintptr_t addr = 0; + if (!allowGC || size > NONMOVABLE_OBJECT_SIZE_THRESHOLD) { + DLOG(ALLOC, "alloc non-movable obj 0x%zx(%zu)", addr, size); + return AllocInPolySizeList(size); + } + CHECK_CC(size % sizeof(uint64_t) == 0); + size_t cellCount = size / sizeof(uint64_t) - 1; + RegionList* list = recentMonoSizeRegionList_[cellCount]; + std::mutex& listMutex = list->GetListMutex(); + std::lock_guard lock(listMutex); + RegionDesc* headRegion = list->GetHeadRegion(); + if (headRegion != nullptr) { + addr = headRegion->Alloc(size); + } + if (addr == 0) { + addr = AllocInMonoSizeList(cellCount); + } + if (addr == 0) { + RegionDesc* region = + regionManager_.TakeRegion(1, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, allowGC); + if (region == nullptr) { + return 0; + } + DLOG(REGION, "alloc non-movable region @0x%zx+%zu type %u", region->GetRegionStart(), + region->GetRegionAllocatedSize(), + region->GetRegionType()); + ASSERT(cellCount == static_cast(static_cast(cellCount))); + region->SetRegionCellCount(static_cast(cellCount)); + InitRegionPhaseLine(region); + // To make sure the allocedSize are consistent, it must prepend region first then alloc object. 
+ list->PrependRegionLocked(region, RegionDesc::RegionType::MONOSIZE_NONMOVABLE_REGION); + addr = region->Alloc(size); + } + DLOG(ALLOC, "alloc non-movable obj 0x%zx(%zu)", addr, size); + return addr; +} + +uintptr_t NonMovableSpace::AllocInPolySizeList(size_t size, bool allowGC) +{ + uintptr_t addr = 0; + std::mutex& regionListMutex = recentPolySizeRegionList_.GetListMutex(); + + std::lock_guard lock(regionListMutex); + RegionDesc* headRegion = recentPolySizeRegionList_.GetHeadRegion(); + if (headRegion != nullptr) { + addr = headRegion->Alloc(size); + } + if (addr == 0) { + RegionDesc* region = + regionManager_.TakeRegion(1, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, allowGC); + if (region == nullptr) { + return 0; + } + DLOG(REGION, "alloc non-movable region @0x%zx+%zu type %u", region->GetRegionStart(), + region->GetRegionAllocatedSize(), + region->GetRegionType()); + + InitRegionPhaseLine(region); + // To make sure the allocedSize are consistent, it must prepend region first then alloc object. 
+ recentPolySizeRegionList_.PrependRegionLocked(region, + RegionDesc::RegionType::RECENT_POLYSIZE_NONMOVABLE_REGION); + addr = region->Alloc(size); + } + + DLOG(ALLOC, "alloc non-movable obj 0x%zx(%zu)", addr, size); + return addr; +} + +uintptr_t NonMovableSpace::AllocFullRegion() +{ + RegionDesc* region = regionManager_.TakeRegion(false, false); + ASSERT(region != nullptr); + + InitRegionPhaseLine(region); + + DLOG(REGION, "alloc non-movable region @0x%zx+%zu type %u", region->GetRegionStart(), + region->GetRegionAllocatedSize(), + region->GetRegionType()); + + recentPolySizeRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_POLYSIZE_NONMOVABLE_REGION); + + uintptr_t start = region->GetRegionStart(); + uintptr_t addr = region->Alloc(region->GetRegionEnd() - region->GetRegionAllocPtr()); + ASSERT(addr != 0); + + return start; +} + +void NonMovableSpace::MarkRememberSet(const std::function& func) +{ + auto visitFunc = [&func](RegionDesc* region) { + region->VisitRememberSetBeforeMarking(func); + }; + recentPolySizeRegionList_.VisitAllRegions(visitFunc); + polySizeRegionList_.VisitAllRegions(visitFunc); + + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + recentMonoSizeRegionList_[i]->VisitAllRegions(visitFunc); + monoSizeRegionList_[i]->VisitAllRegions(visitFunc); + } +} + +} \ No newline at end of file diff --git a/common_components/heap/space/nonmovable_space.h b/common_components/heap/space/nonmovable_space.h new file mode 100644 index 0000000000000000000000000000000000000000..6cf267776dfaf2aa1e4bcf143c36cb6008d70e40 --- /dev/null +++ b/common_components/heap/space/nonmovable_space.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef COMMON_COMPONENTS_HEAP_SPACE_NONMOVABLE_SPACE_H +#define COMMON_COMPONENTS_HEAP_SPACE_NONMOVABLE_SPACE_H + +#include "common_components/heap/allocator/region_manager.h" +#include "common_components/heap/space/regional_space.h" +#include "common_components/mutator/mutator.h" +#include "common_components/heap/allocator/fix_heap.h" +#if defined(COMMON_SANITIZER_SUPPORT) +#include "common_components/base/asan_interface.h" +#endif + +namespace common { +// regions for non movable objects +class NonMovableSpace : public RegionalSpace { +public: + NonMovableSpace(RegionManager& regionManager) + : RegionalSpace(regionManager), + polySizeRegionList_("mixed size non-movable regions"), + recentPolySizeRegionList_("recent mixed size non-movable regions") { + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + recentMonoSizeRegionList_[i] = new RegionList("recent one size non-movable regions"); + monoSizeRegionList_[i] = new RegionList("one size non-movable regions"); + } + } + + ~NonMovableSpace() + { + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + if (recentMonoSizeRegionList_[i] != nullptr) { + delete recentMonoSizeRegionList_[i]; + recentMonoSizeRegionList_[i] = nullptr; + } + if (monoSizeRegionList_[i] != nullptr) { + delete monoSizeRegionList_[i]; + monoSizeRegionList_[i] = nullptr; + } + } + } + + void CollectFixTasks(FixHeapTaskList& taskList); + + uintptr_t AllocInMonoSizeList(size_t size); + uintptr_t Alloc(size_t size, bool allowGC = true); + uintptr_t AllocInPolySizeList(size_t size, bool allowGC = true); + + 
uintptr_t AllocFullRegion(); + + void AssembleGarbageCandidates(); + void MarkRememberSet(const std::function& func); + void DumpRegionStats() const; + + size_t GetRecentAllocatedSize() const + { + return recentPolySizeRegionList_.GetAllocatedSize(); + } + + size_t GetSurvivedSize() const + { + return polySizeRegionList_.GetAllocatedSize(); + } + + size_t GetUsedUnitCount() const + { + size_t nonMovableUnitCount = + polySizeRegionList_.GetUnitCount() + recentPolySizeRegionList_.GetUnitCount(); + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + nonMovableUnitCount += recentMonoSizeRegionList_[i]->GetUnitCount(); + nonMovableUnitCount += monoSizeRegionList_[i]->GetUnitCount(); + } + return nonMovableUnitCount; + } + + size_t GetAllocatedSize() const + { + size_t nonMovableSpaceSize = + polySizeRegionList_.GetAllocatedSize() + recentPolySizeRegionList_.GetAllocatedSize(); + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + nonMovableSpaceSize += recentMonoSizeRegionList_[i]->GetAllocatedSize(); + nonMovableSpaceSize += monoSizeRegionList_[i]->GetAllocatedSize(); + } + return nonMovableSpaceSize; + } + + void PrepareMarking() + { + RegionDesc* region = recentPolySizeRegionList_.GetHeadRegion(); + if (region != nullptr && region != RegionDesc::NullRegion()) { + region->SetMarkingLine(); + } + + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + RegionDesc* region = recentMonoSizeRegionList_[i]->GetHeadRegion(); + if (region != nullptr && region != RegionDesc::NullRegion()) { + region->SetMarkingLine(); + } + } + } + + void PrepareForward() + { + RegionDesc* region = recentPolySizeRegionList_.GetHeadRegion(); + if (region != nullptr && region != RegionDesc::NullRegion()) { + region->SetCopyLine(); + } + + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + RegionDesc* region = recentMonoSizeRegionList_[i]->GetHeadRegion(); + if (region != nullptr && region != RegionDesc::NullRegion()) { + region->SetCopyLine(); + } + } + } + 
+ void ClearAllGCInfo() + { + ClearGCInfo(recentPolySizeRegionList_); + ClearGCInfo(polySizeRegionList_); + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + ClearGCInfo(*recentMonoSizeRegionList_[i]); + ClearGCInfo(*monoSizeRegionList_[i]); + } + } + + void ClearRSet() + { + auto clearFunc = [](RegionDesc* region) { + region->ClearRSet(); + }; + recentPolySizeRegionList_.VisitAllRegions(clearFunc); + polySizeRegionList_.VisitAllRegions(clearFunc); + for (size_t i = 0; i < NONMOVABLE_OBJECT_SIZE_COUNT; i++) { + recentMonoSizeRegionList_[i]->VisitAllRegions(clearFunc); + monoSizeRegionList_[i]->VisitAllRegions(clearFunc); + } + } + +private: + constexpr static size_t NONMOVABLE_OBJECT_SIZE_COUNT = 128; + constexpr static size_t NONMOVABLE_OBJECT_SIZE_THRESHOLD = sizeof(uint64_t) * NONMOVABLE_OBJECT_SIZE_COUNT; + + // objects in region have multi size. + RegionList polySizeRegionList_; + RegionList recentPolySizeRegionList_; + + // objects in region have only one size + RegionList* monoSizeRegionList_[NONMOVABLE_OBJECT_SIZE_COUNT]; + RegionList* recentMonoSizeRegionList_[NONMOVABLE_OBJECT_SIZE_COUNT]; +}; +} + +#endif // COMMON_COMPONENTS_HEAP_SPACE_NONMOVABLE_SPACE_H \ No newline at end of file diff --git a/common_components/heap/space/old_space.cpp b/common_components/heap/space/old_space.cpp index 79b3d7850068b50679d7ae248560d6e8d128e16a..0a51e3d5b979b1ba9f94ffacc5f8ed7be4de6021 100644 --- a/common_components/heap/space/old_space.cpp +++ b/common_components/heap/space/old_space.cpp @@ -21,12 +21,28 @@ namespace common { void OldSpace::DumpRegionStats() const { - size_t oldRegions = oldRegionList_.GetRegionCount(); - size_t oldUnits = oldRegionList_.GetUnitCount(); + size_t oldRegions = + tlOldRegionList_.GetRegionCount() + recentFullOldRegionList_.GetRegionCount() + oldRegionList_.GetRegionCount(); + size_t oldUnits = + tlOldRegionList_.GetUnitCount() + recentFullOldRegionList_.GetUnitCount() + oldRegionList_.GetUnitCount(); size_t oldSize = oldUnits * 
RegionDesc::UNIT_SIZE; - size_t allocFromSize = oldRegionList_.GetAllocatedSize(); + size_t allocFromSize = GetAllocatedSize(); VLOG(DEBUG, "\told-regions %zu: %zu units (%zu B, alloc %zu)", oldRegions, oldUnits, oldSize, allocFromSize); } + +RegionDesc* OldSpace::AllocateThreadLocalRegion(bool expectPhysicalMem) +{ + RegionDesc* region = regionManager_.TakeRegion(expectPhysicalMem, true); + ASSERT_LOGF(!IsGcThread(), "GC thread cannot take tlOldRegion"); + if (region != nullptr) { + DLOG(REGION, "alloc thread local old region @0x%zx+%zu type %u", region->GetRegionStart(), + region->GetRegionAllocatedSize(), + region->GetRegionType()); + InitRegionPhaseLine(region); + tlOldRegionList_.PrependRegion(region, RegionDesc::RegionType::THREAD_LOCAL_OLD_REGION); + } + return region; +} } // namespace common diff --git a/common_components/heap/space/old_space.h b/common_components/heap/space/old_space.h index 6f23c0c5e3d0c053ff642f43c0e5fb3fae4c7f3e..7d77b47c917089331c744de9613eaf07b7a9ca29 100644 --- a/common_components/heap/space/old_space.h +++ b/common_components/heap/space/old_space.h @@ -15,20 +15,12 @@ #ifndef COMMON_COMPONENTS_HEAP_SPACE_OLD_SPACE_H #define COMMON_COMPONENTS_HEAP_SPACE_OLD_SPACE_H -#include -#include -#include -#include -#include -#include - -#include "common_components/heap/allocator/alloc_util.h" -#include "common_components/heap/allocator/allocator.h" #include "common_components/heap/allocator/region_manager.h" #include "common_components/heap/space/regional_space.h" #include "common_components/heap/space/from_space.h" #include "common_components/heap/space/to_space.h" #include "common_components/mutator/mutator.h" +#include "common_components/heap/allocator/fix_heap.h" #if defined(COMMON_SANITIZER_SUPPORT) #include "common_components/base/asan_interface.h" #endif @@ -37,17 +29,26 @@ namespace common { // regions for small-sized movable objects, which may be moved during gc. 
class OldSpace : public RegionalSpace { public: - OldSpace(RegionManager& regionManager) : RegionalSpace(regionManager), oldRegionList_("old regions") {} + OldSpace(RegionManager& regionManager) + : RegionalSpace(regionManager), + tlOldRegionList_("thread-local old regions"), + recentFullOldRegionList_("recent full old regions"), + oldRegionList_("old regions") {} void DumpRegionStats() const; - void FixAllRegions() + void CollectFixTasks(FixHeapTaskList &taskList) { - TraceCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { - RegionManager::FixOldRegionList(collector, oldRegionList_); + FixHeapWorker::CollectFixHeapTasks(taskList, oldRegionList_, FIX_OLD_REGION); + std::lock_guard lock(lock_); + FixHeapWorker::CollectFixHeapTasks(taskList, tlOldRegionList_, FIX_RECENT_OLD_REGION); + FixHeapWorker::CollectFixHeapTasks(taskList, recentFullOldRegionList_, FIX_RECENT_OLD_REGION); } else { - RegionManager::FixRegionList(collector, oldRegionList_); + FixHeapWorker::CollectFixHeapTasks(taskList, oldRegionList_, FIX_REGION); + std::lock_guard lock(lock_); + FixHeapWorker::CollectFixHeapTasks(taskList, tlOldRegionList_, FIX_RECENT_REGION); + FixHeapWorker::CollectFixHeapTasks(taskList, recentFullOldRegionList_, FIX_RECENT_REGION); } } @@ -58,12 +59,14 @@ public: size_t GetAllocatedSize() const { - return oldRegionList_.GetAllocatedSize(); + return tlOldRegionList_.GetAllocatedSize(false) + recentFullOldRegionList_.GetAllocatedSize() + + oldRegionList_.GetAllocatedSize(); } size_t GetUsedUnitCount() const { - return oldRegionList_.GetUnitCount(); + return tlOldRegionList_.GetRegionCount() + recentFullOldRegionList_.GetRegionCount() + + oldRegionList_.GetUnitCount(); } void PromoteRegionList(RegionList& list) @@ -71,44 +74,109 @@ public: oldRegionList_.MergeRegionList(list, RegionDesc::RegionType::OLD_REGION); } + void AssembleRecentFull() + { + 
oldRegionList_.MergeRegionList(recentFullOldRegionList_, RegionDesc::RegionType::OLD_REGION); + } + void ClearRSet() { - RegionDesc* region = oldRegionList_.GetHeadRegion(); - while (region != nullptr) { - region->ClearRSet(); - region = region->GetNextRegion(); - } + ClearRSet(tlOldRegionList_.GetHeadRegion()); + ClearRSet(recentFullOldRegionList_.GetHeadRegion()); + ClearRSet(oldRegionList_.GetHeadRegion()); } void ClearAllGCInfo() { ClearGCInfo(oldRegionList_); + std::lock_guard lock(lock_); + ClearGCInfo(tlOldRegionList_); + ClearGCInfo(recentFullOldRegionList_); } - void VisitRememberSet(const std::function& func) + void MarkRememberSet(const std::function& func) { auto visitFunc = [&func](RegionDesc* region) { - region->VisitAllObjects([®ion, &func](BaseObject* obj) { - if (region->IsInRSet(obj)) { - func(obj); - } - }); + region->VisitRememberSet(func); + }; + + auto visitRecentFunc = [&func](RegionDesc* region) { + region->VisitRememberSetBeforeMarking(func); }; + + // Need to visit objects allocated before current GC iteration, markingline is used to distinguish them. 
+ tlOldRegionList_.VisitAllRegions(visitRecentFunc); + recentFullOldRegionList_.VisitAllRegions(visitRecentFunc); oldRegionList_.VisitAllRegions(visitFunc); } + uintptr_t AllocFullRegion() + { + RegionDesc* region = regionManager_.TakeRegion(false, false); + ASSERT(region != nullptr); + + InitRegionPhaseLine(region); + + DLOG(REGION, "alloc small object region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u", + region, region->GetRegionStart(), region->GetRegionSize(), + region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), + region->GetRegionType()); + AddFullRegion(region); + + uintptr_t start = region->GetRegionStart(); + uintptr_t addr = region->Alloc(region->GetRegionEnd() - region->GetRegionAllocPtr()); + (void)addr; + ASSERT(addr != 0); + + return start; + } + + void AddFullRegion(RegionDesc *region) + { + recentFullOldRegionList_.PrependRegion(region, RegionDesc::RegionType::OLD_REGION); + } + + void HandleFullThreadLocalRegion(RegionDesc* region) + { + std::lock_guard lock(lock_); + ASSERT_LOGF(region->GetRegionType() == RegionDesc::RegionType::THREAD_LOCAL_OLD_REGION, + "not thread local old region"); + tlOldRegionList_.DeleteRegion(region); + recentFullOldRegionList_.PrependRegion(region, RegionDesc::RegionType::OLD_REGION); + } + + RegionDesc* AllocateThreadLocalRegion(bool expectPhysicalMem); + private: void ClearGCInfo(RegionList& list) { RegionList tmp("temp region list"); list.CopyListTo(tmp); tmp.VisitAllRegions([](RegionDesc* region) { - region->ClearTraceCopyFixLine(); + region->ClearMarkingCopyLine(); region->ClearLiveInfo(); region->ResetMarkBit(); }); } + void ClearRSet(RegionDesc* region) + { + while (region != nullptr) { + region->ClearRSet(); + region = region->GetNextRegion(); + } + } + // Used to exclude the promotion of TLS regions during the ClearGCInfo phase + // This is necessary when the mutator can promote TL to recentFull while the GC is performing ClearGC + std::mutex lock_; + + // regions for 
thread-local allocation. + // regions in this list are already used for allocation but not full yet. + RegionList tlOldRegionList_; + + // recentFullRegionList is a list of regions which become full . + RegionList recentFullOldRegionList_; + RegionList oldRegionList_; }; } // namespace common diff --git a/common_components/heap/space/rawpointer_space.h b/common_components/heap/space/rawpointer_space.h new file mode 100644 index 0000000000000000000000000000000000000000..03e5a2ec6c6ff76a63bde9815725ab41d72051fa --- /dev/null +++ b/common_components/heap/space/rawpointer_space.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef COMMON_COMPONENTS_HEAP_SPACE_RAWPOINTER_SPACE_H +#define COMMON_COMPONENTS_HEAP_SPACE_RAWPOINTER_SPACE_H + +#include "common_components/heap/allocator/region_manager.h" +#include "common_components/heap/space/regional_space.h" + +namespace common { +class RawPointerSpace : public RegionalSpace { +public: + RawPointerSpace(RegionManager& regionManager) + : RegionalSpace(regionManager), + rawPointerRegionList_("raw pointer regions") {} + + void AddRawPointerRegion(RegionDesc* region) + { + rawPointerRegionList_.PrependRegion(region, RegionDesc::RegionType::RAW_POINTER_REGION); + } + + size_t GetUsedUnitCount() const + { + return rawPointerRegionList_.GetUnitCount(); + } + + size_t GetAllocatedSize() const + { + return rawPointerRegionList_.GetAllocatedSize(); + } + + void ClearAllGCInfo() + { + ClearGCInfo(rawPointerRegionList_); + } + + void MarkRememberSet(const std::function& func) + { + auto visitFunc = [&func](RegionDesc* region) { + region->VisitRememberSetBeforeMarking(func); + }; + rawPointerRegionList_.VisitAllRegions(visitFunc); + } + + void ClearRSet() + { + rawPointerRegionList_.VisitAllRegions([](RegionDesc* region) { region->ClearRSet(); }); + } + + void CollectFixTasks(FixHeapTaskList& taskList) + { + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + FixHeapWorker::CollectFixHeapTasks(taskList, rawPointerRegionList_, FIX_OLD_REGION); + } else { + FixHeapWorker::CollectFixHeapTasks(taskList, rawPointerRegionList_, FIX_REGION); + } + } + + void DumpRegionStats() const + { + rawPointerRegionList_.DumpRegionSummary(); + } + +private: + + // region lists for small-sized raw-pointer objects (i.e. future, monitor) + // which can not be moved ever (even during compaction). 
+ RegionList rawPointerRegionList_; // delete rawPointerRegion, use PinnedRegion +}; +} // namespace common +#endif // COMMON_COMPONENTS_HEAP_SPACE_RAWPOINTER_SPACE_H \ No newline at end of file diff --git a/common_components/heap/space/readonly_space.h b/common_components/heap/space/readonly_space.h new file mode 100644 index 0000000000000000000000000000000000000000..befb78c7a31878304dc5bef057f7671605d6feee --- /dev/null +++ b/common_components/heap/space/readonly_space.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef COMMON_COMPONENTS_HEAP_SPACE_READONLY_SPACE_H +#define COMMON_COMPONENTS_HEAP_SPACE_READONLY_SPACE_H + +#include "common_components/heap/allocator/region_manager.h" +#include "common_components/heap/space/regional_space.h" + +namespace common { +// regions for read only objects +class ReadOnlySpace : public RegionalSpace { +public: + ReadOnlySpace(RegionManager& regionManager) : RegionalSpace(regionManager), + readOnlyRegionList_("read only region") {} + + size_t GetUsedUnitCount() const + { + return readOnlyRegionList_.GetUnitCount(); + } + + size_t GetAllocatedSize() const + { + return readOnlyRegionList_.GetAllocatedSize(); + } + + void PrepareMarking() + { + RegionDesc* readOnlyRegion = readOnlyRegionList_.GetHeadRegion(); + if (readOnlyRegion != nullptr && readOnlyRegion != RegionDesc::NullRegion()) { + readOnlyRegion->SetMarkingLine(); + } + } + + void PrepareForward() + { + RegionDesc* readOnlyRegion = readOnlyRegionList_.GetHeadRegion(); + if (readOnlyRegion != nullptr && readOnlyRegion != RegionDesc::NullRegion()) { + readOnlyRegion->SetCopyLine(); + } + } + + void ClearAllGCInfo() + { + ClearGCInfo(readOnlyRegionList_); + } + + void SetReadOnlyToRORegionList() + { + auto visitor = [](RegionDesc* region) { + if (region != nullptr) { + region->SetReadOnly(); + } + }; + readOnlyRegionList_.VisitAllRegions(visitor); + } + + void ClearReadOnlyFromRORegionList() + { + auto visitor = [](RegionDesc* region) { + if (region != nullptr) { + region->ClearReadOnly(); + } + }; + readOnlyRegionList_.VisitAllRegions(visitor); + } + + void DumpRegionStats() const + { + readOnlyRegionList_.DumpRegionSummary(); + } + + uintptr_t Alloc(size_t size, bool allowGC = true) + { + uintptr_t addr = 0; + std::mutex& regionListMutex = readOnlyRegionList_.GetListMutex(); + + std::lock_guard lock(regionListMutex); + RegionDesc* headRegion = readOnlyRegionList_.GetHeadRegion(); + if (headRegion != nullptr) { + addr = headRegion->Alloc(size); + } + if (addr == 0) { + 
RegionDesc* region = + regionManager_.TakeRegion(1, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, allowGC); + if (region == nullptr) { + return 0; + } + DLOG(REGION, "alloc read only region @0x%zx+%zu type %u", region->GetRegionStart(), + region->GetRegionAllocatedSize(), + region->GetRegionType()); + + InitRegionPhaseLine(region); + + // To make sure the allocedSize are consistent, it must prepend region first then alloc object. + readOnlyRegionList_.PrependRegionLocked(region, RegionDesc::RegionType::READ_ONLY_REGION); + addr = region->Alloc(size); + } + + DLOG(ALLOC, "alloc read only obj 0x%zx(%zu)", addr, size); + return addr; + } + +private: + // regions for read only objects + RegionList readOnlyRegionList_; +}; +} // namespace common +#endif // COMMON_COMPONENTS_HEAP_SPACE_READONLY_SPACE_H \ No newline at end of file diff --git a/common_components/heap/space/regional_space.h b/common_components/heap/space/regional_space.h index 1e6c595e6040c48e5575a63c9f55885c0a060b9b..d8fcef3838cd7dacb3db0dc02ddcdef2e587016d 100644 --- a/common_components/heap/space/regional_space.h +++ b/common_components/heap/space/regional_space.h @@ -22,10 +22,13 @@ #include #include +#include "common_interfaces/base/common.h" #include "common_components/heap/allocator/alloc_util.h" #include "common_components/heap/allocator/allocator.h" #include "common_components/heap/allocator/region_manager.h" #include "common_components/mutator/mutator.h" +#include "common_components/heap/allocator/region_desc.h" +#include "common_components/mutator/mutator_manager.h" #if defined(COMMON_SANITIZER_SUPPORT) #include "common_components/base/asan_interface.h" #endif @@ -38,6 +41,32 @@ public: RegionManager& GetRegionManager() { return regionManager_; } protected: + void ClearGCInfo(RegionList& list) + { + RegionList tmp("temp region list"); + list.CopyListTo(tmp); + tmp.VisitAllRegions([](RegionDesc* region) { + region->ClearMarkingCopyLine(); + region->ClearLiveInfo(); + region->ResetMarkBit(); + 
}); + } + + void InitRegionPhaseLine(RegionDesc* region) + { + if (region == nullptr) { + return; + } + GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); + if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || + phase == GC_PHASE_POST_MARK) { + region->SetMarkingLine(); + } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY || phase == GC_PHASE_FIX) { + region->SetMarkingLine(); + region->SetCopyLine(); + } + } + RegionManager& regionManager_; }; } // namespace common diff --git a/test/fuzztest/containersvectorsort_fuzzer/BUILD.gn b/common_components/heap/space/tests/BUILD.gn similarity index 39% rename from test/fuzztest/containersvectorsort_fuzzer/BUILD.gn rename to common_components/heap/space/tests/BUILD.gn index 20d860be33c192db1f8b89ca34deea937d14c9a4..deaaa4e3bae817a50547f578c59367b1bd787afc 100644 --- a/test/fuzztest/containersvectorsort_fuzzer/BUILD.gn +++ b/common_components/heap/space/tests/BUILD.gn @@ -1,4 +1,4 @@ -# Copyright (c) 2022 Huawei Device Co., Ltd. +# Copyright (c) 2025 Huawei Device Co., Ltd. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,33 +11,48 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-##################################hydra-fuzz################################### -import("//arkcompiler/ets_runtime/js_runtime_config.gni") -import("//arkcompiler/ets_runtime/test/test_helper.gni") -import("//build/config/features.gni") -import("//build/ohos.gni") +import("//arkcompiler/ets_runtime/common_components/tests/test_helper.gni") -##################################fuzztest##################################### -ohos_fuzztest("ContainersVectorSortFuzzTest") { - module_out_path = ets_runtime_output_path +module_output_path = "ets_runtime" - fuzz_config_file = - "//arkcompiler/ets_runtime/test/fuzztest/containersvectorsort_fuzzer" +host_unittest_action("From_Space_Test") { + module_out_path = module_output_path - sources = [ "containersvectorsort_fuzzer.cpp" ] + sources = [ + # test file + "from_space_test.cpp", + ] - configs = [ "//arkcompiler/ets_runtime:ecma_test_config" ] + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] - deps = [ "../../../:libark_jsruntime_test" ] + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] # hiviewdfx libraries - external_deps = hiviewdfx_ext_deps - external_deps += [ sdk_libc_secshared_dep ] - deps += hiviewdfx_deps + external_deps = [ + "bounds_checking_function:libsec_shared", + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] } -group("fuzztest") { +group("unittest") { testonly = true - deps = [] - deps += [ ":ContainersVectorSortFuzzTest" ] + + # deps file + deps = [ + ":From_Space_Test", + ] } + +group("host_unittest") { + testonly = true + + # deps file + deps = [ + ":From_Space_TestAction", + ] +} \ No newline at end of file diff --git a/common_components/heap/space/tests/from_space_test.cpp b/common_components/heap/space/tests/from_space_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..590a86050f954c3abc676a47602cc593f9f34ce5 --- /dev/null 
+++ b/common_components/heap/space/tests/from_space_test.cpp @@ -0,0 +1,89 @@ +/* +* Copyright (c) 2025 Huawei Device Co., Ltd. +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +#include "common_components/heap/allocator/regional_heap.h" +#include "common_components/heap/space/from_space.h" +#include "common_components/mutator/thread_local.h" +#include "common_components/tests/test_helper.h" + +using namespace common; +namespace common::test { +class FromSpaceTest : public common::test::BaseTestWithScope { +protected: + class TestRegionList : public RegionList { + public: + TestRegionList() : RegionList("TestList") {} + void setHeadRegion(RegionDesc* head) { listHead_ = head; } + }; + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() + { + BaseRuntime::GetInstance()->Fini(); + } + + void SetUp() override {} + void TearDown() override {} +}; + +HWTEST_F_L0(FromSpaceTest, CopyFromRegions) +{ + RegionManager regionManager; + RegionalHeap heap; + FromSpace fromSpace(regionManager, heap); + ThreadLocal::SetAllocBuffer(nullptr); + fromSpace.CopyFromRegions(nullptr); + ASSERT_FALSE(fromSpace.GetFromRegionList().GetHeadRegion() != nullptr); +} + +HWTEST_F_L0(FromSpaceTest, CopyFromRegionsTest) +{ + size_t unitIdx = 0; + size_t nUnit = 4; + RegionDesc* region = RegionDesc::InitRegion(unitIdx, nUnit, RegionDesc::UnitRole::LARGE_SIZED_UNITS); + TestRegionList list; + list.setHeadRegion(region); + 
ASSERT_TRUE(list.GetHeadRegion() != nullptr); + + RegionalHeap heap; + RegionManager manager; + FromSpace fromSpace(manager, heap); + fromSpace.AssembleGarbageCandidates(list); + + AllocationBuffer* buffer1 = new (std::nothrow) AllocationBuffer(); + ThreadLocal::SetAllocBuffer(buffer1); + fromSpace.CopyFromRegions(); + fromSpace.ExemptFromRegions(); + ASSERT_TRUE(AllocationBuffer::GetAllocBuffer() != nullptr); +} + +HWTEST_F_L0(FromSpaceTest, ParallelCopyFromRegions) +{ + RegionManager regionManager; + RegionalHeap heap; + FromSpace fromSpace(regionManager, heap); + AllocationBuffer* buffer1 = new (std::nothrow) AllocationBuffer(); + ThreadLocal::SetAllocBuffer(buffer1); + fromSpace.ParallelCopyFromRegions(nullptr, 5); + ASSERT_TRUE(AllocationBuffer::GetAllocBuffer() != nullptr); + + ThreadLocal::SetAllocBuffer(nullptr); + fromSpace.ParallelCopyFromRegions(nullptr, 5); + ASSERT_FALSE(AllocationBuffer::GetAllocBuffer() != nullptr); +} +} // namespace common::test \ No newline at end of file diff --git a/common_components/heap/space/to_space.cpp b/common_components/heap/space/to_space.cpp index 7e64fb81fe076cc45dc1ff011e9e434fd1438064..998f546565e1ef869606c0476ea2f8391372a28b 100644 --- a/common_components/heap/space/to_space.cpp +++ b/common_components/heap/space/to_space.cpp @@ -22,25 +22,46 @@ namespace common { void ToSpace::DumpRegionStats() const { + size_t tlToRegions = tlToRegionList_.GetRegionCount(); + size_t tlToUnits = tlToRegionList_.GetUnitCount(); + size_t tlToSize = tlToUnits * RegionDesc::UNIT_SIZE; + size_t allocTLToSize = tlToRegionList_.GetAllocatedSize(false); + size_t fullToRegions = fullToRegionList_.GetRegionCount(); size_t fullToUnits = fullToRegionList_.GetUnitCount(); size_t fullToSize = fullToUnits * RegionDesc::UNIT_SIZE; size_t allocfullToSize = fullToRegionList_.GetAllocatedSize(); - size_t tlToRegions = tlToRegionList_.GetRegionCount(); - size_t tlToUnits = tlToRegionList_.GetUnitCount(); - size_t tlToSize = tlToUnits * 
RegionDesc::UNIT_SIZE; - size_t allocTLToSize = tlToRegionList_.GetAllocatedSize(); - - VLOG(DEBUG, "\tfull to-regions %zu: %zu units (%zu B, alloc %zu)", + size_t units = tlToUnits + fullToUnits; + VLOG(DEBUG, "\tto space units: %zu (%zu B)", units, units * RegionDesc::UNIT_SIZE); + VLOG(DEBUG, "\t thread-local to-regions %zu: %zu units (%zu B, alloc %zu)", + tlToRegions, tlToUnits, tlToSize, allocTLToSize); + VLOG(DEBUG, "\t full to-regions %zu: %zu units (%zu B, alloc %zu)", fullToRegions, fullToUnits, fullToSize, allocfullToSize); - VLOG(DEBUG, "\tthread-local to-regions %zu: %zu units (%zu B, alloc %zu)", - fullToRegions, fullToUnits, fullToSize, allocTLToSize); } void ToSpace::GetPromotedTo(OldSpace& mspace) { + // release thread-local to-space regions as they will promote to old-space + AllocBufferVisitor visitor = [](AllocationBuffer& tlab) { + tlab.ClearRegion(); + }; + Heap::GetHeap().GetAllocator().VisitAllocBuffers(visitor); + mspace.PromoteRegionList(fullToRegionList_); mspace.PromoteRegionList(tlToRegionList_); } + +RegionDesc* ToSpace::AllocateThreadLocalRegion(bool expectPhysicalMem) +{ + RegionDesc* region = regionManager_.TakeRegion(expectPhysicalMem, false, true); + if (region != nullptr) { + DLOG(REGION, "alloc thread local to region @0x%zx+%zu type %u", region->GetRegionStart(), + region->GetRegionAllocatedSize(), + region->GetRegionType()); + tlToRegionList_.PrependRegion(region, RegionDesc::RegionType::TO_REGION); + } + return region; +} + } // namespace common diff --git a/common_components/heap/space/to_space.h b/common_components/heap/space/to_space.h index 2fa954ce11ef5583b69ef032ddc86604793b8e29..83f9ec95ca1eef164ef136fd942f02af5d3b5a2b 100644 --- a/common_components/heap/space/to_space.h +++ b/common_components/heap/space/to_space.h @@ -44,11 +44,10 @@ public: void DumpRegionStats() const; - void FixAllRegions() + void CollectFixTasks(FixHeapTaskList &taskList) { - TraceCollector& collector = 
reinterpret_cast(Heap::GetHeap().GetCollector()); - RegionManager::FixToRegionList(collector, fullToRegionList_); - RegionManager::FixToRegionList(collector, tlToRegionList_); + FixHeapWorker::CollectFixHeapTasks(taskList, tlToRegionList_, FIX_TO_REGION); + FixHeapWorker::CollectFixHeapTasks(taskList, fullToRegionList_, FIX_TO_REGION); } void AddFullRegion(RegionDesc* region) @@ -57,26 +56,24 @@ public: fullToRegionList_.PrependRegion(region, RegionDesc::RegionType::TO_REGION); } - void AddThreadLocalRegion(RegionDesc* region) - { - tlToRegionList_.PrependRegion(region, RegionDesc::RegionType::TO_REGION); - } + RegionDesc* AllocateThreadLocalRegion(bool expectPhysicalMem); void HandleFullThreadLocalRegion(RegionDesc* region) { DCHECK_CC(Heap::GetHeap().IsGcStarted()); + DCHECK_CC(region->IsToRegion()); tlToRegionList_.DeleteRegion(region); fullToRegionList_.PrependRegion(region, RegionDesc::RegionType::TO_REGION); } size_t GetAllocatedSize() const { - return tlToRegionList_.GetAllocatedSize() + fullToRegionList_.GetAllocatedSize(); + return tlToRegionList_.GetAllocatedSize(false) + fullToRegionList_.GetAllocatedSize(); } size_t GetSurvivedSize() const { - return tlToRegionList_.GetAllocatedSize() + fullToRegionList_.GetAllocatedSize(); + return tlToRegionList_.GetAllocatedSize(false) + fullToRegionList_.GetAllocatedSize(); } size_t GetUsedUnitCount() const @@ -102,7 +99,7 @@ private: RegionList tmp("temp region list"); list.CopyListTo(tmp); tmp.VisitAllRegions([](RegionDesc* region) { - region->ClearTraceCopyFixLine(); + region->ClearMarkingCopyLine(); region->ClearLiveInfo(); region->ResetMarkBit(); }); diff --git a/common_components/heap/space/young_space.cpp b/common_components/heap/space/young_space.cpp index 8dbd5168851006c2267e370979c85420ced76f49..d1a9b66a2385de37dc08198cc08efca52e4328f9 100644 --- a/common_components/heap/space/young_space.cpp +++ b/common_components/heap/space/young_space.cpp @@ -32,9 +32,23 @@ void YoungSpace::DumpRegionStats() const 
size_t allocRecentFullSize = recentFullRegionList_.GetAllocatedSize(); size_t units = tlUnits + recentFullUnits; - VLOG(DEBUG, "young space units: %zu (%zu B)", units, units * RegionDesc::UNIT_SIZE); - VLOG(DEBUG, "\ttl-regions %zu: %zu units (%zu B, alloc %zu)", tlRegions, tlUnits, tlSize, allocTLSize); - VLOG(DEBUG, "\trecent-full regions %zu: %zu units (%zu B, alloc %zu)", + VLOG(DEBUG, "\tyoung space units: %zu (%zu B)", units, units * RegionDesc::UNIT_SIZE); + VLOG(DEBUG, "\t tl-regions %zu: %zu units (%zu B, alloc %zu)", tlRegions, tlUnits, tlSize, allocTLSize); + VLOG(DEBUG, "\t recent-full regions %zu: %zu units (%zu B, alloc %zu)", recentFullRegions, recentFullUnits, recentFullSize, allocRecentFullSize); } + +RegionDesc* YoungSpace::AllocateThreadLocalRegion(bool expectPhysicalMem) +{ + RegionDesc* region = regionManager_.TakeRegion(expectPhysicalMem, true); + ASSERT_LOGF(!IsGcThread(), "GC thread cannot take tlRegion"); + if (region != nullptr) { + DLOG(REGION, "alloc thread local young region @0x%zx+%zu type %u", region->GetRegionStart(), + region->GetRegionAllocatedSize(), + region->GetRegionType()); + InitRegionPhaseLine(region); + tlRegionList_.PrependRegion(region, RegionDesc::RegionType::THREAD_LOCAL_REGION); + } + return region; +} } // namespace common diff --git a/common_components/heap/space/young_space.h b/common_components/heap/space/young_space.h index 66d62fcc775990d13de545d142b552b78d85cd06..69798998040ddbed2f505d5a7803d63d0adaf86a 100644 --- a/common_components/heap/space/young_space.h +++ b/common_components/heap/space/young_space.h @@ -28,6 +28,8 @@ #include "common_components/heap/space/regional_space.h" #include "common_components/heap/space/from_space.h" #include "common_components/mutator/mutator.h" +#include "common_components/heap/allocator/fix_heap.h" +#include "common_components/heap/allocator/region_desc.h" #if defined(COMMON_SANITIZER_SUPPORT) #include "common_components/base/asan_interface.h" #endif @@ -42,17 +44,14 @@ public: 
void DumpRegionStats() const; - void FixAllRegions() + void CollectFixTasks(FixHeapTaskList &taskList) { - TraceCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); - RegionManager::FixRecentRegionList(collector, tlRegionList_); - RegionManager::FixRecentRegionList(collector, recentFullRegionList_); + std::lock_guard lock(lock_); + FixHeapWorker::CollectFixHeapTasks(taskList, tlRegionList_, FIX_RECENT_REGION); + FixHeapWorker::CollectFixHeapTasks(taskList, recentFullRegionList_, FIX_RECENT_REGION); } - void AddThreadLocalRegion(RegionDesc* region) - { - tlRegionList_.PrependRegion(region, RegionDesc::RegionType::THREAD_LOCAL_REGION); - } + RegionDesc* AllocateThreadLocalRegion(bool expectPhysicalMem); void AddFullRegion(RegionDesc* region) { @@ -61,6 +60,7 @@ public: void HandleFullThreadLocalRegion(RegionDesc* region) { + std::lock_guard lock(lock_); DCHECK_CC(region->IsThreadLocalRegion()); tlRegionList_.DeleteRegion(region); recentFullRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_FULL_REGION); @@ -78,7 +78,7 @@ public: size_t GetAllocatedSize() const { - return tlRegionList_.GetAllocatedSize() + recentFullRegionList_.GetAllocatedSize(); + return tlRegionList_.GetAllocatedSize(false) + recentFullRegionList_.GetAllocatedSize(); } size_t GetRecentAllocatedSize() const @@ -88,6 +88,7 @@ public: void ClearAllGCInfo() { + std::lock_guard lock(lock_); ClearGCInfo(tlRegionList_); ClearGCInfo(recentFullRegionList_); } @@ -102,11 +103,15 @@ private: RegionList tmp("temp region list"); list.CopyListTo(tmp); tmp.VisitAllRegions([](RegionDesc* region) { - region->ClearTraceCopyFixLine(); + region->ClearMarkingCopyLine(); region->ClearLiveInfo(); region->ResetMarkBit(); }); } + // Used to exclude the promotion of TLS regions during the ClearGCInfo phase + // This is necessary when the mutator can promote TL to recentFull while the GC is performing ClearGC + std::mutex lock_; + // regions for thread-local allocation. 
// regions in this list are already used for allocation but not full yet. RegionList tlRegionList_; diff --git a/common_components/heap/tests/BUILD.gn b/common_components/heap/tests/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..ead18f376182e924776fc3be6f0d69a3bca90bba --- /dev/null +++ b/common_components/heap/tests/BUILD.gn @@ -0,0 +1,85 @@ +# Copyright (c) 2025 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//arkcompiler/ets_runtime/common_components/tests/test_helper.gni") + +module_output_path = "ets_runtime" + +host_unittest_action("Heap_Manager_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "heap_manager_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Verification_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "verification_test.cpp", + ] + + include_dirs = [ "//arkcompiler/ets_runtime/common_components/" ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ 
"//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "bounds_checking_function:libsec_shared", + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +group("unittest") { + testonly = true + + # deps file + deps = [ + ":Heap_Manager_Test", + ":Verification_Test", + ] +} + +group("host_unittest") { + testonly = true + + # deps file + deps = [ + ":Heap_Manager_TestAction", + ":Verification_TestAction", + ] +} \ No newline at end of file diff --git a/common_components/heap/tests/heap_manager_test.cpp b/common_components/heap/tests/heap_manager_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7721f429c268c8bdbdad2c1327c90e9c69ed5558 --- /dev/null +++ b/common_components/heap/tests/heap_manager_test.cpp @@ -0,0 +1,41 @@ +/* +* Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/heap/heap.h" +#include "common_components/heap/heap_manager.h" +#include "common_components/tests/test_helper.h" + +using namespace common; +namespace common::test { +class HeapManagerTest : public common::test::BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } +}; + +HWTEST_F_L0(HeapManagerTest, RequestGC) +{ + HeapManager manager; + Heap::GetHeap().EnableGC(false); + manager.RequestGC(GCReason::GC_REASON_USER, true, common::GC_TYPE_FULL); + ASSERT_FALSE(Heap::GetHeap().IsGCEnabled()); + + Heap::GetHeap().EnableGC(true); + manager.RequestGC(GCReason::GC_REASON_USER, true, common::GC_TYPE_FULL); + ASSERT_TRUE(Heap::GetHeap().IsGCEnabled()); +} +} // namespace common::test \ No newline at end of file diff --git a/common_components/heap/tests/verification_test.cpp b/common_components/heap/tests/verification_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1709de37c1b2fcc45e2be7ce2064b93a0b84a81a --- /dev/null +++ b/common_components/heap/tests/verification_test.cpp @@ -0,0 +1,286 @@ +/* +* Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/heap/ark_collector/ark_collector.h" +#include "common_components/heap/verification.cpp" +#include "common_components/heap/heap_manager.h" +#include "common_components/tests/test_helper.h" +#include "common_interfaces/objects/base_object_operator.h" + +using namespace common; +namespace common::test { +class TestBaseObjectOperator : public common::BaseObjectOperatorInterfaces { +public: + bool IsValidObject([[maybe_unused]] const BaseObject *object) const override { return enbaleValidObject_; } + void ForEachRefField(const BaseObject *object, const common::RefFieldVisitor &visitor) const override {} + size_t GetSize(const BaseObject *object) const override{ return size_; } + BaseObject *GetForwardingPointer(const BaseObject *object) const override { return nullptr; } + void SetForwardingPointerAfterExclusive(BaseObject *object, BaseObject *fwdPtr) override {} + void SetValidObject(bool value) { enbaleValidObject_ = value; } + void SetSize(size_t size) { size_ = size; } +private: + bool enbaleValidObject_ = false; + size_t size_ = 0; +}; +class VerificationTest : public common::test::BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() + { + BaseRuntime::GetInstance()->Fini(); + } + + void SetUp() override + { + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::GC_THREAD); + } + + void TearDown() override + { + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::GC_THREAD); + } +}; + +HWTEST_F_L0(VerificationTest, GetObjectInfoTest) +{ + BaseObject* obj = nullptr; + std::string result = GetObjectInfo(obj); + + EXPECT_NE(result.find("address: 0x0"), std::string::npos); + EXPECT_NE(result.find("Skip: nullptr"), std::string::npos); + EXPECT_NE(result.find("Skip: Object is not in heap range"), std::string::npos); +} + +HWTEST_F_L0(VerificationTest, GetObjectInfoTest2) +{ + BaseObject obj; + std::string result = 
GetObjectInfo(&obj); + EXPECT_NE(result.find("address: 0x"), std::string::npos); + EXPECT_NE(result.find("Skip: Object is not in heap range"), std::string::npos); +} + +HWTEST_F_L0(VerificationTest, GetRefInfoTest) +{ + BaseObject oldObj; + RefField oldField(&oldObj); + MAddress oldAddress = oldField.GetFieldValue(); + std::string result = GetRefInfo(oldField); + EXPECT_NE(result.find("address: 0x"), std::string::npos); + EXPECT_NE(result.find("Skip: Object is not in heap range"), std::string::npos); +} + +HWTEST_F_L0(VerificationTest, VerifyRefImplTest2) +{ + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0U); + BaseObject* obj = reinterpret_cast(addr); + RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + ASSERT_NE(region, nullptr); + region->SetRegionType(RegionDesc::RegionType::FROM_REGION); + RefField field(obj); + + auto refObj = field.GetTargetObject(); + + AfterForwardVisitor visitor; + visitor.VerifyRefImpl(obj, field); + ASSERT_FALSE(RegionalHeap::IsMarkedObject(refObj)); + ASSERT_FALSE(RegionalHeap::IsResurrectedObject(refObj)); +} + +HWTEST_F_L0(VerificationTest, VerifyRefImplTest3) +{ + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0U); + BaseObject* obj = reinterpret_cast(addr); + RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + ASSERT_NE(region, nullptr); + region->SetRegionType(RegionDesc::RegionType::FULL_POLYSIZE_NONMOVABLE_REGION); + RefField field(obj); + + auto refObj = field.GetTargetObject(); + + ReadBarrierSetter visitor; + visitor.VerifyRefImpl(nullptr, field); + visitor.VerifyRefImpl(obj, field); + EXPECT_EQ(RegionDesc::RegionType::FULL_POLYSIZE_NONMOVABLE_REGION, + RegionDesc::GetRegionDescAt(reinterpret_cast(field.GetTargetObject()))->GetRegionType()); + + 
region->SetRegionType(RegionDesc::RegionType::RECENT_POLYSIZE_NONMOVABLE_REGION); + visitor.VerifyRefImpl(obj, field); + EXPECT_EQ(RegionDesc::RegionType::RECENT_POLYSIZE_NONMOVABLE_REGION, + RegionDesc::GetRegionDescAt(reinterpret_cast(field.GetTargetObject()))->GetRegionType()); + + region->SetRegionType(RegionDesc::RegionType::MONOSIZE_NONMOVABLE_REGION); + visitor.VerifyRefImpl(obj, field); + EXPECT_EQ(RegionDesc::RegionType::MONOSIZE_NONMOVABLE_REGION, + RegionDesc::GetRegionDescAt(reinterpret_cast(field.GetTargetObject()))->GetRegionType()); + + region->SetRegionType(RegionDesc::RegionType::FULL_MONOSIZE_NONMOVABLE_REGION); + visitor.VerifyRefImpl(obj, field); + EXPECT_EQ(RegionDesc::RegionType::FULL_MONOSIZE_NONMOVABLE_REGION, + RegionDesc::GetRegionDescAt(reinterpret_cast(field.GetTargetObject()))->GetRegionType()); + + region->SetRegionType(RegionDesc::RegionType::READ_ONLY_REGION); + auto oldRefValue = field.GetFieldValue(); + visitor.VerifyRefImpl(obj, field); + auto newRefValue = field.GetFieldValue(); + EXPECT_NE(oldRefValue, newRefValue); +} + +std::unique_ptr GetArkCollector() +{ + CollectorResources &resources = Heap::GetHeap().GetCollectorResources(); + Allocator &allocator = Heap::GetHeap().GetAllocator(); + + return std::make_unique(allocator, resources); +} + +HWTEST_F_L0(VerificationTest, VerifyAfterMarkTest1) +{ + Heap::GetHeap().SetGCPhase(GCPhase::GC_PHASE_POST_MARK); + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + WVerify verify; + verify.VerifyAfterMark(*arkCollector); + ASSERT_FALSE(MutatorManager::Instance().WorldStopped()); +} + +HWTEST_F_L0(VerificationTest, VerifyAfterForwardTest1) +{ + Heap::GetHeap().SetGCPhase(GCPhase::GC_PHASE_COPY); + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + WVerify verify; + verify.VerifyAfterForward(*arkCollector); + ASSERT_FALSE(MutatorManager::Instance().WorldStopped()); +} + +HWTEST_F_L0(VerificationTest, 
VerifyAfterFixTest1) +{ + Heap::GetHeap().SetGCPhase(GCPhase::GC_PHASE_FIX); + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + WVerify verify; + verify.VerifyAfterFix(*arkCollector); + ASSERT_FALSE(MutatorManager::Instance().WorldStopped()); +} + +HWTEST_F_L0(VerificationTest, EnableReadBarrierDFXTest1) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + WVerify verify; + verify.EnableReadBarrierDFX(*arkCollector); + ASSERT_FALSE(MutatorManager::Instance().WorldStopped()); +} + +HWTEST_F_L0(VerificationTest, DisableReadBarrierDFXTest1) +{ + std::unique_ptr arkCollector = GetArkCollector(); + ASSERT_TRUE(arkCollector != nullptr); + WVerify verify; + verify.DisableReadBarrierDFX(*arkCollector); + ASSERT_FALSE(MutatorManager::Instance().WorldStopped()); +} + +HWTEST_F_L0(VerificationTest, GetObjectInfoTest3) +{ + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::NONMOVABLE_OBJECT, true); + BaseObject *obj = reinterpret_cast(addr); + std::string result = GetObjectInfo(obj); + EXPECT_NE(result.find("address: 0x"), std::string::npos); + EXPECT_NE(result.find("Type: 0x"), std::string::npos); + EXPECT_NE(result.find("Base: 0x"), std::string::npos); + EXPECT_NE(result.find("Start: 0x"), std::string::npos); + EXPECT_NE(result.find("End: 0x"), std::string::npos); + EXPECT_NE(result.find("AllocPtr: 0x"), std::string::npos); + EXPECT_NE(result.find("MarkingLine: 0x"), std::string::npos); + EXPECT_NE(result.find("CopyLine: 0x"), std::string::npos); +} + +HWTEST_F_L0(VerificationTest, GetRefInfoTest2) +{ + RefField field(nullptr); + uintptr_t taggedValue = 0x04; + field.SetFieldValue(static_cast(taggedValue)); + std::string result = GetRefInfo(field); + EXPECT_NE(result.find("> Raw memory:"), std::string::npos); + EXPECT_NE(result.find("Skip: primitive"), std::string::npos); +} + +HWTEST_F_L0(VerificationTest, VerifyRefImplTest) +{ + HeapAddress addr = 
HeapManager::Allocate(sizeof(BaseObject), AllocType::NONMOVABLE_OBJECT, true); + BaseObject *obj = reinterpret_cast(addr); + RefField oldField(obj); + TestBaseObjectOperator operatorImpl; + BaseObject::RegisterDynamic(&operatorImpl); + operatorImpl.SetValidObject(true); + Heap::GetHeap().SetGCReason(GCReason::GC_REASON_YOUNG); + operatorImpl.SetSize(BaseObject::BaseObjectSize()); + AfterMarkVisitor visitor; + visitor.VerifyRefImpl(nullptr, oldField); + ASSERT_TRUE(Heap::GetHeap().GetGCReason() == GCReason::GC_REASON_YOUNG); + ASSERT_TRUE(Heap::IsTaggedObject(oldField.GetFieldValue())); + + AfterMarkVisitor visitor1; + visitor1.VerifyRefImpl(nullptr, oldField); + ASSERT_TRUE(Heap::IsTaggedObject(oldField.GetFieldValue())); +} + +HWTEST_F_L0(VerificationTest, VerifyRefImplTest1) +{ + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::NONMOVABLE_OBJECT, true); + BaseObject *obj = reinterpret_cast(addr); + RefField oldField(obj); + TestBaseObjectOperator operatorImpl; + BaseObject::RegisterDynamic(&operatorImpl); + operatorImpl.SetValidObject(true); + Heap::GetHeap().SetGCReason(GCReason::GC_REASON_YOUNG); + operatorImpl.SetSize(BaseObject::BaseObjectSize()); + AfterMarkVisitor visitor; + visitor.VerifyRefImpl(obj, oldField); + ASSERT_TRUE(Heap::GetHeap().GetGCReason() == GCReason::GC_REASON_YOUNG); + ASSERT_TRUE(Heap::IsTaggedObject(oldField.GetFieldValue())); +} + +static BaseObject* testObj = nullptr; +static void CustomVisitRoot(const RefFieldVisitor& visitorFunc) +{ + RefField<> field(testObj); + visitorFunc(field); +} +HWTEST_F_L0(VerificationTest, IterateRemarked_VerifyAllRefs) +{ + RegionalHeap regionalHeap; + VerifyIterator verify(regionalHeap); + AfterForwardVisitor visitor; + std::unordered_set markSet; + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::NONMOVABLE_OBJECT, true); + testObj = reinterpret_cast(addr); + markSet.insert(testObj); + + verify.IterateRemarked(visitor, markSet, true); + 
verify.IterateRemarked(visitor, markSet, false); + EXPECT_EQ(markSet.size(), 1); + EXPECT_TRUE(markSet.find(testObj) != markSet.end()); +} +} // namespace common::test diff --git a/common_components/heap/verification.cpp b/common_components/heap/verification.cpp index 7ee2c041414d31e59851a1f437f884d498a80a30..b67fea77b6e8326b49626d79149952d74c884dd6 100755 --- a/common_components/heap/verification.cpp +++ b/common_components/heap/verification.cpp @@ -14,8 +14,10 @@ */ #include "verification.h" + +#include "ark_collector/ark_collector.h" #include "allocator/region_desc.h" -#include "allocator/region_space.h" +#include "allocator/regional_heap.h" #include "common/mark_work_stack.h" #include "common/type_def.h" #include "common_components/log/log.h" @@ -26,7 +28,6 @@ #include "mutator/mutator_manager.h" #include "securec.h" #include "thread/mutator_base.h" -#include "w_collector/w_collector.h" #include #include #include @@ -38,7 +39,8 @@ * * RB DFX: * Force to use STW GC. Force to use read barrier out of GC. - * After GC is finished, set the lowerst bit(WEAK_TAG) of RefField which is not root or doesn't point to pinned objects. + * After GC is finished, set the lowerst bit(WEAK_TAG) of RefField which is not root or doesn't + * point to non-movable objects. * The read barrier is responsible to remove the WEAK_TAG for properly deferencing the object. * Disabled by defualt. Controlled by gn-option `ets_runtime_enable_rb_dfx`. 
* @@ -101,12 +103,12 @@ std::string GetObjectInfo(const BaseObject* obj) } else { auto region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); s << std::hex << "Type: 0x" << (int) region->GetRegionType() << ", " + << "Base: 0x" << region->GetRegionBase() << ", " << "Start: 0x" << region->GetRegionStart() << ", " << "End: 0x" << region->GetRegionEnd() << ", " << "AllocPtr: 0x" << region->GetRegionAllocPtr() << ", " - << "TraceLine: 0x" << region->GetTraceLine() << ", " - << "CopyLine: 0x" << region->GetCopyLine() << ", " - << "FixLine: 0x" << region->GetFixLine() << std::endl; + << "MarkingLine: 0x" << region->GetMarkingLine() << ", " + << "CopyLine: 0x" << region->GetCopyLine() << std::endl; } return s.str(); @@ -203,37 +205,46 @@ private: size_t count_ = 0; }; +template class AfterMarkVisitor : public VerifyVisitor { public: void VerifyRefImpl(const BaseObject* obj, const RefField<>& ref) override { IsValidRef(obj, ref); - // check retraced objects, so they must be in one of the states below + // check remarked objects, so they must be in one of the states below auto refObj = ref.GetTargetObject(); RegionDesc *region = RegionDesc::GetRegionDescAt(reinterpret_cast(refObj)); - // obj == nullptr means that during EnumStrongRoots, there can no longer - // be any objects in fromSpace, because they would all have been copied - // by then + // if obj is nullptr, this means it is a root object + // We expect root objects to be already forwarded: assert(!region->isFromRegion()) if (obj == nullptr) { - CHECKF(!region->IsFromRegion()) - << CONTEXT << "Object: " << GetObjectInfo(obj) << std::endl - << "Ref: " << GetRefInfo(ref) << std::endl; - return; + if constexpr (IsSTWRootVerify) { + CHECKF(!region->IsFromRegion()) + << CONTEXT << "Object: " << GetObjectInfo(obj) << std::endl + << "Ref: " << GetRefInfo(ref) << std::endl; + + return; + } else { + CHECKF(!region->IsInToSpace()) + << CONTEXT << "Object: " << GetObjectInfo(obj) << std::endl + << "Ref: " << 
GetRefInfo(ref) << std::endl; + + return; + } } if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { - CHECKF(RegionSpace::IsResurrectedObject(refObj) || - RegionSpace::IsMarkedObject(refObj) || - RegionSpace::IsNewObjectSinceTrace(refObj) || - !RegionSpace::IsYoungSpaceObject(refObj)) + CHECKF(RegionalHeap::IsResurrectedObject(refObj) || + RegionalHeap::IsMarkedObject(refObj) || + RegionalHeap::IsNewObjectSinceMarking(refObj) || + !RegionalHeap::IsYoungSpaceObject(refObj)) << CONTEXT << "Object: " << GetObjectInfo(obj) << std::endl << "Ref: " << GetRefInfo(ref) << std::endl; } else { - CHECKF(RegionSpace::IsResurrectedObject(refObj) || - RegionSpace::IsMarkedObject(refObj) || - RegionSpace::IsNewObjectSinceTrace(refObj)) + CHECKF(RegionalHeap::IsResurrectedObject(refObj) || + RegionalHeap::IsMarkedObject(refObj) || + RegionalHeap::IsNewObjectSinceMarking(refObj)) << CONTEXT << "Object: " << GetObjectInfo(obj) << std::endl << "Ref: " << GetRefInfo(ref) << std::endl; } @@ -246,7 +257,7 @@ public: { // check objects in from-space, only alive objects are forwarded auto refObj = ref.GetTargetObject(); - if (RegionSpace::IsMarkedObject(refObj) || RegionSpace::IsResurrectedObject(refObj)) { + if (RegionalHeap::IsMarkedObject(refObj) || RegionalHeap::IsResurrectedObject(refObj)) { CHECKF(refObj->IsForwarded()) << CONTEXT << "Object: " << GetObjectInfo(obj) << std::endl @@ -289,11 +300,11 @@ public: auto regionType = RegionDesc::GetRegionDescAt(reinterpret_cast(ref.GetTargetObject()))->GetRegionType(); - if (regionType == RegionDesc::RegionType::RECENT_PINNED_REGION || - regionType == RegionDesc::RegionType::FULL_PINNED_REGION || - regionType == RegionDesc::RegionType::FIXED_PINNED_REGION || - regionType == RegionDesc::RegionType::FULL_FIXED_PINNED_REGION) { - // Read barrier for pinned objects might be optimized out, so don't set dfx tag + if (regionType == RegionDesc::RegionType::RECENT_POLYSIZE_NONMOVABLE_REGION || + regionType == 
RegionDesc::RegionType::FULL_POLYSIZE_NONMOVABLE_REGION || + regionType == RegionDesc::RegionType::MONOSIZE_NONMOVABLE_REGION || + regionType == RegionDesc::RegionType::FULL_MONOSIZE_NONMOVABLE_REGION) { + // Read barrier for non-movable objects might be optimized out, so don't set dfx tag return; } @@ -320,7 +331,7 @@ public: class VerifyIterator { public: - explicit VerifyIterator(RegionSpace& space) : space_(space) {} + explicit VerifyIterator(RegionalHeap& space) : space_(space) {} void IterateFromSpace(VerifyVisitor& visitor) { @@ -346,10 +357,11 @@ public: VisitWeakRoots(refVisitor); } - void IterateRetraced(VerifyVisitor& visitor, bool forRBDFX = false) + // By default, IterateRemarked uses the VisitRoots method to traverse GC roots + template + void IterateRemarked(VerifyVisitor &visitor, std::unordered_set &markSet, bool forRBDFX = false) { MarkStack markStack; - std::unordered_set markSet; BaseObject* obj = nullptr; auto markFunc = [this, &visitor, &markStack, &markSet, &obj, &forRBDFX](RefField<>& field) { @@ -382,7 +394,7 @@ public: markStack.push_back(refObj); }; - EnumStrongRoots(markFunc); + VisitRoot(markFunc); while (!markStack.empty()) { obj = markStack.back(); markStack.pop_back(); @@ -404,38 +416,45 @@ private: VisitRoots(markFunc); } - void Trace(MarkStack& markStack) {} + void Marking(MarkStack& markStack) {} - RegionSpace& space_; + RegionalHeap& space_; }; -void WVerify::VerifyAfterMarkInternal(RegionSpace& space) +void WVerify::VerifyAfterMarkInternal(RegionalHeap& space) { CHECKF(Heap::GetHeap().GetGCPhase() == GCPhase::GC_PHASE_POST_MARK) - << CONTEXT << "Mark verification should be called after PostTrace()"; + << CONTEXT << "Mark verification should be called after PostMarking()"; auto iter = VerifyIterator(space); - auto visitor = AfterMarkVisitor(); - iter.IterateRetraced(visitor); - - LOG_COMMON(DEBUG) << "[WVerify]: VerifyAfterMark verified ref count: " << visitor.VerifyRefCount(); + auto verifySTWRoots = AfterMarkVisitor(); + 
std::unordered_set markSet; + iter.IterateRemarked(verifySTWRoots, markSet); + auto verifyConcurrentRoots = AfterMarkVisitor(); + iter.IterateRemarked(verifyConcurrentRoots, markSet); + + LOG_COMMON(DEBUG) << "[WVerify]: VerifyAfterMark (STWRoots) verified ref count: " + << verifySTWRoots.VerifyRefCount(); + LOG_COMMON(DEBUG) << "[WVerify]: VerifyAfterMark (ConcurrentRoots) verified ref count: " + << verifyConcurrentRoots.VerifyRefCount(); } -void WVerify::VerifyAfterMark(WCollector& collector) +void WVerify::VerifyAfterMark(ArkCollector& collector) { #if !defined(ENABLE_CMC_VERIFY) && defined(NDEBUG) return; #endif - RegionSpace& space = reinterpret_cast(collector.GetAllocator()); + RegionalHeap& space = reinterpret_cast(collector.GetAllocator()); if (!MutatorManager::Instance().WorldStopped()) { - ScopedStopTheWorld stw("WGC-verify-aftermark"); + STWParam stwParam{"WGC-verify-aftermark"}; + ScopedStopTheWorld stw(stwParam); VerifyAfterMarkInternal(space); } else { VerifyAfterMarkInternal(space); } } -void WVerify::VerifyAfterForwardInternal(RegionSpace& space) +void WVerify::VerifyAfterForwardInternal(RegionalHeap& space) { CHECKF(Heap::GetHeap().GetGCPhase() == GCPhase::GC_PHASE_COPY) << CONTEXT << "Forward verification should be called after ForwardFromSpace()"; @@ -447,87 +466,95 @@ void WVerify::VerifyAfterForwardInternal(RegionSpace& space) LOG_COMMON(DEBUG) << "[WVerify]: VerifyAfterForward verified ref count: " << visitor.VerifyRefCount(); } -void WVerify::VerifyAfterForward(WCollector& collector) +void WVerify::VerifyAfterForward(ArkCollector& collector) { #if !defined(ENABLE_CMC_VERIFY) && defined(NDEBUG) return; #endif - RegionSpace& space = reinterpret_cast(collector.GetAllocator()); + RegionalHeap& space = reinterpret_cast(collector.GetAllocator()); if (!MutatorManager::Instance().WorldStopped()) { - ScopedStopTheWorld stw("WGC-verify-aftermark"); + STWParam stwParam{"WGC-verify-aftermark"}; + ScopedStopTheWorld stw(stwParam); 
VerifyAfterForwardInternal(space); } else { VerifyAfterForwardInternal(space); } } -void WVerify::VerifyAfterFixInternal(RegionSpace& space) +void WVerify::VerifyAfterFixInternal(RegionalHeap& space) { CHECKF(Heap::GetHeap().GetGCPhase() == GCPhase::GC_PHASE_FIX) << CONTEXT << "Fix verification should be called after Fix()"; auto iter = VerifyIterator(space); auto visitor = AfterFixVisitor(); - iter.IterateRetraced(visitor); + + std::unordered_set markSet; + iter.IterateRemarked(visitor, markSet); LOG_COMMON(DEBUG) << "[WVerify]: VerifyAfterFix verified ref count: " << visitor.VerifyRefCount(); } -void WVerify::VerifyAfterFix(WCollector& collector) +void WVerify::VerifyAfterFix(ArkCollector& collector) { #if !defined(ENABLE_CMC_VERIFY) && defined(NDEBUG) return; #endif - RegionSpace& space = reinterpret_cast(collector.GetAllocator()); + RegionalHeap& space = reinterpret_cast(collector.GetAllocator()); if (!MutatorManager::Instance().WorldStopped()) { - ScopedStopTheWorld stw("WGC-verify-aftermark"); + STWParam stwParam{"WGC-verify-aftermark"}; + ScopedStopTheWorld stw(stwParam); VerifyAfterFixInternal(space); } else { VerifyAfterFixInternal(space); } } -void WVerify::EnableReadBarrierDFXInternal(RegionSpace& space) +void WVerify::EnableReadBarrierDFXInternal(RegionalHeap& space) { auto iter = VerifyIterator(space); auto setter = ReadBarrierSetter(); auto unsetter = ReadBarrierUnsetter(); - iter.IterateRetraced(setter, true); + std::unordered_set markSet; + iter.IterateRemarked(setter, markSet, true); // some slots of heap object are also roots, so we need to unset them iter.IterateRoot(unsetter); } -void WVerify::EnableReadBarrierDFX(WCollector& collector) +void WVerify::EnableReadBarrierDFX(ArkCollector& collector) { #if !defined(ENABLE_CMC_RB_DFX) return; #endif - RegionSpace& space = reinterpret_cast(collector.GetAllocator()); + RegionalHeap& space = reinterpret_cast(collector.GetAllocator()); if (!MutatorManager::Instance().WorldStopped()) { - 
ScopedStopTheWorld stw("WGC-verify-enable-rb-dfx"); + STWParam stwParam{"WGC-verify-enable-rb-dfx"}; + ScopedStopTheWorld stw(stwParam); EnableReadBarrierDFXInternal(space); } else { EnableReadBarrierDFXInternal(space); } } -void WVerify::DisableReadBarrierDFXInternal(RegionSpace& space) +void WVerify::DisableReadBarrierDFXInternal(RegionalHeap& space) { auto iter = VerifyIterator(space); auto unsetter = ReadBarrierUnsetter(); - iter.IterateRetraced(unsetter, true); + std::unordered_set markSet; + iter.IterateRemarked(unsetter, markSet, true); } -void WVerify::DisableReadBarrierDFX(WCollector& collector) +void WVerify::DisableReadBarrierDFX(ArkCollector& collector) { #if !defined(ENABLE_CMC_RB_DFX) return; #endif - RegionSpace& space = reinterpret_cast(collector.GetAllocator()); + RegionalHeap& space = reinterpret_cast(collector.GetAllocator()); if (!MutatorManager::Instance().WorldStopped()) { - ScopedStopTheWorld stw("WGC-verify-disable-rb-dfx"); + STWParam stwParam{"WGC-verify-disable-rb-dfx"}; + ScopedStopTheWorld stw(stwParam); DisableReadBarrierDFXInternal(space); } else { DisableReadBarrierDFXInternal(space); diff --git a/common_components/heap/verification.h b/common_components/heap/verification.h index 45cd6177219aab9df87e9d843264370e8fa6771d..4b2d63effcdf8c553dab604c701238d52d15aacf 100755 --- a/common_components/heap/verification.h +++ b/common_components/heap/verification.h @@ -16,24 +16,24 @@ #ifndef VERIFICATION_H #define VERIFICATION_H -#include "w_collector/w_collector.h" +#include "ark_collector/ark_collector.h" namespace common { class WVerify { public: - static void VerifyAfterMark(WCollector &collector); - static void VerifyAfterForward(WCollector &collector); - static void VerifyAfterFix(WCollector &collector); - static void EnableReadBarrierDFX(WCollector &collector); - static void DisableReadBarrierDFX(WCollector &collector); + static void VerifyAfterMark(ArkCollector &collector); + static void VerifyAfterForward(ArkCollector &collector); + 
static void VerifyAfterFix(ArkCollector &collector); + static void EnableReadBarrierDFX(ArkCollector &collector); + static void DisableReadBarrierDFX(ArkCollector &collector); private: - static void VerifyAfterMarkInternal(RegionSpace &space); - static void VerifyAfterForwardInternal(RegionSpace &space); - static void VerifyAfterFixInternal(RegionSpace &space); - static void EnableReadBarrierDFXInternal(RegionSpace &space); - static void DisableReadBarrierDFXInternal(RegionSpace &space); + static void VerifyAfterMarkInternal(RegionalHeap &space); + static void VerifyAfterForwardInternal(RegionalHeap &space); + static void VerifyAfterFixInternal(RegionalHeap &space); + static void EnableReadBarrierDFXInternal(RegionalHeap &space); + static void DisableReadBarrierDFXInternal(RegionalHeap &space); }; } // namespace common diff --git a/common_components/heap/w_collector/tests/copy_barrier_test.cpp b/common_components/heap/w_collector/tests/copy_barrier_test.cpp deleted file mode 100755 index c7fd50712bf4087809c71d14297b4af93b0e4f68..0000000000000000000000000000000000000000 --- a/common_components/heap/w_collector/tests/copy_barrier_test.cpp +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "common_components/heap/w_collector/copy_barrier.h" -#include "common_components/heap/heap.h" -#include "common_components/tests/test_helper.h" - -using namespace common; - -namespace common::test { -class CopyBarrierTest : public common::test::BaseTestWithScope { -}; - -HWTEST_F_L0(CopyBarrierTest, ReadStruct_TEST1) { - Collector& collector = Heap::GetHeap().GetCollector(); - auto copyBarrier = std::make_unique(collector); - ASSERT_TRUE(copyBarrier != nullptr); - - BaseObject obj; - constexpr size_t size = 16; - uint8_t srcBuffer[size] = {}; - uint8_t dstBuffer[size] = {}; - srcBuffer[0] = 1; - HeapAddress src = reinterpret_cast(srcBuffer); - HeapAddress dst = reinterpret_cast(dstBuffer); - copyBarrier->ReadStruct(dst, &obj, src, size); - EXPECT_EQ(dstBuffer[0], 1); - EXPECT_EQ(srcBuffer[0], dstBuffer[0]); -} - -HWTEST_F_L0(CopyBarrierTest, ReadStruct_TEST2) { - Collector& collector = Heap::GetHeap().GetCollector(); - auto copyBarrier = std::make_unique(collector); - ASSERT_TRUE(copyBarrier != nullptr); - - constexpr size_t size = 16; - uint8_t srcBuffer[size] = {}; - uint8_t dstBuffer[size] = {}; - srcBuffer[0] = 1; - HeapAddress src = reinterpret_cast(srcBuffer); - HeapAddress dst = reinterpret_cast(dstBuffer); - copyBarrier->ReadStruct(dst, nullptr, src, size); - EXPECT_EQ(dstBuffer[0], 1); - EXPECT_EQ(srcBuffer[0], dstBuffer[0]); -} - -HWTEST_F_L0(CopyBarrierTest, AtomicWriteRefField_TEST1) { - Collector& collector = Heap::GetHeap().GetCollector(); - auto copyBarrier = std::make_unique(collector); - ASSERT_TRUE(copyBarrier != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress 
= newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - copyBarrier->AtomicWriteRefField(&oldObj, oldField, &newObj, std::memory_order_relaxed); - EXPECT_EQ(oldField.GetFieldValue(), neWAddress); -} - -HWTEST_F_L0(CopyBarrierTest, AtomicWriteRefField_TEST2) { - Collector& collector = Heap::GetHeap().GetCollector(); - auto copyBarrier = std::make_unique(collector); - ASSERT_TRUE(copyBarrier != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - copyBarrier->AtomicWriteRefField(nullptr, oldField, &newObj, std::memory_order_relaxed); - EXPECT_EQ(oldField.GetFieldValue(), neWAddress); -} -} // namespace common::test \ No newline at end of file diff --git a/common_components/heap/w_collector/tests/enum_barrier_test.cpp b/common_components/heap/w_collector/tests/enum_barrier_test.cpp deleted file mode 100755 index e771b6bcac3df30714736bbd33a948d55430ee9d..0000000000000000000000000000000000000000 --- a/common_components/heap/w_collector/tests/enum_barrier_test.cpp +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "common_components/heap/heap.h" -#include "common_components/heap/w_collector/enum_barrier.h" -#include "common_components/tests/test_helper.h" - -using namespace common; - -namespace common::test { -class EnumBarrierTest : public common::test::BaseTestWithScope { -protected: - void SetUp() override - { - Collector& collector = Heap::GetHeap().GetCollector(); - enumBarrier_ = std::make_unique(collector); - } - - void TearDown() override {} - - std::unique_ptr enumBarrier_ {nullptr}; -}; - -HWTEST_F_L0(EnumBarrierTest, ReadRefField_TEST1) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject obj; - RefField field(&obj); - - BaseObject* resultObj = enumBarrier_->ReadRefField(&obj, field); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(resultObj, &obj); -} - -HWTEST_F_L0(EnumBarrierTest, ReadRefField_TEST2) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject obj; - RefField field(&obj); - - BaseObject* resultObj = enumBarrier_->ReadRefField(nullptr, field); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(resultObj, &obj); -} - -HWTEST_F_L0(EnumBarrierTest, ReadStaticRef_TEST1) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject obj; - RefField field(&obj); - - BaseObject* resultObj = enumBarrier_->ReadStaticRef(field); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(resultObj, &obj); -} - -HWTEST_F_L0(EnumBarrierTest, WriteRefField_TEST1) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField field(&oldObj); - MAddress oldAddress = field.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - enumBarrier_->WriteRefField(&oldObj, field, &newObj); - enumBarrier_->WriteBarrier(&oldObj, field, 
&newObj); - MAddress newAddress = field.GetFieldValue(); - EXPECT_NE(newAddress, oldAddress); -} - -HWTEST_F_L0(EnumBarrierTest, ReadStruct_TEST1) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject obj; - constexpr size_t size = 16; - uint8_t srcBuffer[size] = {}; - uint8_t dstBuffer[size] = {}; - srcBuffer[0] = 1; - HeapAddress src = reinterpret_cast(srcBuffer); - HeapAddress dst = reinterpret_cast(dstBuffer); - enumBarrier_->ReadStruct(dst, &obj, src, size); - EXPECT_EQ(dstBuffer[0], 1); - EXPECT_EQ(srcBuffer[0], dstBuffer[0]); -} - -HWTEST_F_L0(EnumBarrierTest, WriteStaticRef_TEST1) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField field(&oldObj); - MAddress oldAddress = field.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - enumBarrier_->WriteStaticRef(field, &newObj); - MAddress newAddress = field.GetFieldValue(); - EXPECT_NE(newAddress, oldAddress); -} - -HWTEST_F_L0(EnumBarrierTest, WriteStruct_TEST1) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - auto objPtr = std::make_unique(); - constexpr size_t size = 16; - auto srcBuffer = std::make_unique(size); - auto dstBuffer = std::make_unique(size); - srcBuffer[0] = 1; - HeapAddress src = reinterpret_cast(srcBuffer.get()); - HeapAddress dst = reinterpret_cast(dstBuffer.get()); - enumBarrier_->WriteStruct(objPtr.get(), dst, size, src, size); - EXPECT_EQ(dstBuffer[0], 1); - EXPECT_EQ(srcBuffer[0], dstBuffer[0]); -} - -HWTEST_F_L0(EnumBarrierTest, AtomicReadRefField_TEST1) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject obj; - constexpr size_t size = 100; - obj.SetSizeForwarded(size); - EXPECT_EQ(obj.GetSizeForwarded(), size); - RefField field(&obj); - - BaseObject* resultObj = nullptr; - resultObj = enumBarrier_->AtomicReadRefField(&obj, field, 
std::memory_order_seq_cst); - ASSERT_TRUE(resultObj != nullptr); -} - -HWTEST_F_L0(EnumBarrierTest, AtomicWriteRefField_TEST1) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - enumBarrier_->AtomicWriteRefField(&oldObj, oldField, &newObj, std::memory_order_relaxed); - EXPECT_EQ(oldField.GetFieldValue(), neWAddress); -} - -HWTEST_F_L0(EnumBarrierTest, AtomicWriteRefField_TEST2) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - enumBarrier_->AtomicWriteRefField(nullptr, oldField, &newObj, std::memory_order_relaxed); - EXPECT_EQ(oldField.GetFieldValue(), neWAddress); -} - -HWTEST_F_L0(EnumBarrierTest, AtomicSwapRefField_TEST1) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - 
RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - BaseObject* resultObj = nullptr; - resultObj = enumBarrier_->AtomicSwapRefField(&oldObj, oldField, &newObj, std::memory_order_relaxed); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(oldField.GetFieldValue(), newField.GetFieldValue()); -} - -HWTEST_F_L0(EnumBarrierTest, CompareAndSwapRefField_TEST1) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - bool result = enumBarrier_->CompareAndSwapRefField(&oldObj, oldField, &oldObj, &newObj, - std::memory_order_seq_cst, std::memory_order_seq_cst); - ASSERT_FALSE(result); -} - -HWTEST_F_L0(EnumBarrierTest, CompareAndSwapRefField_TEST2) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - - bool result = enumBarrier_->CompareAndSwapRefField(&oldObj, oldField, &oldObj, &oldObj, - std::memory_order_seq_cst, std::memory_order_seq_cst); - ASSERT_TRUE(result); -} - -HWTEST_F_L0(EnumBarrierTest, CompareAndSwapRefField_TEST3) { - ASSERT_TRUE(enumBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - 
EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - bool result = enumBarrier_->CompareAndSwapRefField(&oldObj, newField, &oldObj, &newObj, - std::memory_order_seq_cst, std::memory_order_seq_cst); - ASSERT_FALSE(result); -} - -} // namespace common::test \ No newline at end of file diff --git a/common_components/heap/w_collector/tests/post_trace_barrier_test.cpp b/common_components/heap/w_collector/tests/post_trace_barrier_test.cpp deleted file mode 100755 index d51db25d4009a8b687b06b53fbaccd958bdf7367..0000000000000000000000000000000000000000 --- a/common_components/heap/w_collector/tests/post_trace_barrier_test.cpp +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "common_components/heap/heap.h" -#include "common_components/heap/w_collector/post_trace_barrier.h" -#include "common_components/tests/test_helper.h" - -using namespace common; - -namespace common::test { -class PostTraceBarrierTest : public ::testing::Test { -protected: - void SetUp() override - { - Collector &collector = Heap::GetHeap().GetCollector(); - postTraceBarrier_ = std::make_unique(collector); - } - - void TearDown() override - {} - - std::unique_ptr postTraceBarrier_{nullptr}; -}; - -HWTEST_F_L0(PostTraceBarrierTest, ReadRefField_TEST1) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject obj; - RefField field(&obj); - - BaseObject *resultObj = postTraceBarrier_->ReadRefField(&obj, field); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(resultObj, &obj); -} - -HWTEST_F_L0(PostTraceBarrierTest, ReadRefField_TEST2) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject obj; - RefField field(&obj); - - BaseObject *resultObj = postTraceBarrier_->ReadRefField(nullptr, field); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(resultObj, &obj); -} - -HWTEST_F_L0(PostTraceBarrierTest, ReadStaticRef_TEST1) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject obj; - RefField field(&obj); - - BaseObject *resultObj = postTraceBarrier_->ReadStaticRef(field); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(resultObj, &obj); -} - -HWTEST_F_L0(PostTraceBarrierTest, WriteRefField_TEST1) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField field(&oldObj); - MAddress oldAddress = field.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - postTraceBarrier_->WriteRefField(&oldObj, field, &newObj); - postTraceBarrier_->WriteBarrier(&oldObj, field, &newObj); - MAddress newAddress 
= field.GetFieldValue(); - EXPECT_NE(newAddress, oldAddress); -} - -HWTEST_F_L0(PostTraceBarrierTest, ReadStruct_TEST1) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject obj; - constexpr size_t size = 16; - uint8_t srcBuffer[size] = {}; - uint8_t dstBuffer[size] = {}; - srcBuffer[0] = 1; - HeapAddress src = reinterpret_cast(srcBuffer); - HeapAddress dst = reinterpret_cast(dstBuffer); - postTraceBarrier_->ReadStruct(dst, &obj, src, size); - EXPECT_EQ(dstBuffer[0], 1); - EXPECT_EQ(srcBuffer[0], dstBuffer[0]); -} - -HWTEST_F_L0(PostTraceBarrierTest, WriteStaticRef_TEST1) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField field(&oldObj); - MAddress oldAddress = field.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - postTraceBarrier_->WriteStaticRef(field, &newObj); - MAddress newAddress = field.GetFieldValue(); - EXPECT_NE(newAddress, oldAddress); -} - -HWTEST_F_L0(PostTraceBarrierTest, WriteStruct_TEST1) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject obj; - constexpr size_t size = 16; - uint8_t srcBuffer[size] = {}; - uint8_t dstBuffer[size] = {}; - srcBuffer[0] = 1; - HeapAddress src = reinterpret_cast(srcBuffer); - HeapAddress dst = reinterpret_cast(dstBuffer); - postTraceBarrier_->WriteStruct(&obj, dst, size, src, size); - EXPECT_EQ(dstBuffer[0], 1); - EXPECT_EQ(srcBuffer[0], dstBuffer[0]); -} - -HWTEST_F_L0(PostTraceBarrierTest, AtomicReadRefField_TEST1) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject obj; - constexpr size_t size = 100; - obj.SetSizeForwarded(size); - EXPECT_EQ(obj.GetSizeForwarded(), size); - RefField field(&obj); - - BaseObject *resultObj = nullptr; - resultObj = postTraceBarrier_->AtomicReadRefField(&obj, field, std::memory_order_seq_cst); - 
ASSERT_TRUE(resultObj != nullptr); -} - -HWTEST_F_L0(PostTraceBarrierTest, AtomicWriteRefField_TEST1) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - postTraceBarrier_->AtomicWriteRefField(&oldObj, oldField, &newObj, std::memory_order_relaxed); - EXPECT_EQ(oldField.GetFieldValue(), neWAddress); -} - -HWTEST_F_L0(PostTraceBarrierTest, AtomicWriteRefField_TEST2) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - postTraceBarrier_->AtomicWriteRefField(nullptr, oldField, &newObj, std::memory_order_relaxed); - EXPECT_EQ(oldField.GetFieldValue(), neWAddress); -} - -HWTEST_F_L0(PostTraceBarrierTest, AtomicSwapRefField_TEST1) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), 
newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - BaseObject *resultObj = nullptr; - resultObj = postTraceBarrier_->AtomicSwapRefField(&oldObj, oldField, &newObj, std::memory_order_relaxed); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(oldField.GetFieldValue(), newField.GetFieldValue()); -} - -HWTEST_F_L0(PostTraceBarrierTest, CompareAndSwapRefField_TEST1) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - bool result = postTraceBarrier_->CompareAndSwapRefField( - &oldObj, oldField, &oldObj, &newObj, std::memory_order_seq_cst, std::memory_order_seq_cst); - ASSERT_TRUE(result); -} - -HWTEST_F_L0(PostTraceBarrierTest, CompareAndSwapRefField_TEST2) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - - bool result = postTraceBarrier_->CompareAndSwapRefField( - &oldObj, oldField, &oldObj, &oldObj, std::memory_order_seq_cst, std::memory_order_seq_cst); - ASSERT_TRUE(result); -} - -HWTEST_F_L0(PostTraceBarrierTest, CompareAndSwapRefField_TEST3) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr 
size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - bool result = postTraceBarrier_->CompareAndSwapRefField( - &oldObj, newField, &oldObj, &newObj, std::memory_order_seq_cst, std::memory_order_seq_cst); - ASSERT_FALSE(result); -} - -HWTEST_F_L0(PostTraceBarrierTest, CopyStructArray_TEST1) -{ - ASSERT_TRUE(postTraceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - - constexpr size_t size = 16; - uint8_t srcBuffer[size] = {}; - uint8_t dstBuffer[size] = {}; - srcBuffer[0] = 1; - HeapAddress src = reinterpret_cast(srcBuffer); - HeapAddress dst = reinterpret_cast(dstBuffer); - - postTraceBarrier_->CopyStructArray(&oldObj, dst, size, &newObj, src, size); - EXPECT_EQ(dstBuffer[0], 1); - EXPECT_EQ(srcBuffer[0], dstBuffer[0]); -} -} // namespace common::test \ No newline at end of file diff --git a/common_components/heap/w_collector/tests/preforward_barrier_test.cpp b/common_components/heap/w_collector/tests/preforward_barrier_test.cpp deleted file mode 100755 index fef53006cc022feda0664a530a122f7361accc7e..0000000000000000000000000000000000000000 --- a/common_components/heap/w_collector/tests/preforward_barrier_test.cpp +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "common_components/heap/w_collector/preforward_barrier.h" -#include "common_components/heap/heap.h" -#include "common_components/tests/test_helper.h" - -using namespace common; - -namespace common::test { -class PreforwardBarrierTest : public BaseTestWithScope {}; - -HWTEST_F_L0(PreforwardBarrierTest, AtomicWriteRefField_TEST1) -{ - Collector &collector = Heap::GetHeap().GetCollector(); - auto preforwardBarrier = std::make_unique(collector); - ASSERT_TRUE(preforwardBarrier != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - preforwardBarrier->AtomicWriteRefField(&oldObj, oldField, &newObj, std::memory_order_relaxed); - EXPECT_EQ(oldField.GetFieldValue(), neWAddress); -} - -HWTEST_F_L0(PreforwardBarrierTest, AtomicWriteRefField_TEST2) -{ - Collector &collector = Heap::GetHeap().GetCollector(); - auto preforwardBarrier = std::make_unique(collector); - ASSERT_TRUE(preforwardBarrier != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = 
oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - preforwardBarrier->AtomicWriteRefField(nullptr, oldField, &newObj, std::memory_order_relaxed); - EXPECT_EQ(oldField.GetFieldValue(), neWAddress); -} -} // namespace common::test diff --git a/common_components/heap/w_collector/tests/trace_barrier_test.cpp b/common_components/heap/w_collector/tests/trace_barrier_test.cpp deleted file mode 100755 index 1e046cbcdc41e86c5fdca2fbde7eed12de3f1aaf..0000000000000000000000000000000000000000 --- a/common_components/heap/w_collector/tests/trace_barrier_test.cpp +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "common_components/heap/heap.h" -#include "common_components/heap/w_collector/trace_barrier.h" -#include "common_components/tests/test_helper.h" - -using namespace common; - -namespace common::test { -class TraceBarrierTest : public BaseTestWithScope { -protected: - void SetUp() override - { - Collector &collector = Heap::GetHeap().GetCollector(); - traceBarrier_ = std::make_unique(collector); - } - - void TearDown() override - {} - - std::unique_ptr traceBarrier_{nullptr}; -}; - -HWTEST_F_L0(TraceBarrierTest, ReadRefField_TEST1) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject obj; - RefField field(&obj); - - BaseObject *resultObj = traceBarrier_->ReadRefField(&obj, field); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(resultObj, &obj); -} - -HWTEST_F_L0(TraceBarrierTest, ReadRefField_TEST2) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject obj; - RefField field(&obj); - - BaseObject *resultObj = traceBarrier_->ReadRefField(nullptr, field); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(resultObj, &obj); -} - -HWTEST_F_L0(TraceBarrierTest, ReadStaticRef_TEST1) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject obj; - RefField field(&obj); - - BaseObject *resultObj = traceBarrier_->ReadStaticRef(field); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(resultObj, &obj); -} - -HWTEST_F_L0(TraceBarrierTest, ReadStruct_TEST1) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject obj; - constexpr size_t size = 16; - uint8_t srcBuffer[size] = {}; - uint8_t dstBuffer[size] = {}; - srcBuffer[0] = 1; - HeapAddress src = reinterpret_cast(srcBuffer); - HeapAddress dst = reinterpret_cast(dstBuffer); - traceBarrier_->ReadStruct(dst, &obj, src, size); - EXPECT_EQ(dstBuffer[0], 1); - EXPECT_EQ(srcBuffer[0], dstBuffer[0]); -} - -HWTEST_F_L0(TraceBarrierTest, WriteRefField_TEST1) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - 
EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField field(&oldObj); - BaseObject *target = field.GetTargetObject(); - EXPECT_TRUE(target != nullptr); - MAddress oldAddress = field.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - traceBarrier_->WriteRefField(&oldObj, field, &newObj); - MAddress newAddress = field.GetFieldValue(); - EXPECT_NE(newAddress, oldAddress); -} - -HWTEST_F_L0(TraceBarrierTest, WriteRefField_TEST2) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField<> field(MAddress(0)); - BaseObject *target = field.GetTargetObject(); - EXPECT_TRUE(target == nullptr); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - traceBarrier_->WriteRefField(&oldObj, field, &newObj); - MAddress newAddress = field.GetFieldValue(); - EXPECT_EQ(newField.GetFieldValue(), newAddress); -} - -HWTEST_F_L0(TraceBarrierTest, WriteBarrier_TEST1) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - -#ifndef ARK_USE_SATB_BARRIER - constexpr uint64_t TAG_BITS_SHIFT = 48; - constexpr uint64_t TAG_MARK = 0xFFFFULL << TAG_BITS_SHIFT; - constexpr uint64_t TAG_SPECIAL = 0x02ULL; - constexpr uint64_t TAG_BOOLEAN = 0x04ULL; - constexpr uint64_t TAG_HEAP_OBJECT_MASK = TAG_MARK | TAG_SPECIAL | TAG_BOOLEAN; - - RefField<> field(MAddress(0)); - traceBarrier_->WriteBarrier(nullptr, field, nullptr); - BaseObject *obj = reinterpret_cast(TAG_HEAP_OBJECT_MASK); - traceBarrier_->WriteBarrier(obj, field, obj); - EXPECT_TRUE(obj != nullptr); -#endif -} - -HWTEST_F_L0(TraceBarrierTest, WriteStaticRef_TEST1) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - 
oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField field(&oldObj); - MAddress oldAddress = field.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - traceBarrier_->WriteStaticRef(field, &newObj); - MAddress newAddress = field.GetFieldValue(); - EXPECT_NE(newAddress, oldAddress); -} - -HWTEST_F_L0(TraceBarrierTest, AtomicReadRefField_TEST1) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject obj; - constexpr size_t size = 100; - obj.SetSizeForwarded(size); - EXPECT_EQ(obj.GetSizeForwarded(), size); - RefField field(&obj); - - BaseObject *resultObj = nullptr; - resultObj = traceBarrier_->AtomicReadRefField(&obj, field, std::memory_order_seq_cst); - ASSERT_TRUE(resultObj != nullptr); -} - -HWTEST_F_L0(TraceBarrierTest, AtomicWriteRefField_TEST1) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - traceBarrier_->AtomicWriteRefField(&oldObj, oldField, &newObj, std::memory_order_relaxed); - EXPECT_EQ(oldField.GetFieldValue(), neWAddress); -} - -HWTEST_F_L0(TraceBarrierTest, AtomicWriteRefField_TEST2) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - 
newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - traceBarrier_->AtomicWriteRefField(nullptr, oldField, &newObj, std::memory_order_relaxed); - EXPECT_EQ(oldField.GetFieldValue(), neWAddress); -} - -HWTEST_F_L0(TraceBarrierTest, AtomicSwapRefField_TEST1) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - BaseObject *resultObj = nullptr; - resultObj = traceBarrier_->AtomicSwapRefField(&oldObj, oldField, &newObj, std::memory_order_relaxed); - ASSERT_TRUE(resultObj != nullptr); - EXPECT_EQ(oldField.GetFieldValue(), newField.GetFieldValue()); -} - -HWTEST_F_L0(TraceBarrierTest, CompareAndSwapRefField_TEST1) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - bool result = traceBarrier_->CompareAndSwapRefField( - &oldObj, oldField, &oldObj, &newObj, std::memory_order_seq_cst, std::memory_order_seq_cst); - ASSERT_TRUE(result); -} - -HWTEST_F_L0(TraceBarrierTest, 
CompareAndSwapRefField_TEST2) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - - bool result = traceBarrier_->CompareAndSwapRefField( - &oldObj, oldField, &oldObj, &oldObj, std::memory_order_seq_cst, std::memory_order_seq_cst); - ASSERT_TRUE(result); -} - -HWTEST_F_L0(TraceBarrierTest, CompareAndSwapRefField_TEST3) -{ - ASSERT_TRUE(traceBarrier_ != nullptr); - - BaseObject oldObj; - constexpr size_t oldSize = 100; - oldObj.SetSizeForwarded(oldSize); - EXPECT_EQ(oldObj.GetSizeForwarded(), oldSize); - RefField oldField(&oldObj); - MAddress oldAddress = oldField.GetFieldValue(); - - BaseObject newObj; - constexpr size_t newSize = 200; - newObj.SetSizeForwarded(newSize); - EXPECT_EQ(newObj.GetSizeForwarded(), newSize); - RefField newField(&newObj); - MAddress neWAddress = newField.GetFieldValue(); - EXPECT_NE(oldAddress, neWAddress); - - bool result = traceBarrier_->CompareAndSwapRefField( - &oldObj, newField, &oldObj, &newObj, std::memory_order_seq_cst, std::memory_order_seq_cst); - ASSERT_FALSE(result); -} -} // namespace common::test \ No newline at end of file diff --git a/common_components/heap/w_collector/tests/w_collector_test.cpp b/common_components/heap/w_collector/tests/w_collector_test.cpp deleted file mode 100644 index 1534f3d9b1343e990960dce124f1e02db7fa09fc..0000000000000000000000000000000000000000 --- a/common_components/heap/w_collector/tests/w_collector_test.cpp +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (c) 2025 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "common_components/tests/test_helper.h" - -#include "common_components/heap/w_collector/w_collector.h" -#include "common_components/heap/collector/collector_proxy.h" -#include "common_components/heap/heap_manager.h" -#include "common_components/heap/allocator/region_desc.h" -#include "common_components/mutator/mutator_manager-inl.h" - -using namespace common; - -namespace common::test { -using SuspensionType = MutatorBase::SuspensionType; -class WCollectorTest : public BaseTestWithScope { -protected: - static void SetUpTestCase() - { - BaseRuntime::GetInstance()->Init(); - } - - static void TearDownTestCase() - {} - - void SetUp() override - { - MutatorManager::Instance().CreateRuntimeMutator(ThreadType::GC_THREAD); - } - - void TearDown() override - { - MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::GC_THREAD); - } -}; - -std::unique_ptr GetWCollector() -{ - CollectorResources &resources = Heap::GetHeap().GetCollectorResources(); - Allocator &allocator = Heap::GetHeap().GetAllocator(); - - return std::make_unique(allocator, resources); -} - -HWTEST_F_L0(WCollectorTest, IsUnmovableFromObjectTest0) -{ - std::unique_ptr wcollector = GetWCollector(); - ASSERT_TRUE(wcollector != nullptr); - - BaseObject *obj = nullptr; - EXPECT_FALSE(wcollector->IsUnmovableFromObject(obj)); -} - -HWTEST_F_L0(WCollectorTest, IsUnmovableFromObjectTest1) -{ - std::unique_ptr wcollector = GetWCollector(); - ASSERT_TRUE(wcollector != nullptr); - - HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); - 
BaseObject *obj = reinterpret_cast(addr); - - new (obj) BaseObject(); - - EXPECT_FALSE(wcollector->IsUnmovableFromObject(obj)); -} - -HWTEST_F_L0(WCollectorTest, IsUnmovableFromObjectTest2) -{ - std::unique_ptr wcollector = GetWCollector(); - ASSERT_TRUE(wcollector != nullptr); - - HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::PINNED_OBJECT, true); - BaseObject *obj = reinterpret_cast(addr); - - new (obj) BaseObject(); - - RegionDesc *region = RegionDesc::GetRegionDescAt(addr); - - bool isMarked = region->GetOrAllocResurrectBitmap()->MarkBits(0); - region->SetResurrectedRegionFlag(1); - region->SetRegionType(RegionDesc::RegionType::EXEMPTED_FROM_REGION); - - EXPECT_FALSE(isMarked); - - EXPECT_TRUE(wcollector->IsUnmovableFromObject(obj)); -} - -HWTEST_F_L0(WCollectorTest, ForwardUpdateRawRefTest0) -{ - std::unique_ptr wcollector = GetWCollector(); - ASSERT_TRUE(wcollector != nullptr); - - HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); - BaseObject *obj = reinterpret_cast(addr); - - new (obj) BaseObject(); - - common::ObjectRef root = {obj}; - - BaseObject *oldObj = wcollector->ForwardUpdateRawRef(root); - EXPECT_EQ(oldObj, obj); -} - -void FlipTest() -{ - MutatorManager &mutatorManager = MutatorManager::Instance(); - ThreadHolder::CreateAndRegisterNewThreadHolder(nullptr); - bool stwCallbackExecuted = false; - auto stwTest = [&mutatorManager, &stwCallbackExecuted]() { - EXPECT_TRUE(mutatorManager.WorldStopped()); - stwCallbackExecuted = true; - }; - FlipFunction mutatorTest = [&mutatorManager, &stwCallbackExecuted](Mutator &mutator) { - EXPECT_TRUE(mutator.HasSuspensionRequest(SuspensionType::SUSPENSION_FOR_RUNNING_CALLBACK)); - EXPECT_FALSE(mutatorManager.WorldStopped()); - EXPECT_TRUE(stwCallbackExecuted); - }; - mutatorManager.FlipMutators("flip-test", stwTest, &mutatorTest); -} - -HWTEST_F_L0(WCollectorTest, FlipTest) -{ - std::thread t1(FlipTest); - t1.join(); -} -} // namespace 
common::test diff --git a/common_components/log/log.h b/common_components/log/log.h index 7dc455d275ec757aee3ef31f21d87bfca05c3dae..1c2f908e28bd4e917aded7fd9318f91628e65c3e 100644 --- a/common_components/log/log.h +++ b/common_components/log/log.h @@ -39,10 +39,10 @@ #if defined(ENABLE_HITRACE) #include "hitrace_meter.h" - #define OHOS_HITRACE(level, name, customArgs) HITRACE_METER_NAME_EX(level, HITRACE_TAG_ARK, name, customArgs) - #define OHOS_HITRACE_START(level, name, customArgs) StartTraceEx(level, HITRACE_TAG_ARK, name, customArgs) - #define OHOS_HITRACE_FINISH(level) FinishTraceEx(level, HITRACE_TAG_ARK) - #define OHOS_HITRACE_COUNT(level, name, count) CountTraceEx(level, HITRACE_TAG_ARK, name, count) + #define OHOS_HITRACE(level, name, customArgs) HITRACE_METER_NAME_EX(level, HITRACE_TAG_ARK, name, customArgs) + #define OHOS_HITRACE_START(level, name, customArgs) StartTraceEx(level, HITRACE_TAG_ARK, name, customArgs) + #define OHOS_HITRACE_FINISH(level) FinishTraceEx(level, HITRACE_TAG_ARK) + #define OHOS_HITRACE_COUNT(level, name, count) CountTraceEx(level, HITRACE_TAG_ARK, name, count) #else #define OHOS_HITRACE(level, name, customArgs) #define OHOS_HITRACE_START(level, name, customArgs) diff --git a/test/fuzztest/containersvectorclone_fuzzer/BUILD.gn b/common_components/log/tests/BUILD.gn similarity index 39% rename from test/fuzztest/containersvectorclone_fuzzer/BUILD.gn rename to common_components/log/tests/BUILD.gn index a388786497ff8c12da108ed272c6c74372cc104a..2ecfd0aa452a942d8f401afe2c0d618c12fc88f6 100644 --- a/test/fuzztest/containersvectorclone_fuzzer/BUILD.gn +++ b/common_components/log/tests/BUILD.gn @@ -1,4 +1,4 @@ -# Copyright (c) 2022 Huawei Device Co., Ltd. +# Copyright (c) 2025 Huawei Device Co., Ltd. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -11,33 +11,47 @@ # See the License for the specific language governing permissions and # limitations under the License. -##################################hydra-fuzz################################### -import("//arkcompiler/ets_runtime/js_runtime_config.gni") -import("//arkcompiler/ets_runtime/test/test_helper.gni") -import("//build/config/features.gni") -import("//build/ohos.gni") +import("//arkcompiler/ets_runtime/common_components/tests/test_helper.gni") -##################################fuzztest##################################### -ohos_fuzztest("ContainersVectorCloneFuzzTest") { - module_out_path = ets_runtime_output_path +module_output_path = "ets_runtime" - fuzz_config_file = - "//arkcompiler/ets_runtime/test/fuzztest/containersvectorclone_fuzzer" +host_unittest_action("Log_Test") { + module_out_path = module_output_path - sources = [ "containersvectorclone_fuzzer.cpp" ] + sources = [ + # test file + "log_test.cpp", + ] - configs = [ "//arkcompiler/ets_runtime:ecma_test_config" ] + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] - deps = [ "../../../:libark_jsruntime_test" ] + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] # hiviewdfx libraries - external_deps = hiviewdfx_ext_deps - external_deps += [ sdk_libc_secshared_dep ] - deps += hiviewdfx_deps + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] } -group("fuzztest") { +group("unittest") { testonly = true - deps = [] - deps += [ ":ContainersVectorCloneFuzzTest" ] + + # deps file + deps = [ + ":Log_Test", + ] } + +group("host_unittest") { + testonly = true + + # deps file + deps = [ + ":Log_TestAction", + ] +} \ No newline at end of file diff --git a/common_components/log/tests/log_test.cpp b/common_components/log/tests/log_test.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..017c1b3a6fbb9f891a186755339f39a6de105e75 --- /dev/null +++ b/common_components/log/tests/log_test.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/log/log.h" +#include "common_components/log/log_base.h" +#include "common_components/tests/test_helper.h" + +using namespace common; + +// ==================== Test Fixture ==================== +namespace common::test { +class LogTest : public common::test::BaseTestWithScope { +protected: + void SetUp() override + { + } + + void TearDown() override + { + } +}; + +// ==================== Test Case ==================== +HWTEST_F_L0(LogTest, ConvertFromRuntime_Info_ReturnsInfo) { + Level result = Log::ConvertFromRuntime(LOG_LEVEL::INFO); + EXPECT_EQ(result, Level::INFO); +} + + +HWTEST_F_L0(LogTest, ConvertFromRuntime_Debug_ReturnsDebug) { + Level result = Log::ConvertFromRuntime(LOG_LEVEL::DEBUG); + EXPECT_EQ(result, Level::DEBUG); +} + + +HWTEST_F_L0(LogTest, ConvertFromRuntime_Fatal_ReturnsFatal) { + Level result = Log::ConvertFromRuntime(LOG_LEVEL::FATAL); + EXPECT_EQ(result, Level::FATAL); +} + + +HWTEST_F_L0(LogTest, ConvertFromRuntime_Default_ReturnsDebug) { + Level result = Log::ConvertFromRuntime(static_cast(999)); + EXPECT_EQ(result, Level::DEBUG); +} + +HWTEST_F_L0(LogTest, PrettyOrderMathNano) { + std::string result = PrettyOrderMathNano(1000000000000, "s"); + 
EXPECT_EQ(result, "1000s"); +} +} + +namespace common { +class TestLogRedirect { +public: + TestLogRedirect() + { +#ifndef ENABLE_HILOG + originalCoutBuffer = std::cout.rdbuf(); + originalCerrBuffer = std::cerr.rdbuf(); + + std::cout.rdbuf(buffer.rdbuf()); + std::cerr.rdbuf(buffer.rdbuf()); +#endif + } + + ~TestLogRedirect() + { +#ifndef ENABLE_HILOG + std::cout.rdbuf(originalCoutBuffer); + std::cerr.rdbuf(originalCerrBuffer); +#endif + } + + std::string GetOutput() const + { + return buffer.str(); + } + + void ClearOutput() + { + buffer.str(std::string()); + } + +private: + std::stringstream buffer; +#ifndef ENABLE_HILOG + std::streambuf* originalCoutBuffer; + std::streambuf* originalCerrBuffer; +#endif +}; +} // namespace common + +namespace common::test { +class TimerTest : public common::test::BaseTestWithScope { +protected: + void SetUp() override + { + redirect.ClearOutput(); + + LogOptions options; + options.level = Level::DEBUG; + options.component = static_cast(Component::ALL); + + Log::Initialize(options); + } + void TearDown() override {} + + TestLogRedirect redirect; +}; + +static constexpr uint32_t SECOND_TIME = 1000000; +HWTEST_F_L0(TimerTest, Timer_BasicUsage_LogsTime) +{ + { + Timer t("TestScope"); + for (volatile int i = 0; i < SECOND_TIME; ++i); + } + +#ifndef ENABLE_HILOG + std::string output = redirect.GetOutput(); + EXPECT_NE(output.find("TestScope time:"), std::string::npos); + EXPECT_NE(output.find("us"), std::string::npos); +#endif +} + +HWTEST_F_L0(TimerTest, Timer_LevelNotDebug_NoLogging) +{ + LogOptions options; + options.level = Level::INFO; + options.component = static_cast(Component::ALL); + Log::Initialize(options); + + { + Timer t("SilentScope"); + for (volatile int i = 0; i < SECOND_TIME; ++i); + } + + std::string output = redirect.GetOutput(); + EXPECT_EQ(output.find("SilentScope"), std::string::npos); +} + +HWTEST_F_L0(TimerTest, Timer_LongName_CorrectFormat) +{ + { + Timer t("VeryLongTimerNameForTesting"); + for (volatile int i = 
0; i < SECOND_TIME; ++i); + } + +#ifndef ENABLE_HILOG + std::string output = redirect.GetOutput(); + EXPECT_NE(output.find("VeryLongTimerNameForTesting time:"), std::string::npos); +#endif +} + +HWTEST_F_L0(TimerTest, Timer_MultipleInstances_DistinctOutput) +{ + { + Timer t1("First"); + for (volatile int i = 0; i < SECOND_TIME; ++i); + } + + { + Timer t2("Second"); + for (volatile int i = 0; i < SECOND_TIME; ++i); + } + +#ifndef ENABLE_HILOG + std::string output = redirect.GetOutput(); + EXPECT_NE(output.find("First time:"), std::string::npos); + EXPECT_NE(output.find("Second time:"), std::string::npos); +#endif +} +} \ No newline at end of file diff --git a/common_components/mutator/mutator.cpp b/common_components/mutator/mutator.cpp index 9b976b8e05ac705654df3edf23e7d7cf1fa84d01..b5ab2a292f47238e9c656666c7fbaf899f39a41c 100755 --- a/common_components/mutator/mutator.cpp +++ b/common_components/mutator/mutator.cpp @@ -17,14 +17,14 @@ #include #include -#include "common_components/base_runtime/hooks.h" +#include "common_components/common_runtime/hooks.h" #include "common_components/common/type_def.h" #if defined(_WIN64) #define NOGDI #include #endif #include "common_components/heap/allocator/region_manager.h" -#include "common_components/heap/collector/trace_collector.h" +#include "common_components/heap/collector/marking_collector.h" #include "common_components/common/scoped_object_access.h" #include "common_components/mutator/mutator_manager.h" @@ -107,12 +107,12 @@ void MutatorBase::HandleSuspensionRequest() reinterpret_cast(mutator_)->WaitFlipFunctionFinish(); } SetInSaferegion(SAFE_REGION_FALSE); - if (HasSuspensionRequest(SUSPENSION_FOR_FINALIZE)) { - HandleJSGCCallback(); - ClearFinalizeRequest(); - } // Leave saferegion if current mutator has no suspend request, otherwise try again - if (LIKELY_CC(!HasAnySuspensionRequest() && !HasObserver())) { + if (LIKELY_CC(!HasAnySuspensionRequestExceptCallbacks() && !HasObserver())) { + if 
(HasSuspensionRequest(SUSPENSION_FOR_FINALIZE)) { + ClearFinalizeRequest(); + HandleJSGCCallback(); + } return; } } diff --git a/common_components/mutator/mutator.h b/common_components/mutator/mutator.h index f5a4950dc0d69356c489d90145df3521d1b85b7a..b9ce1e978095e40b95ded034370dffec967346f2 100755 --- a/common_components/mutator/mutator.h +++ b/common_components/mutator/mutator.h @@ -50,6 +50,7 @@ public: SatbBuffer::Instance().RetireNode(satbNode_); satbNode_ = nullptr; } + delete holder_; } static Mutator* NewMutator() @@ -243,9 +244,9 @@ public: } #if defined(GCINFO_DEBUG) && GCINFO_DEBUG - void PushFrameInfoForTrace(const GCInfoNode& frameGCInfo) { gcInfos_.PushFrameInfoForTrace(frameGCInfo); } + void PushFrameInfoForMarking(const GCInfoNode& frameGCInfo) { gcInfos_.PushFrameInfoForMarking(frameGCInfo); } - void PushFrameInfoForTrace(const GCInfoNode&& frameGCInfo) { gcInfos_.PushFrameInfoForTrace(frameGCInfo); } + void PushFrameInfoForMarking(const GCInfoNode&& frameGCInfo) { gcInfos_.PushFrameInfoForMarking(frameGCInfo); } void PushFrameInfoForFix(const GCInfoNodeForFix& frameGCInfo) { gcInfos_.PushFrameInfoForFix(frameGCInfo); } diff --git a/common_components/mutator/mutator_manager-inl.h b/common_components/mutator/mutator_manager-inl.h index 0e04b7fc5e5e742bdeb86a87142d8fcb9fdb57de..38c5d73c9e503006b704506fdf0fed86e2d14733 100644 --- a/common_components/mutator/mutator_manager-inl.h +++ b/common_components/mutator/mutator_manager-inl.h @@ -20,11 +20,14 @@ namespace common { template -void MutatorManager::FlipMutators(const char* reason, STWFunction&& stwFunction, FlipFunction *flipFunction) +void MutatorManager::FlipMutators(STWParam& param, STWFunction&& stwFunction, FlipFunction *flipFunction) { std::list undoneMutators; { - ScopedStopTheWorld stw(reason); + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "Waiting-STW", ""); + ScopedStopTheWorld stw(param); + OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, param.stwReason, ""); + stwFunction(); bool ignoreFinalizer = 
true; // Hope process ui thread's flipFunction at last, so add the ui mutator at the end of undoeMuators list. diff --git a/common_components/mutator/mutator_manager.cpp b/common_components/mutator/mutator_manager.cpp index d781b8a6dd417068786ff56e5422689018143115..27e6c94be0780d3f1c03af2896edc17caad896da 100755 --- a/common_components/mutator/mutator_manager.cpp +++ b/common_components/mutator/mutator_manager.cpp @@ -18,11 +18,13 @@ #include "common_components/base/time_utils.h" #include "common_components/heap/collector/finalizer_processor.h" -#include "common_components/heap/collector/trace_collector.h" +#include "common_components/heap/collector/marking_collector.h" #include "common_components/heap/heap.h" #include "common_components/mutator/mutator.inline.h" namespace common { +bool g_enableGCTimeoutCheck = true; + bool IsRuntimeThread() { if (static_cast(ThreadLocal::GetThreadType()) >= static_cast(ThreadType::GC_THREAD)) { @@ -56,6 +58,24 @@ void MutatorManager::UnbindMutator(Mutator& mutator) const tlData->buffer = nullptr; } +bool MutatorManager::BindMutatorOnly(Mutator *mutator) const +{ + // watch dog thread may call this function and copy barrier may occur, so bind mutator here. 
+ common::ThreadLocalData* tlData = common::ThreadLocal::GetThreadLocalData(); + ASSERT(tlData != nullptr); + if (tlData->mutator == nullptr) { + tlData->mutator = mutator; + return true; + } + return false; +} + +void MutatorManager::UnbindMutatorOnly() const +{ + ThreadLocalData* tlData = ThreadLocal::GetThreadLocalData(); + tlData->mutator = nullptr; +} + Mutator* MutatorManager::CreateMutator() { Mutator* mutator = ThreadLocal::GetMutator(); @@ -324,7 +344,7 @@ void MutatorManager::WaitUntilAllStopped() return; } - if (UNLIKELY_CC(TimeUtil::MilliSeconds() - beginTime > + if (UNLIKELY_CC(common::g_enableGCTimeoutCheck && TimeUtil::MilliSeconds() - beginTime > (((remainMutatorsSize / STW_TIMEOUTS_THREADS_BASE_COUNT) * STW_TIMEOUTS_BASE_MS) + STW_TIMEOUTS_BASE_MS))) { timeoutTimes++; beginTime = TimeUtil::MilliSeconds(); diff --git a/common_components/mutator/mutator_manager.h b/common_components/mutator/mutator_manager.h index 706cfe41fc4de21b1a299581840a7ca42523dfbc..9db1fb5b06bb7575c204e5d45773ff782129db6d 100755 --- a/common_components/mutator/mutator_manager.h +++ b/common_components/mutator/mutator_manager.h @@ -43,11 +43,21 @@ const uint32_t LOCK_OWNER_NONE = 0; const uint32_t LOCK_OWNER_GC = LOCK_OWNER_NONE + 1; const uint32_t LOCK_OWNER_MUTATOR = LOCK_OWNER_GC + 1; +extern bool g_enableGCTimeoutCheck; + bool IsRuntimeThread(); bool IsGcThread(); using MutatorVisitor = std::function; +struct STWParam { + const char* stwReason; + uint64_t elapsedTimeNs = 0; + + uint64_t GetElapsedNs() const { return elapsedTimeNs; } + uint64_t GetElapsedUs() const { return elapsedTimeNs / 1000; } +}; + class MutatorManager { public: MutatorManager() {} @@ -135,6 +145,9 @@ public: void BindMutator(Mutator& mutator) const; void UnbindMutator(Mutator& mutator) const; + bool BindMutatorOnly(Mutator *mutator) const; + void UnbindMutatorOnly() const; + // Create and initialize the local mutator, then register to mutatorlist. 
Mutator* CreateMutator(); @@ -160,7 +173,7 @@ public: void EnsureCpuProfileFinish(std::list &undoneMutators); template - void FlipMutators(const char* reason, STWFunction&& stwFunction, FlipFunction *flipFunction); + void FlipMutators(STWParam& param, STWFunction&& stwFunction, FlipFunction *flipFunction); #if defined(GCINFO_DEBUG) && GCINFO_DEBUG void DumpForDebug(); void DumpAllGcInfos(); @@ -256,17 +269,20 @@ private: // Scoped stop the world. class ScopedStopTheWorld { public: - __attribute__((always_inline)) explicit ScopedStopTheWorld(const char* stwReason, + __attribute__((always_inline)) explicit ScopedStopTheWorld(STWParam& param, bool syncGCPhase = false, GCPhase phase = GC_PHASE_IDLE) + : stwParam_(param) { - reason_ = stwReason; + reason_ = param.stwReason; MutatorManager::Instance().StopTheWorld(syncGCPhase, phase); startTime_ = TimeUtil::NanoSeconds(); } __attribute__((always_inline)) ~ScopedStopTheWorld() { - VLOG(DEBUG, "%s stw time %zu us", reason_, GetElapsedTime()/1000); // 1000:nsec per usec + uint64_t elapsedTimeNs = GetElapsedTime(); + stwParam_.elapsedTimeNs = elapsedTimeNs; + VLOG(DEBUG, "%s stw time %zu us", reason_, elapsedTimeNs / 1000); // 1000:nsec per usec MutatorManager::Instance().StartTheWorld(); } @@ -275,6 +291,7 @@ public: private: uint64_t startTime_ = 0; const char* reason_ = nullptr; + STWParam& stwParam_; }; // Scoped lock STW, this prevent other thread STW during the current scope. 
diff --git a/common_components/mutator/satb_buffer.cpp b/common_components/mutator/satb_buffer.cpp index cf3eddbc46e2528e00fb2d18580c9c3f18b9b7b5..68c9d42ce45d0617764ee0988f1ecf28889d1633 100755 --- a/common_components/mutator/satb_buffer.cpp +++ b/common_components/mutator/satb_buffer.cpp @@ -14,7 +14,7 @@ */ #include "common_components/mutator/satb_buffer.h" -#include "common_components/heap/allocator/region_space.h" +#include "common_components/heap/allocator/regional_heap.h" #include "common_components/base/immortal_wrapper.h" @@ -28,15 +28,15 @@ bool SatbBuffer::ShouldEnqueue(const BaseObject* obj) if (UNLIKELY_CC(obj == nullptr)) { return false; } - if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG && !RegionSpace::IsYoungSpaceObject(obj)) { + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG && !RegionalHeap::IsYoungSpaceObject(obj)) { return false; } - if (RegionSpace::IsNewObjectSinceTrace(obj)) { + if (RegionalHeap::IsNewObjectSinceMarking(obj)) { return false; } - if (RegionSpace::IsMarkedObject(obj)) { + if (RegionalHeap::IsMarkedObject(obj)) { return false; } - return !RegionSpace::EnqueueObject(obj); + return !RegionalHeap::EnqueueObject(obj); } } // namespace common diff --git a/common_components/mutator/tests/BUILD.gn b/common_components/mutator/tests/BUILD.gn index 1671269613a6f619950f6ebe0bedfeb7fffb458b..af57be74e8f378cd085cd35255b40b9c42ecb5b5 100755 --- a/common_components/mutator/tests/BUILD.gn +++ b/common_components/mutator/tests/BUILD.gn @@ -15,12 +15,12 @@ import("//arkcompiler/ets_runtime/common_components/tests/test_helper.gni") module_output_path = "ets_runtime" -host_unittest_action("Mutator_Test") { +host_unittest_action("Mutator_Manager_Test") { module_out_path = module_output_path sources = [ # test file - "mutator_test.cpp", + "mutator_manager_test.cpp", ] configs = [ @@ -38,12 +38,12 @@ host_unittest_action("Mutator_Test") { ] } -host_unittest_action("Mutator_Manager_Test") { +host_unittest_action("Mutator_Test") { 
module_out_path = module_output_path sources = [ # test file - "mutator_manager_test.cpp", + "mutator_test.cpp", ] configs = [ @@ -84,14 +84,38 @@ host_unittest_action("Satb_Buffer_Test") { ] } +host_unittest_action("Thread_Local_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "thread_local_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + group("unittest") { testonly = true # deps file deps = [ - ":Mutator_Test", ":Mutator_Manager_Test", + ":Mutator_Test", ":Satb_Buffer_Test", + ":Thread_Local_Test", ] } @@ -100,8 +124,9 @@ group("host_unittest") { # deps file deps = [ - ":Mutator_TestAction", ":Mutator_Manager_TestAction", + ":Mutator_TestAction", ":Satb_Buffer_TestAction", + ":Thread_Local_TestAction", ] } diff --git a/common_components/mutator/tests/mutator_manager_test.cpp b/common_components/mutator/tests/mutator_manager_test.cpp index a8a3989cd8df9daa626d6d5cf6c17995c80069e4..94b11a04e670a00aa4ce4984d57f9826a055c26a 100755 --- a/common_components/mutator/tests/mutator_manager_test.cpp +++ b/common_components/mutator/tests/mutator_manager_test.cpp @@ -13,10 +13,10 @@ * limitations under the License. 
*/ +#include "common_components/heap/ark_collector/ark_collector.h" #include "common_components/tests/test_helper.h" #include "common_components/mutator/mutator_manager.h" -#include "common_components/base_runtime/base_runtime.cpp" -#include "common_components/heap/w_collector/w_collector.h" +#include "common_components/common_runtime/base_runtime.cpp" using namespace common; @@ -27,6 +27,17 @@ protected: void TearDown() override {} }; +HWTEST_F_L0(MutatorManagerTest, BindMutatorOnly_Test1) +{ + MutatorManager *managerPtr = new MutatorManager(); + Mutator mutator; + mutator.Init(); + managerPtr->UnbindMutatorOnly(); + bool res = managerPtr->BindMutatorOnly(&mutator); + ASSERT_TRUE(res); + delete managerPtr; +} + HWTEST_F_L0(MutatorManagerTest, IsRuntimeThread_Test1) { ThreadLocalData* localData = ThreadLocal::GetThreadLocalData(); @@ -57,6 +68,7 @@ HWTEST_F_L0(MutatorManagerTest, BindMutator_Test1) localData->buffer = nullptr; MutatorManager *managerPtr = new MutatorManager(); Mutator mutator; + mutator.Init(); managerPtr->BindMutator(mutator); EXPECT_TRUE(localData->buffer != nullptr); @@ -78,17 +90,6 @@ HWTEST_F_L0(MutatorManagerTest, CreateMutator_Test1) delete localData->mutator; } -HWTEST_F_L0(MutatorManagerTest, TransitMutatorToExit_Test1) -{ - ThreadLocalData* localData = ThreadLocal::GetThreadLocalData(); - localData->mutator = new Mutator(); - MutatorManager *managerPtr = new MutatorManager(); - localData->mutator->SetInSaferegion(MutatorBase::SaferegionState::SAFE_REGION_FALSE); - managerPtr->TransitMutatorToExit(); - delete managerPtr; - delete localData->mutator; -} - HWTEST_F_L0(MutatorManagerTest, CreateRuntimeMutato_Test1) { ThreadType threadType = ThreadType::FP_THREAD; @@ -100,11 +101,121 @@ HWTEST_F_L0(MutatorManagerTest, CreateRuntimeMutato_Test1) EXPECT_TRUE(ptr != nullptr); } -HWTEST_F_L0(MutatorManagerTest, DumpMutators_Test1) +HWTEST_F_L0(MutatorManagerTest, DestroyRuntimeMutator_Test1) { - MutatorManager *managerPtr = new MutatorManager(); - 
uint32_t timeoutTimes = 1; - managerPtr->DumpMutators(timeoutTimes); - delete managerPtr; + ThreadType threadType = ThreadType::GC_THREAD; + Mutator* ptr = MutatorManager::Instance().CreateRuntimeMutator(threadType); + EXPECT_NE(ptr, nullptr); + + MutatorManager::Instance().DestroyRuntimeMutator(threadType); + ptr = ThreadLocal::GetMutator(); + EXPECT_EQ(ptr, nullptr); +} + +HWTEST_F_L0(MutatorManagerTest, DestroyMutator_Test1) +{ + ThreadType threadType = ThreadType::GC_THREAD; + Mutator* ptr = MutatorManager::Instance().CreateRuntimeMutator(threadType); + + MutatorManager::Instance().DestroyMutator(ptr); + EXPECT_TRUE(MutatorManager::Instance().TryAcquireMutatorManagementRLock()); + MutatorManager::Instance().MutatorManagementRUnlock(); + + MutatorManager::Instance().MutatorManagementWLock(); + MutatorManager::Instance().DestroyMutator(ptr); + EXPECT_FALSE(MutatorManager::Instance().TryAcquireMutatorManagementRLock()); + MutatorManager::Instance().MutatorManagementWUnlock(); +} + +HWTEST_F_L0(MutatorManagerTest, AcquireMutatorManagementWLockForCpuProfile_Test1) +{ + std::atomic threadStarted{false}; + std::thread testthread([&]() { + threadStarted = true; + MutatorManager::Instance().MutatorManagementWLock(); + MutatorManager::Instance().AcquireMutatorManagementWLockForCpuProfile(); + }); + while (!threadStarted) {} + std::this_thread::sleep_for(std::chrono::milliseconds(3)); + MutatorManager::Instance().MutatorManagementWUnlock(); + testthread.join(); + EXPECT_FALSE(MutatorManager::Instance().TryAcquireMutatorManagementRLock()); + MutatorManager::Instance().MutatorManagementWUnlock(); +} + +HWTEST_F_L0(MutatorManagerTest, EnsureCpuProfileFinish_Test1) +{ + std::list undoneMutators; + ThreadType threadType = ThreadType::GC_THREAD; + Mutator* ptr = MutatorManager::Instance().CreateRuntimeMutator(threadType); + ptr->SetCpuProfileState(MutatorBase::CpuProfileState::FINISH_CPUPROFILE); + undoneMutators.push_back(ptr); + 
MutatorManager::Instance().EnsureCpuProfileFinish(undoneMutators); + EXPECT_EQ(undoneMutators.size(), 0); +} + +HWTEST_F_L0(MutatorManagerTest, EnsureCpuProfileFinish_Test2) +{ + std::list Mutators; + ThreadType threadType = ThreadType::GC_THREAD; + Mutator* ptr = MutatorManager::Instance().CreateRuntimeMutator(threadType); + ptr->SetCpuProfileState(MutatorBase::CpuProfileState::NO_CPUPROFILE); + ptr->SetInSaferegion(MutatorBase::SaferegionState::SAFE_REGION_TRUE); + Mutators.push_back(ptr); + MutatorManager::Instance().EnsureCpuProfileFinish(Mutators); + EXPECT_EQ(Mutators.size(), 0); +} + +HWTEST_F_L0(MutatorManagerTest, EnsureCpuProfileFinish_Test3) +{ + std::list Mutators; + ThreadType threadType = ThreadType::GC_THREAD; + Mutator* ptr = MutatorManager::Instance().CreateRuntimeMutator(threadType); + std::thread testthread([&]() { + std::this_thread::sleep_for(std::chrono::milliseconds(3)); + ptr->SetCpuProfileState(MutatorBase::CpuProfileState::NO_CPUPROFILE); + }); + ptr->SetCpuProfileState(MutatorBase::CpuProfileState::IN_CPUPROFILING); + ptr->SetInSaferegion(MutatorBase::SaferegionState::SAFE_REGION_TRUE); + Mutators.push_back(ptr); + MutatorManager::Instance().EnsureCpuProfileFinish(Mutators); + testthread.join(); + EXPECT_EQ(Mutators.size(), 0); +} + +HWTEST_F_L0(MutatorManagerTest, EnsureCpuProfileFinish_Test4) +{ + std::list Mutators; + ThreadType threadType = ThreadType::GC_THREAD; + Mutator* ptr = MutatorManager::Instance().CreateRuntimeMutator(threadType); + std::thread testthread([&]() { + std::this_thread::sleep_for(std::chrono::milliseconds(3)); + ptr->SetInSaferegion(MutatorBase::SaferegionState::SAFE_REGION_TRUE); + ptr->SetCpuProfileState(MutatorBase::CpuProfileState::NO_CPUPROFILE); + }); + ptr->SetCpuProfileState(MutatorBase::CpuProfileState::IN_CPUPROFILING); + ptr->SetInSaferegion(MutatorBase::SaferegionState::SAFE_REGION_FALSE); + Mutators.push_back(ptr); + MutatorManager::Instance().EnsureCpuProfileFinish(Mutators); + testthread.join(); + 
EXPECT_EQ(Mutators.size(), 0); +} + +HWTEST_F_L0(MutatorManagerTest, EnsureCpuProfileFinish_Test5) +{ + std::list Mutators; + ThreadType threadType = ThreadType::GC_THREAD; + Mutator* ptr = MutatorManager::Instance().CreateRuntimeMutator(threadType); + std::thread testthread([&]() { + std::this_thread::sleep_for(std::chrono::milliseconds(3)); + ptr->SetInSaferegion(MutatorBase::SaferegionState::SAFE_REGION_TRUE); + + }); + ptr->SetCpuProfileState(MutatorBase::CpuProfileState::NO_CPUPROFILE); + ptr->SetInSaferegion(MutatorBase::SaferegionState::SAFE_REGION_FALSE); + Mutators.push_back(ptr); + MutatorManager::Instance().EnsureCpuProfileFinish(Mutators); + testthread.join(); + EXPECT_EQ(Mutators.size(), 0); } } // namespace common::test diff --git a/common_components/mutator/tests/mutator_test.cpp b/common_components/mutator/tests/mutator_test.cpp index 2214cd728c2bbd439d31c83dd68950d8c4083c81..3a4aa66ea6a9dfe7a210d054aa732e51da3a20bd 100755 --- a/common_components/mutator/tests/mutator_test.cpp +++ b/common_components/mutator/tests/mutator_test.cpp @@ -13,17 +13,27 @@ * limitations under the License. 
*/ -#include "common_components/tests/test_helper.h" +#include "common_components/heap/heap_manager.h" #include "common_components/mutator/mutator.h" -#include "common_components/base_runtime/base_runtime.cpp" +#include "common_components/mutator/mutator_manager.h" +#include "common_components/tests/test_helper.h" +#include "common_interfaces/base_runtime.h" +#include "common_interfaces/objects/base_object.h" using namespace common; namespace common::test { +class TestMutator : public Mutator { +public: + using Mutator::VisitRawObjects; +}; class MutatorTest : public BaseTestWithScope { protected: - void SetUp() override {} - void TearDown() override {} + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + static void TearDownTestCase() {} }; HWTEST_F_L0(MutatorTest, GetThreadLocalData_Test1) @@ -34,21 +44,213 @@ HWTEST_F_L0(MutatorTest, GetThreadLocalData_Test1) HWTEST_F_L0(MutatorTest, TransitionGCPhase_Test1) { - MutatorBase *mutatorBase = new MutatorBase(); - mutatorBase->Init(); - bool ret = mutatorBase->TransitionGCPhase(false); + MutatorBase mutatorBase; + mutatorBase.Init(); + bool ret = mutatorBase.TransitionGCPhase(false); EXPECT_TRUE(ret == true); - MutatorBase::SuspensionType flag = MutatorBase::SuspensionType::SUSPENSION_FOR_GC_PHASE; - mutatorBase->SetSuspensionFlag(flag); - BaseRuntime::GetInstance()->Init(); - ret = mutatorBase->TransitionGCPhase(false); + MutatorBase::SuspensionType flag = MutatorBase::SuspensionType::SUSPENSION_FOR_GC_PHASE; + mutatorBase.SetSuspensionFlag(flag); + + ret = mutatorBase.TransitionGCPhase(false); EXPECT_TRUE(ret == true); - ret = mutatorBase->TransitionGCPhase(true); + ret = mutatorBase.TransitionGCPhase(true); EXPECT_TRUE(ret == true); - ret = mutatorBase->TransitionGCPhase(false); + ret = mutatorBase.TransitionGCPhase(false); EXPECT_TRUE(ret == true); - delete mutatorBase; +} + +HWTEST_F_L0(MutatorTest, HandleSuspensionRequest_Test0) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + + 
MutatorBase::SuspensionType flag = MutatorBase::SuspensionType::SUSPENSION_FOR_STW; + mutatorBase.SetSuspensionFlag(flag); + + mutatorBase.HandleSuspensionRequest(); + EXPECT_TRUE(mutatorBase.InSaferegion() == false); + + flag = MutatorBase::SuspensionType::SUSPENSION_FOR_GC_PHASE; + mutatorBase.SetSuspensionFlag(flag); + mutatorBase.HandleSuspensionRequest(); + EXPECT_TRUE(mutatorBase.InSaferegion() == false); +} + +HWTEST_F_L0(MutatorTest, SuspendForStw_Test0) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + + mutatorBase.SuspendForStw(); + EXPECT_TRUE(mutatorBase.InSaferegion() == false); +} + +HWTEST_F_L0(MutatorTest, VisitRawObjects_Nullptr_NoCall) +{ + TestMutator mutator; + mutator.Init(); + mutator.PushRawObject(nullptr); + + bool called = false; + RootVisitor func = [&called](const ObjectRef& obj) { + called = true; + }; + mutator.VisitRawObjects(func); + EXPECT_FALSE(called); +} + +HWTEST_F_L0(MutatorTest, VisitRawObjects_NonNull_Call) +{ + TestMutator mutator; + mutator.Init(); + BaseObject mockObj; + mutator.PushRawObject(&mockObj); + + bool called = false; + RootVisitor func = [&called, &mockObj](const ObjectRef& obj) { + called = true; + EXPECT_EQ(obj.object, &mockObj); + }; + + mutator.VisitRawObjects(func); + EXPECT_TRUE(called); +} + +HWTEST_F_L0(MutatorTest, HandleSuspensionRequest_Test1) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + MutatorBase::SuspensionType flag = MutatorBase::SuspensionType::SUSPENSION_FOR_STW; + mutatorBase.SetSuspensionFlag(flag); + flag = MutatorBase::SuspensionType::SUSPENSION_FOR_GC_PHASE; + mutatorBase.SetSuspensionFlag(flag); + mutatorBase.HandleSuspensionRequest(); + EXPECT_FALSE(mutatorBase.InSaferegion()); + EXPECT_TRUE(mutatorBase.FinishedTransition()); +} + +HWTEST_F_L0(MutatorTest, HandleSuspensionRequest_Test2) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + MutatorBase::SuspensionType flag = MutatorBase::SuspensionType::SUSPENSION_FOR_CPU_PROFILE; + mutatorBase.SetSuspensionFlag(flag); + 
mutatorBase.SetCpuProfileState(MutatorBase::CpuProfileState::FINISH_CPUPROFILE); + std::thread t([&]() { + std::this_thread::sleep_for(std::chrono::nanoseconds(1)); + mutatorBase.ClearSuspensionFlag(MutatorBase::SUSPENSION_FOR_CPU_PROFILE); + }); + mutatorBase.HandleSuspensionRequest(); + t.join(); + EXPECT_FALSE(mutatorBase.InSaferegion()); + EXPECT_TRUE(mutatorBase.FinishedCpuProfile()); +} + +HWTEST_F_L0(MutatorTest, HandleSuspensionRequest_Test3) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + mutatorBase.SetSuspensionFlag(MutatorBase::SUSPENSION_FOR_STW); + mutatorBase.SetSuspensionFlag(MutatorBase::SUSPENSION_FOR_CPU_PROFILE); + mutatorBase.SetCpuProfileState(MutatorBase::CpuProfileState::FINISH_CPUPROFILE); + std::thread t([&]() { + std::this_thread::sleep_for(std::chrono::nanoseconds(1)); + mutatorBase.ClearSuspensionFlag(MutatorBase::SUSPENSION_FOR_CPU_PROFILE); + }); + mutatorBase.HandleSuspensionRequest(); + t.join(); + EXPECT_FALSE(mutatorBase.InSaferegion()); + EXPECT_TRUE(mutatorBase.FinishedCpuProfile()); +} + +HWTEST_F_L0(MutatorTest, HandleGCPhase_SatbNodeNotNull) +{ + Mutator* mutator = MutatorManager::Instance().CreateRuntimeMutator(ThreadType::ARK_PROCESSOR); + HeapAddress addr = HeapManager::Allocate(sizeof(BaseObject), AllocType::MOVEABLE_OBJECT, true); + BaseObject *mockObj = reinterpret_cast(addr); + new (mockObj) BaseObject(); + mutator->RememberObjectInSatbBuffer(mockObj); + EXPECT_NE(mutator->GetSatbBufferNode(), nullptr); + MutatorBase* base = static_cast(mutator->GetMutatorBasePtr()); + base->TransitionToGCPhaseExclusive(GCPhase::GC_PHASE_REMARK_SATB); + EXPECT_EQ(mutator->GetSatbBufferNode(), nullptr); + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::ARK_PROCESSOR); +} + +HWTEST_F_L0(MutatorTest, HandleSuspensionRequest_LeaveSaferegion1) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + mutatorBase.IncObserver(); + std::thread t([&]() { + std::this_thread::sleep_for(std::chrono::nanoseconds(1)); + 
mutatorBase.DecObserver(); + }); + mutatorBase.HandleSuspensionRequest(); + t.join(); + EXPECT_FALSE(mutatorBase.InSaferegion()); +} + +HWTEST_F_L0(MutatorTest, HandleSuspensionRequest_LeaveSaferegion2) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + MutatorBase::SuspensionType flag = MutatorBase::SuspensionType::SUSPENSION_FOR_CPU_PROFILE; + mutatorBase.SetSuspensionFlag(flag); + mutatorBase.SetCpuProfileState(MutatorBase::CpuProfileState::FINISH_CPUPROFILE); + mutatorBase.IncObserver(); + std::thread t([&]() { + std::this_thread::sleep_for(std::chrono::nanoseconds(1)); + mutatorBase.ClearSuspensionFlag(MutatorBase::SUSPENSION_FOR_CPU_PROFILE); + mutatorBase.DecObserver(); + }); + mutatorBase.HandleSuspensionRequest(); + t.join(); + EXPECT_FALSE(mutatorBase.InSaferegion()); +} + +HWTEST_F_L0(MutatorTest, HandleSuspensionRequest_Test4) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + mutatorBase.SetSuspensionFlag(MutatorBase::SUSPENSION_FOR_STW); + EXPECT_NO_FATAL_FAILURE(mutatorBase.HandleSuspensionRequest()); +} + +HWTEST_F_L0(MutatorTest, HandleSuspensionRequest_Test5) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + mutatorBase.SetSuspensionFlag(MutatorBase::SUSPENSION_FOR_GC_PHASE); + EXPECT_NO_FATAL_FAILURE(mutatorBase.HandleSuspensionRequest()); +} + +HWTEST_F_L0(MutatorTest, TransitionToGCPhaseExclusive_TestEnum) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + + GCPhase phase = GCPhase::GC_PHASE_ENUM; + EXPECT_NO_FATAL_FAILURE(mutatorBase.TransitionToGCPhaseExclusive(phase)); +} + +HWTEST_F_L0(MutatorTest, TransitionToGCPhaseExclusive_TestPrecopy) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + + GCPhase phase = GCPhase::GC_PHASE_PRECOPY; + EXPECT_NO_FATAL_FAILURE(mutatorBase.TransitionToGCPhaseExclusive(phase)); +} + +HWTEST_F_L0(MutatorTest, TransitionToGCPhaseExclusive_TestIdle) +{ + MutatorBase mutatorBase; + mutatorBase.Init(); + + GCPhase phase = GCPhase::GC_PHASE_IDLE; + 
EXPECT_NO_FATAL_FAILURE(mutatorBase.TransitionToGCPhaseExclusive(phase)); } } // namespace common::test diff --git a/common_components/mutator/tests/satb_buffer_test.cpp b/common_components/mutator/tests/satb_buffer_test.cpp index 91398370875b5d7c6f31ff85547ee71821bbded2..9ad478220c5d636b38e2a0ad7112dca46b824df9 100755 --- a/common_components/mutator/tests/satb_buffer_test.cpp +++ b/common_components/mutator/tests/satb_buffer_test.cpp @@ -13,22 +13,175 @@ * limitations under the License. */ -#include "common_components/tests/test_helper.h" +#include "common_components/heap/allocator/region_desc.h" +#include "common_components/heap/allocator/regional_heap.h" #include "common_components/mutator/satb_buffer.h" +#include "common_components/tests/test_helper.h" +#include "common_interfaces/base_runtime.h" using namespace common; - namespace common::test { + class SatbBufferTest : public BaseTestWithScope { protected: - void SetUp() override {} - void TearDown() override {} + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() + { + BaseRuntime::GetInstance()->Fini(); + } + + void SetUp() override + { + holder_ = ThreadHolder::CreateAndRegisterNewThreadHolder(nullptr); + scope_ = new ThreadHolder::TryBindMutatorScope(holder_); + } + + void TearDown() override + { + if (scope_ != nullptr) { + delete scope_; + scope_ = nullptr; + } + } + + ThreadHolder *holder_ {nullptr}; + ThreadHolder::TryBindMutatorScope *scope_ {nullptr}; }; -TEST_F(SatbBufferTest, ShouldEnqueue_Test1) +HWTEST_F_L0(SatbBufferTest, NullptrReturnsFalse) +{ + EXPECT_FALSE(SatbBuffer::Instance().ShouldEnqueue(nullptr)); +} + +HWTEST_F_L0(SatbBufferTest, IsYoungSpaceObject1) { - BaseObject* obj = nullptr; - bool ret = SatbBuffer::Instance().ShouldEnqueue(obj); - EXPECT_TRUE(ret == false); + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); + RegionalHeap& theAllocator = 
reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::ALIVE_REGION_FIRST); + Heap::GetHeap().SetGCReason(GC_REASON_YOUNG); + + BaseObject* obj = reinterpret_cast(addr); + EXPECT_FALSE(SatbBuffer::Instance().ShouldEnqueue(obj)); +} + +HWTEST_F_L0(SatbBufferTest, IsYoungSpaceObject2) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::FROM_REGION); + Heap::GetHeap().SetGCReason(GC_REASON_HEU); + + BaseObject* obj = reinterpret_cast(addr); + EXPECT_FALSE(SatbBuffer::Instance().ShouldEnqueue(obj)); +} + +HWTEST_F_L0(SatbBufferTest, IsYoungSpaceObject3) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::FROM_REGION); + Heap::GetHeap().SetGCReason(GC_REASON_YOUNG); + + BaseObject* obj = reinterpret_cast(addr); + EXPECT_FALSE(SatbBuffer::Instance().ShouldEnqueue(obj)); +} + +HWTEST_F_L0(SatbBufferTest, IsYoungSpaceObject4) +{ + auto* mutator = common::Mutator::GetMutator(); + mutator->SetMutatorPhase(GCPhase::GC_PHASE_ENUM); + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + 
region->SetRegionType(RegionDesc::RegionType::ALIVE_REGION_FIRST); + Heap::GetHeap().SetGCReason(GC_REASON_HEU); + BaseObject* obj = reinterpret_cast(addr); + EXPECT_FALSE(SatbBuffer::Instance().ShouldEnqueue(obj)); +} + +HWTEST_F_L0(SatbBufferTest, IsMarkedObject) +{ + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0U); + BaseObject* obj = reinterpret_cast(addr); + Heap::GetHeap().SetGCReason(GC_REASON_HEU); + + RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + region->SetMarkedRegionFlag(1); + + size_t offset = region->GetAddressOffset(reinterpret_cast(obj)); + RegionBitmap* markBitmap = region->GetOrAllocMarkBitmap(); + markBitmap->MarkBits(offset); + + bool result = SatbBuffer::Instance().ShouldEnqueue(obj); + EXPECT_FALSE(result); +} + +void ClearMarkBit(RegionBitmap* bitmap, size_t offset) +{ + uintptr_t* bits = *reinterpret_cast(bitmap); + + size_t wordIndex = offset / (sizeof(uintptr_t) * 8); + size_t bitIndex = offset % (sizeof(uintptr_t) * 8); + + uintptr_t mask = ~(static_cast(1) << bitIndex); + + uintptr_t* addr = const_cast(bits + wordIndex); + uintptr_t oldVal; + uintptr_t newVal; + + do { + oldVal = __atomic_load_n(addr, __ATOMIC_ACQUIRE); + newVal = oldVal & mask; + if (oldVal == newVal) { + return; + } + } while (!__atomic_compare_exchange_n(addr, &oldVal, newVal, false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)); +} + +HWTEST_F_L0(SatbBufferTest, EnqueueObject) +{ + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0U); + + uintptr_t objAddress = addr + 0x100; + BaseObject* obj = reinterpret_cast(objAddress); + Heap::GetHeap().SetGCReason(GC_REASON_HEU); + + RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + region->SetRegionType(RegionDesc::RegionType::FROM_REGION); + region->SetMarkingLine(); + 
region->SetMarkedRegionFlag(0); + RegionBitmap* markBitmap = region->GetMarkBitmap(); + size_t offset = region->GetAddressOffset(reinterpret_cast(obj)); + if (markBitmap != nullptr) { + ClearMarkBit(markBitmap, offset); + } + + region->SetEnqueuedRegionFlag(1); + RegionBitmap* enqueueBitmap = region->GetOrAllocEnqueueBitmap(); + enqueueBitmap->MarkBits(offset); + + bool result = SatbBuffer::Instance().ShouldEnqueue(obj); + EXPECT_FALSE(result); } } // namespace common::test diff --git a/common_components/common_runtime/src/inspector/stream.h b/common_components/mutator/tests/thread_local_test.cpp old mode 100755 new mode 100644 similarity index 59% rename from common_components/common_runtime/src/inspector/stream.h rename to common_components/mutator/tests/thread_local_test.cpp index ec8d56ae21358c11d86f2c5897d80804f0f3608d..8ee8f5cbf99655b60e50f1742febd376fd28d4a1 --- a/common_components/common_runtime/src/inspector/stream.h +++ b/common_components/mutator/tests/thread_local_test.cpp @@ -13,22 +13,19 @@ * limitations under the License. 
*/ -#ifndef ARK_COMMON_STREAM_H -#define ARK_COMMON_STREAM_H -#include "common_components/common_runtime/src/base/log_file.h" -namespace panda { -class Stream { -public: - virtual ~Stream() = default; +#include "common_components/mutator/thread_local.h" +#include "common_components/tests/test_helper.h" - virtual void EndOfStream() = 0; +namespace common { - // Get chunk's size - virtual int GetSize() = 0; - - // Writes the chunk of data into the stream - virtual bool WriteChunk(char* data, int32_t size) = 0; +class ThreadLocalTest : public common::test::BaseTestWithScope { +protected: + void SetUp() override {} + void TearDown() override {} }; -} // namespace panda -#endif +HWTEST_F_L0(ThreadLocalTest, GetThreadLocalData_ReturnsNonNull) { + ThreadLocalData* data = ThreadLocal::GetThreadLocalData(); + EXPECT_NE(data, nullptr); +} +} \ No newline at end of file diff --git a/common_components/mutator/thread_local.cpp b/common_components/mutator/thread_local.cpp index 81e82eb82c6cfd814a3cf3e7c564ad9d37686fe8..0d2872a99f158981d85cb4c41902d0a548ceb3c5 100755 --- a/common_components/mutator/thread_local.cpp +++ b/common_components/mutator/thread_local.cpp @@ -30,7 +30,7 @@ void ThreadLocal::ClearAllocBufferRegion() { auto buffer = GetAllocBuffer(); if (buffer != nullptr) { - buffer->ClearRegion(); + buffer->ClearRegions(); } } } // namespace common diff --git a/common_components/mutator/thread_local.h b/common_components/mutator/thread_local.h index a03970e2bfa94b3bfb2e094bfc337e9210dba122..cfb623f7f8b451cea2ad6aa959dd02074978dc42 100755 --- a/common_components/mutator/thread_local.h +++ b/common_components/mutator/thread_local.h @@ -29,7 +29,7 @@ enum class ThreadType { ARK_PROCESSOR = 0, GC_THREAD, FP_THREAD, HOT_UPDATE_THRE struct ThreadLocalData { // External thread local var. 
AllocationBuffer* buffer; - Mutator* mutator; + Mutator* mutator {nullptr}; uint8_t* thread; uint8_t* schedule; uint8_t* preemptFlag; diff --git a/common_components/objects/base_string.cpp b/common_components/objects/base_string.cpp index 575dbb326e024fc115424bff4ce515ca603a817b..fab3988f35ead9ad9ec085d857441fc706f7c059 100644 --- a/common_components/objects/base_string.cpp +++ b/common_components/objects/base_string.cpp @@ -36,61 +36,267 @@ size_t UtfUtils::ConvertRegionUtf16ToLatin1(const uint16_t* utf16In, uint8_t* la return utf_helper::ConvertRegionUtf16ToLatin1(utf16In, latin1Out, utf16Len, latin1Len); } -// To change the hash algorithm of BaseString, please modify BaseString::CalculateConcatHashCode -// and BaseStringHashHelper::ComputeHashForDataPlatform simultaneously!! -template -uint32_t BaseString::ComputeHashForData(const T* data, size_t size, - uint32_t hashSeed) -{ - if (size <= static_cast(StringHash::MIN_SIZE_FOR_UNROLLING)) { - uint32_t hash = hashSeed; - for (uint32_t i = 0; i < size; i++) { - hash = (hash << static_cast(StringHash::HASH_SHIFT)) - hash + data[i]; + size_t UtfUtils::Utf8ToUtf16Size(const uint8_t* utf8, size_t utf8Len) + { + return common::utf_helper::Utf8ToUtf16Size(utf8, utf8Len); + } + + size_t UtfUtils::Utf16ToUtf8Size(const uint16_t* utf16, uint32_t length, bool modify, bool isGetBufferSize, + bool cesu8) + { + return common::utf_helper::Utf16ToUtf8Size(utf16, length, modify, isGetBufferSize, cesu8); + } + + size_t UtfUtils::ConvertRegionUtf8ToUtf16(const uint8_t* utf8In, uint16_t* utf16Out, size_t utf8Len, + size_t utf16Len) + { + return common::utf_helper::ConvertRegionUtf8ToUtf16(utf8In, utf16Out, utf8Len, utf16Len); + } + + size_t UtfUtils::ConvertRegionUtf16ToLatin1(const uint16_t* utf16In, uint8_t* latin1Out, size_t utf16Len, + size_t latin1Len) + { + return common::utf_helper::ConvertRegionUtf16ToLatin1(utf16In, latin1Out, utf16Len, latin1Len); + } + + size_t UtfUtils::ConvertRegionUtf16ToUtf8(const uint16_t* utf16In, 
uint8_t* utf8Out, size_t utf16Len, + size_t utf8Len, size_t start, bool modify, bool isWriteBuffer, bool cesu) + { + return common::utf_helper::ConvertRegionUtf16ToUtf8( + utf16In, utf8Out, utf16Len, utf8Len, start, modify, isWriteBuffer, cesu); + } + + + // To change the hash algorithm of BaseString, please modify BaseString::CalculateConcatHashCode + // and BaseStringHashHelper::ComputeHashForDataPlatform simultaneously!! + template + uint32_t BaseString::ComputeHashForData(const T* data, size_t size, + uint32_t hashSeed) + { + if (size <= static_cast(StringHash::MIN_SIZE_FOR_UNROLLING)) { + uint32_t hash = hashSeed; + for (uint32_t i = 0; i < size; i++) { + hash = (hash << static_cast(StringHash::HASH_SHIFT)) - hash + data[i]; + } + return hash; } - return hash; + return StringHashHelper::ComputeHashForDataPlatform(data, size, hashSeed); } - return StringHashHelper::ComputeHashForDataPlatform(data, size, hashSeed); -} -template -uint32_t BaseString::ComputeHashForData(const uint8_t*, size_t, uint32_t); -template -uint32_t BaseString::ComputeHashForData(const uint16_t*, size_t, uint32_t); + template + uint32_t BaseString::ComputeHashForData(const uint8_t*, size_t, uint32_t); + template + uint32_t BaseString::ComputeHashForData(const uint16_t*, size_t, uint32_t); + /* static */ + uint32_t BaseString::ComputeHashcodeUtf8(const uint8_t* utf8Data, size_t utf8Len, bool canBeCompress) + { + if (utf8Len == 0) { + return MixHashcode(0, NOT_INTEGER); + } + if (canBeCompress) { + return ComputeHashForData(utf8Data, utf8Len, 0); + } + auto utf16Len = UtfUtils::Utf8ToUtf16Size(utf8Data, utf8Len); + std::vector tmpBuffer(utf16Len); + [[maybe_unused]] auto len = UtfUtils::ConvertRegionUtf8ToUtf16(utf8Data, tmpBuffer.data(), utf8Len, + utf16Len); + DCHECK_CC(len == utf16Len); + return ComputeHashForData(tmpBuffer.data(), utf16Len, 0); + } -// static -template -uint32_t BaseString::CalculateDataConcatHashCode(const T1* dataFirst, size_t sizeFirst, - const T2* dataSecond, size_t 
sizeSecond) -{ - uint32_t totalHash = ComputeHashForData(dataFirst, sizeFirst, 0); - totalHash = ComputeHashForData(dataSecond, sizeSecond, totalHash); - return totalHash; -} + /* static */ + uint32_t BaseString::ComputeHashcodeUtf16(const uint16_t* utf16Data, uint32_t length) + { + return ComputeHashForData(utf16Data, length, 0); + } -template -uint32_t BaseString::CalculateDataConcatHashCode(const uint8_t* dataFirst, size_t sizeFirst, - const uint8_t* dataSecond, size_t sizeSecond); -template -uint32_t BaseString::CalculateDataConcatHashCode(const uint16_t* dataFirst, size_t sizeFirst, - const uint16_t* dataSecond, size_t sizeSecond); -template -uint32_t BaseString::CalculateDataConcatHashCode(const uint8_t* dataFirst, size_t sizeFirst, - const uint16_t* dataSecond, size_t sizeSecond); -template -uint32_t BaseString::CalculateDataConcatHashCode(const uint16_t* dataFirst, size_t sizeFirst, - const uint8_t* dataSecond, size_t sizeSecond); + // drop the tail bytes if the remain length can't fill the length it represents. + static size_t FixUtf8Len(const uint8_t* utf8, size_t utf8Len) + { + constexpr size_t TWO_BYTES_LENGTH = 2; + constexpr size_t THREE_BYTES_LENGTH = 3; + size_t trimSize = 0; + if (utf8Len >= 1 && utf8[utf8Len - 1] >= 0xC0) { + // The last one char claim there are more than 1 byte next to it, it's invalid, so drop the last one. + trimSize = 1; + } + if (utf8Len >= TWO_BYTES_LENGTH && utf8[utf8Len - TWO_BYTES_LENGTH] >= 0xE0) { + // The second to last char claim there are more than 2 bytes next to it, it's invalid, so drop the last two. + trimSize = TWO_BYTES_LENGTH; + } + if (utf8Len >= THREE_BYTES_LENGTH && utf8[utf8Len - THREE_BYTES_LENGTH] >= 0xF0) { + // The third to last char claim there are more than 3 bytes next to it, it's invalid, so drop the last + // three. 
+ trimSize = THREE_BYTES_LENGTH; + } + return utf8Len - trimSize; + } -template -bool IsSubStringAtSpan(common::Span& lhsSp, common::Span& rhsSp, uint32_t offset) -{ - size_t rhsSize = rhsSp.size(); - DCHECK_CC(rhsSize + offset <= lhsSp.size()); - for (size_t i = 0; i < rhsSize; ++i) { - auto left = static_cast(lhsSp[offset + static_cast(i)]); - auto right = static_cast(rhsSp[i]); - if (left != right) { + /* static */ + bool BaseString::IsUtf8EqualsUtf16(const uint8_t* utf8Data, size_t utf8Len, + const uint16_t* utf16Data, uint32_t utf16Len) + { + size_t safeUtf8Len = FixUtf8Len(utf8Data, utf8Len); + const uint8_t* utf8End = utf8Data + utf8Len; + const uint8_t* utf8SafeEnd = utf8Data + safeUtf8Len; + const uint16_t* utf16End = utf16Data + utf16Len; + while (utf8Data < utf8SafeEnd && utf16Data < utf16End) { + uint8_t src = *utf8Data; + switch (src & 0xF0) { + case 0xF0: + { + const uint8_t c2 = *(++utf8Data); + const uint8_t c3 = *(++utf8Data); + const uint8_t c4 = *(++utf8Data); + uint32_t codePoint = ((src & LOW_3BITS) << OFFSET_18POS) | ((c2 & LOW_6BITS) << OFFSET_12POS) | + ((c3 & LOW_6BITS) << OFFSET_6POS) | (c4 & LOW_6BITS); + if (codePoint >= SURROGATE_RAIR_START) { + if (utf16Data >= utf16End - 1) { + return false; + } + codePoint -= SURROGATE_RAIR_START; + if (*utf16Data++ != static_cast((codePoint >> OFFSET_10POS) | + H_SURROGATE_START)) { + return false; + } else if (*utf16Data++ != static_cast((codePoint & 0x3FF) | L_SURROGATE_START)) { + return false; + } + } else { + if (*utf16Data++ != static_cast(codePoint)) { + return false; + } + } + utf8Data++; + break; + } + case 0xE0: + { + const uint8_t c2 = *(++utf8Data); + const uint8_t c3 = *(++utf8Data); + if (*utf16Data++ != static_cast(((src & LOW_4BITS) << OFFSET_12POS) | + ((c2 & LOW_6BITS) << OFFSET_6POS) | (c3 & LOW_6BITS))) { + return false; + } + utf8Data++; + break; + } + case 0xD0: + case 0xC0: + { + const uint8_t c2 = *(++utf8Data); + if (*utf16Data++ != static_cast(((src & LOW_5BITS) << 
OFFSET_6POS) | (c2 & + LOW_6BITS))) { + return false; + } + utf8Data++; + break; + } + default: + do { + if (*utf16Data++ != static_cast(*utf8Data++)) { + return false; + } + } + while (utf8Data < utf8SafeEnd && utf16Data < utf16End && *utf8Data < 0x80); + break; + } + } + // The remain chars should be treated as single byte char. + while (utf8Data < utf8End && utf16Data < utf16End) { + if (*utf16Data++ != static_cast(*utf8Data++)) { + return false; + } + } + return utf8Data == utf8End && utf16Data == utf16End; + } + + // static + template + uint32_t BaseString::CalculateDataConcatHashCode(const T1* dataFirst, size_t sizeFirst, + const T2* dataSecond, size_t sizeSecond) + { + uint32_t totalHash = ComputeHashForData(dataFirst, sizeFirst, 0); + totalHash = ComputeHashForData(dataSecond, sizeSecond, totalHash); + return totalHash; + } + + template + uint32_t BaseString::CalculateDataConcatHashCode(const uint8_t* dataFirst, size_t sizeFirst, + const uint8_t* dataSecond, size_t sizeSecond); + template + uint32_t BaseString::CalculateDataConcatHashCode(const uint16_t* dataFirst, size_t sizeFirst, + const uint16_t* dataSecond, size_t sizeSecond); + template + uint32_t BaseString::CalculateDataConcatHashCode(const uint8_t* dataFirst, size_t sizeFirst, + const uint16_t* dataSecond, size_t sizeSecond); + template + uint32_t BaseString::CalculateDataConcatHashCode(const uint16_t* dataFirst, size_t sizeFirst, + const uint8_t* dataSecond, size_t sizeSecond); + + + bool BaseString::CanBeCompressed(const BaseString* string) + { + DCHECK_CC(string->IsLineString()); + if (string->IsUtf8()) { + return CanBeCompressed(string->GetDataUtf8(), string->GetLength()); + } + return CanBeCompressed(string->GetDataUtf16(), string->GetLength()); + } + + // static + bool BaseString::CanBeCompressed(const uint8_t* utf8Data, uint32_t utf8Len) + { + uint32_t index = 0; + for (; index + 4 <= utf8Len; index += 4) { + // 4: process the data in chunks of 4 elements to improve speed + // Check if all 
four characters in the current block are ASCII characters + if (!IsASCIICharacter(utf8Data[index]) || + !IsASCIICharacter(utf8Data[index + 1]) || // 1: the second element of the block + !IsASCIICharacter(utf8Data[index + 2]) || // 2: the third element of the block + !IsASCIICharacter(utf8Data[index + 3])) { + // 3: the fourth element of the block + return false; + } + } + // Check remaining characters if they are ASCII + for (; index < utf8Len; ++index) { + if (!IsASCIICharacter(utf8Data[index])) { + return false; + } + } + return true; + } + + /* static */ + bool BaseString::CanBeCompressed(const uint16_t* utf16Data, uint32_t utf16Len) + { + uint32_t index = 0; + for (; index + 4 <= utf16Len; index += 4) { + // 4: process the data in chunks of 4 elements to improve speed + // Check if all four characters in the current block are ASCII characters + if (!IsASCIICharacter(utf16Data[index]) || + !IsASCIICharacter(utf16Data[index + 1]) || // 1: the second element of the block + !IsASCIICharacter(utf16Data[index + 2]) || // 2: the third element of the block + !IsASCIICharacter(utf16Data[index + 3])) { + // 3: the fourth element of the block + return false; + } + } + // Check remaining characters if they are ASCII + for (; index < utf16Len; ++index) { + if (!IsASCIICharacter(utf16Data[index])) { + return false; + } + } + return true; + } + + + bool BaseString::IsASCIICharacter(uint16_t data) + { + if (data == 0) { return false; } } diff --git a/common_components/objects/base_string_table.cpp b/common_components/objects/base_string_table.cpp index 72c9f33d7acc685a0e3a6f876722caef48f05f7b..fd878b49f6200c08943303f993eb7c8bb0984342 100644 --- a/common_components/objects/base_string_table.cpp +++ b/common_components/objects/base_string_table.cpp @@ -32,13 +32,9 @@ template BaseString* BaseStringTableInternal::AllocateLineStringObject(size_t size) { size = AlignUp(size, ALIGN_OBJECT); - BaseString* str; - if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) { - str = 
reinterpret_cast(HeapAllocator::AllocateInHuge(size, LanguageType::DYNAMIC)); - } else { - str = reinterpret_cast(HeapAllocator::AllocateInOld(size, LanguageType::DYNAMIC)); - } - BaseClass* cls = BaseRuntime::GetInstance()->GetBaseClassRoots().GetBaseClass(ObjectType::LINE_STRING); + BaseString* str = + reinterpret_cast(HeapAllocator::AllocateInOldOrHuge(size, LanguageType::DYNAMIC)); + BaseClass* cls = BaseRuntime::GetInstance()->GetBaseClassRoots().GetBaseClass(CommonType::LINE_STRING); str->SetFullBaseClassWithoutBarrier(cls); return str; } @@ -97,8 +93,8 @@ BaseString* BaseStringTableInternal::GetOrInternStringFromCompr BaseString* str = LineString::CreateFromUtf8CompressedSubString( std::move(allocator), string, offset, utf8Len); str->SetRawHashcode(hashcode); - DCHECK_CC(!str->IsInternString()); - DCHECK_CC(!str->IsTreeString()); + ASSERT(!str->IsInternString()); + ASSERT(str->NotTreeString()); // Strings in string table should not be in the young space. ReadOnlyHandle strHandle = handleCreator(holder, str); return strHandle; @@ -131,10 +127,10 @@ BaseString* BaseStringTableInternal::GetOrInternString(ThreadHo BaseString* result = stringTable_.template LoadOrStore( holder, hashcode, [holder, hashcode, utf8Data, utf8Len, canBeCompress, handleCreator, allocator]() { - BaseString* value = LineString::CreateFromUtf8(std::move(allocator), utf8Data, utf8Len, canBeCompress); + BaseString* value = BaseString::CreateFromUtf8(std::move(allocator), utf8Data, utf8Len, canBeCompress); value->SetRawHashcode(hashcode); - DCHECK_CC(!value->IsInternString()); - DCHECK_CC(!value->IsTreeString()); + ASSERT(!value->IsInternString()); + ASSERT(value->NotTreeString()); ReadOnlyHandle stringHandle = handleCreator(holder, value); return stringHandle; }, @@ -168,8 +164,8 @@ BaseString* BaseStringTableInternal::GetOrInternString( BaseString* value = LineString::CreateFromUtf16(std::move(allocator), utf16Data, utf16Len, canBeCompress); value->SetRawHashcode(hashcode); - 
DCHECK_CC(!value->IsInternString()); - DCHECK_CC(!value->IsTreeString()); + ASSERT(!value->IsInternString()); + ASSERT(value->NotTreeString()); // Strings in string table should not be in the young space. ReadOnlyHandle stringHandle = handleCreator(holder, value); return stringHandle; @@ -200,12 +196,17 @@ BaseString* BaseStringTableInternal::TryGetInternString(const R template template > -void BaseStringTableInternal::SweepWeakRef(const WeakRefFieldVisitor& visitor, uint32_t index, +void BaseStringTableInternal::SweepWeakRef(const WeakRefFieldVisitor& visitor, uint32_t rootID, std::vector& waitDeleteEntries) { - DCHECK_CC(index >= 0 && index < TrieMapConfig::INDIRECT_SIZE); - auto* rootNode = stringTable_.root_.load(std::memory_order_relaxed); - stringTable_.ClearNodeFromGC(rootNode, index, visitor, waitDeleteEntries); + ASSERT(rootID >= 0 && rootID < TrieMapConfig::ROOT_SIZE); + auto rootNode = stringTable_.root_[rootID].load(std::memory_order_relaxed); + if (rootNode == nullptr) { + return; + } + for (uint32_t index = 0; index < TrieMapConfig::INDIRECT_SIZE; ++index) { + stringTable_.ClearNodeFromGC(rootNode, index, visitor, waitDeleteEntries); + } } template @@ -215,26 +216,22 @@ void BaseStringTableInternal::CleanUp() stringTable_.CleanUp(); } -template -template > -void BaseStringTableInternal::SweepWeakRef(const WeakRefFieldVisitor& visitor, uint32_t index) -{ - DCHECK_CC(index >= 0 && index < TrieMapConfig::INDIRECT_SIZE); - auto* rootNode = stringTable_.root_.load(std::memory_order_relaxed); - stringTable_.ClearNodeFromGC(rootNode, index, visitor); -} - template template > void BaseStringTableInternal::SweepWeakRef(const WeakRefFieldVisitor& visitor) { // No need lock here, only shared gc will sweep string table, meanwhile other threads are suspended. 
- for (uint32_t index = 0; index < TrieMapConfig::INDIRECT_SIZE; ++index) { - SweepWeakRef(visitor, index); + for (uint32_t rootID = 0; rootID < TrieMapConfig::ROOT_SIZE; ++rootID) { + auto rootNode = stringTable_.root_[rootID].load(std::memory_order_relaxed); + if (rootNode == nullptr) { + continue; + } + for (uint32_t index = 0; index < TrieMapConfig::INDIRECT_SIZE; ++index) { + stringTable_.ClearNodeFromGC(rootNode, index, visitor); + } } } -template void BaseStringTableInternal::SweepWeakRef(const WeakRefFieldVisitor& visitor, uint32_t index); template void BaseStringTableInternal::SweepWeakRef(const WeakRefFieldVisitor& visitor); BaseString* BaseStringTableImpl::GetOrInternFlattenString(ThreadHolder* holder, const HandleCreator& handleCreator, @@ -271,7 +268,7 @@ void BaseStringTableCleaner::StartSweepWeakRefTask() { // No need lock here, only the daemon thread will reset the state. sweepWeakRefFinished_ = false; - PendingTaskCount_.store(TrieMapConfig::INDIRECT_SIZE, std::memory_order_relaxed); + PendingTaskCount_.store(TrieMapConfig::ROOT_SIZE, std::memory_order_relaxed); } void BaseStringTableCleaner::WaitSweepWeakRefTask() @@ -328,7 +325,7 @@ void BaseStringTableCleaner::ProcessSweepWeakRef( const WeakRefFieldVisitor &visitor) { uint32_t index = 0U; - while ((index = GetNextIndexId(iter)) < TrieMapConfig::INDIRECT_SIZE) { + while ((index = GetNextIndexId(iter)) < TrieMapConfig::ROOT_SIZE) { cleaner->waitFreeEntries_[index].clear(); cleaner->stringTable_->SweepWeakRef(visitor, index, cleaner->waitFreeEntries_[index]); if (ReduceCountAndCheckFinish(cleaner)) { diff --git a/common_components/objects/string_table/hashtriemap-inl.h b/common_components/objects/string_table/hashtriemap-inl.h index e771c3973f611cf6fb1b12ee0eb2dd2355b0f426..315f0af5829bc3e257b487ec58010c033c393aac 100644 --- a/common_components/objects/string_table/hashtriemap-inl.h +++ b/common_components/objects/string_table/hashtriemap-inl.h @@ -29,9 +29,8 @@ namespace common { template template 
typename HashTrieMap::Node* HashTrieMap::Expand( - Entry* oldEntry, Entry* newEntry, uint32_t newHash, uint32_t hashShift, Indirect* parent) + Entry* oldEntry, Entry* newEntry, uint32_t oldHash, uint32_t newHash, uint32_t hashShift, Indirect* parent) { - uint32_t oldHash = oldEntry->Key(); // Check for hash conflicts. if (oldHash == newHash) { // Store the old entry in the overflow list of the new entry, and then store @@ -41,7 +40,7 @@ typename HashTrieMap::Node* HashTrieMap::Node* HashTrieMap> hashShift) & TrieMapConfig::N_CHILDREN_MASK; uint32_t newIdx = (newHash >> hashShift) & TrieMapConfig::N_CHILDREN_MASK; if (oldIdx != newIdx) { - newIndirect->children_[oldIdx].store(oldEntry, std::memory_order_release); - newIndirect->children_[newIdx].store(newEntry, std::memory_order_release); + newIndirect->GetChild(oldIdx).store(oldEntry, std::memory_order_release); + newIndirect->GetChild(newIdx).store(newEntry, std::memory_order_release); break; } - Indirect* nextIndirect = new Indirect(newIndirect); + Indirect* nextIndirect = new Indirect(); - newIndirect->children_[oldIdx].store(nextIndirect, std::memory_order_release); + newIndirect->GetChild(oldIdx).store(nextIndirect, std::memory_order_release); newIndirect = nextIndirect; } return top; @@ -79,13 +78,13 @@ BaseString* HashTrieMap::Load(ReadBarrier&& re BaseString* value) { uint32_t hash = key; - Indirect* current = root_.load(std::memory_order_relaxed); + Indirect* current = GetRootAndProcessHash(hash); for (uint32_t hashShift = 0; hashShift < TrieMapConfig::TOTAL_HASH_BITS; hashShift += TrieMapConfig::N_CHILDREN_LOG2) { size_t index = (hash >> hashShift) & TrieMapConfig::N_CHILDREN_MASK; - std::atomic* slot = ¤t->children_[index]; + std::atomic* slot = ¤t->GetChild(index); Node* node = slot->load(std::memory_order_acquire); if (node == nullptr) { return nullptr; @@ -96,9 +95,6 @@ BaseString* HashTrieMap::Load(ReadBarrier&& re } for (Entry* currentEntry = node->AsEntry(); currentEntry != nullptr; currentEntry = 
currentEntry->Overflow().load(std::memory_order_acquire)) { - if (currentEntry->Key() != key) { - continue; - } auto oldValue = currentEntry->Value(); bool valuesEqual = false; if (!IsNull(oldValue) && BaseString::StringsAreEqual(std::forward(readBarrier), oldValue, @@ -137,13 +133,13 @@ BaseString* HashTrieMap::LoadOrStore(ThreadHol [[maybe_unused]] bool haveInsertPoint = false; ReadOnlyHandle str; bool isStrCreated = false; // Flag to track whether an object has been created - Indirect* current = root_.load(std::memory_order_acquire); + Indirect* current = GetRootAndProcessHash(hash); while (true) { haveInsertPoint = false; // find the key or insert the candidate position. for (; hashShift < TrieMapConfig::TOTAL_HASH_BITS; hashShift += TrieMapConfig::N_CHILDREN_LOG2) { size_t index = (hash >> hashShift) & TrieMapConfig::N_CHILDREN_MASK; - slot = ¤t->children_[index]; + slot = ¤t->GetChild(index); node = slot->load(std::memory_order_acquire); if (node == nullptr) { haveInsertPoint = true; @@ -157,14 +153,14 @@ BaseString* HashTrieMap::LoadOrStore(ThreadHol } for (Entry* currentEntry = node->AsEntry(); currentEntry != nullptr; currentEntry = currentEntry->Overflow().load(std::memory_order_acquire)) { - if (currentEntry->Key() != key) { - continue; - } auto oldValue = currentEntry->Value(); if (IsNull(oldValue)) { continue; } if (std::invoke(std::forward(equalsCallback), oldValue)) { +#if ECMASCRIPT_ENABLE_TRACE_STRING_TABLE + TraceFindSuccessDepth(hashShift); +#endif return oldValue; } } @@ -200,18 +196,23 @@ BaseString* HashTrieMap::LoadOrStore(ThreadHol hashShift += TrieMapConfig::N_CHILDREN_LOG2; } +#if ECMASCRIPT_ENABLE_TRACE_STRING_TABLE + TraceFindFail(); +#endif Entry* oldEntry = nullptr; + uint32_t oldHash = key; if (node != nullptr) { oldEntry = node->AsEntry(); for (Entry* currentEntry = oldEntry; currentEntry; currentEntry = currentEntry->Overflow().load(std::memory_order_acquire)) { - if (currentEntry->Key() != key) { - continue; - } auto oldValue = 
currentEntry->Value(); if (IsNull(oldValue)) { continue; } + if (currentEntry->Key() != key) { + oldHash = currentEntry->Key(); + continue; + } if (std::invoke(std::forward(equalsCallback), oldValue)) { if constexpr (IsLock) { GetMutex().Unlock(); @@ -222,9 +223,10 @@ BaseString* HashTrieMap::LoadOrStore(ThreadHol } BaseString* value = *str; + ASSERT(value != nullptr); value->SetIsInternString(); IntegerCache::InitIntegerCache(value); - Entry* newEntry = new Entry(key, value); + Entry* newEntry = new Entry(value); oldEntry = PruneHead(oldEntry); if (oldEntry == nullptr) { // The simple case: Create a new entry and store it. @@ -232,7 +234,8 @@ BaseString* HashTrieMap::LoadOrStore(ThreadHol } else { // Expand an existing entry to one or more new nodes. // Release the node, which will make both oldEntry and newEntry visible - auto expandedNode = Expand(oldEntry, newEntry, hash, hashShift, current); + auto expandedNode = Expand(oldEntry, newEntry, + oldHash >> TrieMapConfig::ROOT_BIT, hash, hashShift, current); slot->store(expandedNode, std::memory_order_release); } if constexpr (IsLock) { @@ -258,13 +261,13 @@ BaseString* HashTrieMap::LoadOrStoreForJit(Thr Node* node = nullptr; [[maybe_unused]] bool haveInsertPoint = false; BaseString* value = nullptr; - Indirect* current = root_.load(std::memory_order_acquire); + Indirect* current = GetRootAndProcessHash(hash); while (true) { haveInsertPoint = false; // find the key or insert the candidate position. 
for (; hashShift < TrieMapConfig::TOTAL_HASH_BITS; hashShift += TrieMapConfig::N_CHILDREN_LOG2) { size_t index = (hash >> hashShift) & TrieMapConfig::N_CHILDREN_MASK; - slot = ¤t->children_[index]; + slot = ¤t->GetChild(index); node = slot->load(std::memory_order_acquire); if (node == nullptr) { haveInsertPoint = true; @@ -278,9 +281,6 @@ BaseString* HashTrieMap::LoadOrStoreForJit(Thr } for (Entry* currentEntry = node->AsEntry(); currentEntry != nullptr; currentEntry = currentEntry->Overflow().load(std::memory_order_acquire)) { - if (currentEntry->Key() != key) { - continue; - } auto oldValue = currentEntry->Value(); if (IsNull(oldValue)) { continue; @@ -315,17 +315,19 @@ BaseString* HashTrieMap::LoadOrStoreForJit(Thr } Entry* oldEntry = nullptr; + uint32_t oldHash = key; if (node != nullptr) { oldEntry = node->AsEntry(); for (Entry* currentEntry = oldEntry; currentEntry; currentEntry = currentEntry->Overflow().load(std::memory_order_acquire)) { - if (currentEntry->Key() != key) { - continue; - } auto oldValue = currentEntry->Value(); if (IsNull(oldValue)) { continue; } + if (currentEntry->Key() != key) { + oldHash = currentEntry->Key(); + continue; + } if (std::invoke(std::forward(equalsCallback), oldValue)) { GetMutex().Unlock(); return oldValue; @@ -333,9 +335,10 @@ BaseString* HashTrieMap::LoadOrStoreForJit(Thr } } + ASSERT(value != nullptr); value->SetIsInternString(); IntegerCache::InitIntegerCache(value); - Entry* newEntry = new Entry(key, value); + Entry* newEntry = new Entry(value); oldEntry = PruneHead(oldEntry); if (oldEntry == nullptr) { // The simple case: Create a new entry and store it. @@ -343,7 +346,8 @@ BaseString* HashTrieMap::LoadOrStoreForJit(Thr } else { // Expand an existing entry to one or more new nodes. 
// Release the node, which will make both oldEntry and newEntry visible - auto expandedNode = Expand(oldEntry, newEntry, hash, hashShift, current); + auto expandedNode = Expand(oldEntry, newEntry, + oldHash >> TrieMapConfig::ROOT_BIT, hash, hashShift, current); slot->store(expandedNode, std::memory_order_release); } GetMutex().Unlock(); @@ -361,6 +365,7 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol { HashTrieMapInUseScope mapInUse(this); uint32_t hash = key; + ProcessHash(hash); uint32_t hashShift = loadResult.hashShift; std::atomic* slot = loadResult.slot; Node* node = nullptr; @@ -379,7 +384,7 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol // find the key or insert the candidate position. for (; hashShift < TrieMapConfig::TOTAL_HASH_BITS; hashShift += TrieMapConfig::N_CHILDREN_LOG2) { size_t index = (hash >> hashShift) & TrieMapConfig::N_CHILDREN_MASK; - slot = ¤t->children_[index]; + slot = ¤t->GetChild(index); node = slot->load(std::memory_order_acquire); if (node == nullptr) { haveInsertPoint = true; @@ -389,12 +394,10 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol if (node->IsEntry()) { for (Entry* currentEntry = node->AsEntry(); currentEntry != nullptr; currentEntry = currentEntry->Overflow().load(std::memory_order_acquire)) { - if (currentEntry->Key() == key) { - auto oldValue = currentEntry->Value(); - if (!IsNull(oldValue) && std::invoke(std::forward(equalsCallback), - oldValue)) { - return oldValue; - } + auto oldValue = currentEntry->Value(); + if (!IsNull(oldValue) && std::invoke(std::forward(equalsCallback), + oldValue)) { + return oldValue; } } haveInsertPoint = true; @@ -422,17 +425,19 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol } } Entry* oldEntry = nullptr; + uint32_t oldHash = key; if (node != nullptr) { oldEntry = node->AsEntry(); for (Entry* currentEntry = oldEntry; currentEntry != nullptr; currentEntry = currentEntry->Overflow().load(std::memory_order_acquire)) { - if (currentEntry->Key() != key) { - continue; - } auto oldValue 
= currentEntry->Value(); if (IsNull(oldValue)) { continue; } + if (currentEntry->Key() != key) { + oldHash = currentEntry->Key(); + continue; + } if (std::invoke(std::forward(equalsCallback), oldValue)) { GetMutex().Unlock(); return oldValue; @@ -441,9 +446,10 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol } BaseString* value = *str; + ASSERT(value != nullptr); value->SetIsInternString(); IntegerCache::InitIntegerCache(value); - Entry* newEntry = new Entry(key, value); + Entry* newEntry = new Entry(value); oldEntry = PruneHead(oldEntry); if (oldEntry == nullptr) { // The simple case: Create a new entry and store it. @@ -451,7 +457,8 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol } else { // Expand an existing entry to one or more new nodes. // Release the node, which will make both oldEntry and newEntry visible - auto expandedNode = Expand(oldEntry, newEntry, hash, hashShift, current); + auto expandedNode = Expand(oldEntry, newEntry, + oldHash >> TrieMapConfig::ROOT_BIT, hash, hashShift, current); slot->store(expandedNode, std::memory_order_release); } @@ -466,12 +473,12 @@ HashTrieMapLoadResult HashTrieMap::Load(ReadBa const uint32_t key, BaseString* value) { uint32_t hash = key; - Indirect* current = root_.load(std::memory_order_relaxed); + Indirect* current = GetRootAndProcessHash(hash); for (uint32_t hashShift = 0; hashShift < TrieMapConfig::TOTAL_HASH_BITS; hashShift += TrieMapConfig::N_CHILDREN_LOG2) { size_t index = (hash >> hashShift) & TrieMapConfig::N_CHILDREN_MASK; - std::atomic* slot = ¤t->children_[index]; + std::atomic* slot = ¤t->GetChild(index); Node* node = slot->load(std::memory_order_acquire); if (node == nullptr) { return {nullptr, current, hashShift, slot}; @@ -479,9 +486,6 @@ HashTrieMapLoadResult HashTrieMap::Load(ReadBa if (node->IsEntry()) { for (Entry* currentEntry = node->AsEntry(); currentEntry != nullptr; currentEntry = currentEntry->Overflow().load(std::memory_order_acquire)) { - if (currentEntry->Key() != key) { - continue; - } 
auto oldValue = currentEntry->Value(); if (IsNull(oldValue)) { continue; @@ -507,13 +511,14 @@ HashTrieMapLoadResult HashTrieMap::Load(ReadBa uint32_t offset, uint32_t utf8Len) { uint32_t hash = key; - Indirect* current = root_.load(std::memory_order_relaxed); - const uint8_t* utf8Data = ReadOnlyHandle::Cast(string)->GetDataUtf8() + offset; + Indirect* current = GetRootAndProcessHash(hash); + // UDAV + const uint8_t* utf8Data = nullptr; //string->GetDataUtf8() + offset; for (uint32_t hashShift = 0; hashShift < TrieMapConfig::TOTAL_HASH_BITS; hashShift += TrieMapConfig::N_CHILDREN_LOG2) { size_t index = (hash >> hashShift) & TrieMapConfig::N_CHILDREN_MASK; - std::atomic* slot = ¤t->children_[index]; + std::atomic* slot = ¤t->GetChild(index); Node* node = slot->load(std::memory_order_acquire); if (node == nullptr) { return {nullptr, current, hashShift, slot}; @@ -524,9 +529,6 @@ HashTrieMapLoadResult HashTrieMap::Load(ReadBa } for (Entry* currentEntry = node->AsEntry(); currentEntry != nullptr; currentEntry = currentEntry->Overflow().load(std::memory_order_acquire)) { - if (currentEntry->Key() != key) { - continue; - } auto oldValue = currentEntry->Value(); if (IsNull(oldValue)) { continue; @@ -554,6 +556,7 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol { HashTrieMapInUseScope mapInUse(this); uint32_t hash = key; + ProcessHash(hash); uint32_t hashShift = loadResult.hashShift; std::atomic* slot = loadResult.slot; Node* node = nullptr; @@ -573,7 +576,7 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol haveInsertPoint = false; for (; hashShift < TrieMapConfig::TOTAL_HASH_BITS; hashShift += TrieMapConfig::N_CHILDREN_LOG2) { size_t index = (hash >> hashShift) & TrieMapConfig::N_CHILDREN_MASK; - slot = ¤t->children_[index]; + slot = ¤t->GetChild(index); node = slot->load(std::memory_order_acquire); if (node == nullptr) { haveInsertPoint = true; @@ -587,9 +590,6 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol } for (Entry* currentEntry = node->AsEntry(); currentEntry 
!= nullptr; currentEntry = currentEntry->Overflow().load(std::memory_order_acquire)) { - if (currentEntry->Key() != key) { - continue; - } BaseString* oldValue = currentEntry->Value(); if (IsNull(oldValue)) { continue; @@ -625,17 +625,19 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol } Entry* oldEntry = nullptr; + uint32_t oldHash = key; if (node != nullptr) { oldEntry = node->AsEntry(); for (Entry* currentEntry = oldEntry; currentEntry != nullptr; currentEntry = currentEntry->Overflow().load(std::memory_order_acquire)) { - if (currentEntry->Key() != key) { - continue; - } BaseString* oldValue = currentEntry->Value(); if (IsNull(oldValue)) { continue; } + if (currentEntry->Key() != key) { + oldHash = currentEntry->Key(); + continue; + } if (BaseString::StringsAreEqual(std::forward(readBarrier), oldValue, *str)) { GetMutex().Unlock(); return oldValue; @@ -644,9 +646,10 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol } BaseString* value = *str; + ASSERT(value != nullptr); value->SetIsInternString(); IntegerCache::InitIntegerCache(value); - Entry* newEntry = new Entry(key, value); + Entry* newEntry = new Entry(value); oldEntry = PruneHead(oldEntry); if (oldEntry == nullptr) { // The simple case: Create a new entry and store it. @@ -654,7 +657,8 @@ BaseString* HashTrieMap::StoreOrLoad(ThreadHol } else { // Expand an existing entry to one or more new nodes. 
// Release the node, which will make both oldEntry and newEntry visible - auto expandedNode = Expand(oldEntry, newEntry, hash, hashShift, current); + auto expandedNode = Expand(oldEntry, newEntry, + oldHash >> TrieMapConfig::ROOT_BIT, hash, hashShift, current); slot->store(expandedNode, std::memory_order_release); } GetMutex().Unlock(); @@ -702,7 +706,8 @@ void HashTrieMap::Iter(ReadBarrier&& readBarri if (node == nullptr) return; - for (std::atomic& child : node->children_) { + for (std::atomic& temp : node->children_) { + auto &child = reinterpret_cast&>(temp); Node* childNode = child.load(std::memory_order_relaxed); if (childNode == nullptr) continue; @@ -751,7 +756,7 @@ bool HashTrieMap::ClearNodeFromGC(Indirect* pa std::vector& waitDeleteEntries) { // load sub-nodes - Node* child = parent->children_[index].load(std::memory_order_relaxed); + Node* child = parent->GetChild(index).load(std::memory_order_relaxed); if (child == nullptr) return true; @@ -786,7 +791,7 @@ bool HashTrieMap::ClearNodeFromGC(Indirect* pa const WeakRefFieldVisitor& visitor) { // load sub-nodes - Node* child = parent->children_[index].load(std::memory_order_relaxed); + Node* child = parent->GetChild(index).load(std::memory_order_relaxed); if (child == nullptr) { return true; } @@ -815,12 +820,12 @@ bool HashTrieMap::ClearNodeFromGC(Indirect* pa if (e == nullptr) { // Delete the empty Entry node and update the parent reference delete entry; - parent->children_[index].store(nullptr, std::memory_order_relaxed); + parent->GetChild(index).store(nullptr, std::memory_order_relaxed); return true; } // Delete the Entry node and update the parent reference delete entry; - parent->children_[index].store(e, std::memory_order_relaxed); + parent->GetChild(index).store(e, std::memory_order_relaxed); } return false; } else { @@ -836,7 +841,7 @@ bool HashTrieMap::ClearNodeFromGC(Indirect* pa if (cleanCount == TrieMapConfig::INDIRECT_SIZE) { // Remove the empty Indirect and update the parent reference 
delete indirect; - parent->children_[index].store(nullptr, std::memory_order_relaxed); + parent->GetChild(index).store(nullptr, std::memory_order_relaxed); return true; } return false; @@ -849,7 +854,7 @@ bool HashTrieMap::ClearNodeFromGC(Indirect* pa const WeakRootVisitor& visitor) { // load sub-nodes - Node* child = parent->children_[index].load(std::memory_order_relaxed); + Node* child = parent->GetChild(index).load(std::memory_order_relaxed); if (child == nullptr) return true; @@ -877,12 +882,12 @@ bool HashTrieMap::ClearNodeFromGC(Indirect* pa if (e == nullptr) { // Delete the empty Entry node and update the parent reference delete entry; - parent->children_[index].store(nullptr, std::memory_order_relaxed); + parent->GetChild(index).store(nullptr, std::memory_order_relaxed); return true; } // Delete the Entry node and update the parent reference delete entry; - parent->children_[index].store(e, std::memory_order_relaxed); + parent->GetChild(index).store(e, std::memory_order_relaxed); } return false; } else { @@ -898,7 +903,7 @@ bool HashTrieMap::ClearNodeFromGC(Indirect* pa if (cleanCount == TrieMapConfig::INDIRECT_SIZE && inuseCount_ == 0) { // Remove the empty Indirect and update the parent reference delete indirect; - parent->children_[index].store(nullptr, std::memory_order_relaxed); + parent->GetChild(index).store(nullptr, std::memory_order_relaxed); return true; } return false; diff --git a/common_components/objects/string_table/hashtriemap.h b/common_components/objects/string_table/hashtriemap.h index f79c452bcc54945168703af2403ee5167ae83c8b..75d7712637cb22b923284f5d4e1b6c7b63b48ce4 100644 --- a/common_components/objects/string_table/hashtriemap.h +++ b/common_components/objects/string_table/hashtriemap.h @@ -30,8 +30,12 @@ class TaggedObject; namespace common { class TrieMapConfig { public: + static constexpr uint32_t ROOT_BIT = 11U; + static constexpr uint32_t ROOT_SIZE = (1 << ROOT_BIT); + static constexpr uint32_t ROOT_BIT_MASK = ROOT_SIZE - 1U; + 
static constexpr uint32_t N_CHILDREN_LOG2 = 3U; - static constexpr uint32_t TOTAL_HASH_BITS = 32U; + static constexpr uint32_t TOTAL_HASH_BITS = 32U - ROOT_BIT; static constexpr uint32_t N_CHILDREN = 1 << N_CHILDREN_LOG2; static constexpr uint32_t N_CHILDREN_MASK = N_CHILDREN - 1U; @@ -39,6 +43,8 @@ public: static constexpr uint32_t INDIRECT_SIZE = 8U; // 8: 2^3 static constexpr uint32_t INDIRECT_MASK = INDIRECT_SIZE - 1U; + static constexpr uint64_t HIGH_8_BIT_MASK = 0xFFULL << 56; // used to detect 56-63 Bits + enum SlotBarrier { NeedSlotBarrier, NoSlotBarrier, @@ -50,42 +56,64 @@ class HashTrieMapIndirect; class HashTrieMapNode { public: - explicit HashTrieMapNode(bool isEntry) : isEntry_(isEntry) {} + // Do not use 57-64bits, HWAsan uses 57-64 bits as pointer tag + static constexpr uint64_t POINTER_LENGTH = 48; + static constexpr uint64_t ENTRY_TAG_MASK = 1ULL << POINTER_LENGTH; + + using Pointer = BitField; + using EntryBit = Pointer::NextFlag; + + explicit HashTrieMapNode() {} bool IsEntry() const { - return isEntry_; + uint64_t bitField = *reinterpret_cast(this); + return EntryBit::Decode(bitField); } HashTrieMapEntry* AsEntry(); HashTrieMapIndirect* AsIndirect(); - -private: - const bool isEntry_; }; class HashTrieMapEntry final : public HashTrieMapNode { public: - HashTrieMapEntry(uint32_t k, BaseString* v) : HashTrieMapNode(true), key_(k), value_(v), overflow_(nullptr) {} + HashTrieMapEntry(BaseString* v) : overflow_(nullptr) + { + // Note: CMC GC assumes string is always a non-young object and tries to optimize it out in young GC + ASSERT_LOGF(Heap::GetHeap().IsHeapAddress(v) + ? 
Heap::GetHeap().InRecentSpace(v) == false + : true, + "Violate CMC-GC assumption: should not be young object"); + + bitField_ = (ENTRY_TAG_MASK | reinterpret_cast(v)); + } + template uint32_t Key() const { - return key_; + return Value()->GetRawHashcode(); } template BaseString* Value() const { + uint64_t value = Pointer::Decode(bitField_); if constexpr (SlotBarrier == TrieMapConfig::NoSlotBarrier) { - return value_; + return reinterpret_cast(static_cast(value)); } return reinterpret_cast(Heap::GetBarrier().ReadStringTableStaticRef( - *reinterpret_cast*>((void*)(&value_)))); + *reinterpret_cast*>((void*)(&value)))); } void SetValue(BaseString* v) { - value_ = v; + // Note: CMC GC assumes string is always a non-young object and tries to optimize it out in young GC + ASSERT_LOGF(Heap::GetHeap().IsHeapAddress(v) + ? Heap::GetHeap().InRecentSpace(v) == false + : true, + "Violate CMC-GC assumption: should not be young object"); + + bitField_ = ENTRY_TAG_MASK | reinterpret_cast(v); } std::atomic& Overflow() @@ -93,22 +121,26 @@ public: return overflow_; } + uint64_t GetBitField() const + { + return bitField_; + } + private: - uint32_t key_; - BaseString* value_; + uint64_t bitField_; std::atomic overflow_; }; class HashTrieMapIndirect final : public HashTrieMapNode { public: - std::array, TrieMapConfig::INDIRECT_SIZE> children_{}; - HashTrieMapIndirect* parent_; + std::array, TrieMapConfig::INDIRECT_SIZE> children_{}; - explicit HashTrieMapIndirect(HashTrieMapIndirect* p = nullptr) : HashTrieMapNode(false), parent_(p) {}; + explicit HashTrieMapIndirect() {} ~HashTrieMapIndirect() { - for (std::atomic& child : children_) { + for (std::atomic& temp : children_) { + auto &child = reinterpret_cast&>(temp); HashTrieMapNode* node = child.exchange(nullptr, std::memory_order_relaxed); if (node == nullptr) { continue; @@ -132,6 +164,11 @@ public: delete e; } } + + std::atomic& GetChild(size_t index) + { + return reinterpret_cast&>(children_[index]); + } }; struct 
HashTrieMapLoadResult { @@ -143,13 +180,15 @@ struct HashTrieMapLoadResult { inline HashTrieMapEntry* HashTrieMapNode::AsEntry() { - DCHECK_CC(isEntry_ && "HashTrieMap: called entry on non-entry node"); + // UDAV + //ASSERT(IsEntry() && "HashTrieMap: called entry on non-entry node"); return static_cast(this); } inline HashTrieMapIndirect* HashTrieMapNode::AsIndirect() { - DCHECK_CC(!isEntry_ && "HashTrieMap: called indirect on entry node"); + // UDAV + //ASSERT(!IsEntry() && "HashTrieMap: called indirect on entry node"); return static_cast(this); } @@ -162,16 +201,87 @@ public: using Indirect = HashTrieMapIndirect; using Entry = HashTrieMapEntry; using LoadResult = HashTrieMapLoadResult; - HashTrieMap() - { - root_.store(new Indirect(nullptr), std::memory_order_relaxed); - } + HashTrieMap() {} ~HashTrieMap() { Clear(); }; + +#if ECMASCRIPT_ENABLE_TRACE_STRING_TABLE + class StringTableTracer { + public: + static constexpr uint32_t DUMP_THRESHOLD = 40000; + static StringTableTracer& GetInstance() + { + static StringTableTracer tracer; + return tracer; + } + + NO_COPY_SEMANTIC_CC(StringTableTracer); + NO_MOVE_SEMANTIC_CC(StringTableTracer); + + void TraceFindSuccess(uint32_t hashShift) + { + totalDepth_.fetch_add(hashShift / TrieMapConfig::N_CHILDREN_LOG2 + 1, std::memory_order_relaxed); + uint64_t currentSuccess = totalSuccessNum_.fetch_add(1, std::memory_order_relaxed) + 1; + if (currentSuccess >= lastDumpPoint_.load(std::memory_order_relaxed) + DUMP_THRESHOLD) { + DumpWithLock(currentSuccess); + } + } + + void TraceFindFail() + { + totalFailNum_.fetch_add(1, std::memory_order_relaxed); + } + + private: + StringTableTracer() = default; + + void DumpWithLock(uint64_t triggerPoint) + { + std::lock_guard lock(mu_); + + if (triggerPoint >= lastDumpPoint_.load(std::memory_order_relaxed) + DUMP_THRESHOLD) { + lastDumpPoint_ = triggerPoint; + DumpInfo(); + } + } + + void DumpInfo() const + { + uint64_t depth = totalDepth_.load(std::memory_order_relaxed); + uint64_t success 
= totalSuccessNum_.load(std::memory_order_relaxed); + uint64_t fail = totalFailNum_.load(std::memory_order_relaxed); + + double avgDepth = (static_cast(depth) / success); + + LOG_COMMON(INFO) << "------------------------------------------------------------" + << "---------------------------------------------------------"; + LOG_COMMON(INFO) << "StringTableTotalSuccessFindNum: " << success; + LOG_COMMON(INFO) << "StringTableTotalInsertNum: " << fail; + LOG_COMMON(INFO) << "StringTableAverageDepth: " << avgDepth; + LOG_COMMON(INFO) << "------------------------------------------------------------" + << "---------------------------------------------------------"; + } + + std::mutex mu_; + std::atomic totalDepth_{0}; + std::atomic totalSuccessNum_{0}; + std::atomic totalFailNum_{0}; + std::atomic lastDumpPoint_{0}; + }; + + void TraceFindSuccessDepth(uint32_t hashShift) + { + StringTableTracer::GetInstance().TraceFindSuccess(hashShift); + } + void TraceFindFail() + { + StringTableTracer::GetInstance().TraceFindFail(); + } +#endif template LoadResult Load(ReadBarrier&& readBarrier, const uint32_t key, BaseString* value); @@ -194,6 +304,32 @@ public: template BaseString* LoadOrStoreForJit(ThreadHolder* holder, const uint32_t key, LoaderCallback loaderCallback, EqualsCallback equalsCallback); + + static void ProcessHash(uint32_t &hash) + { + hash >>= TrieMapConfig::ROOT_BIT; + } + + Indirect* GetRootAndProcessHash(uint32_t &hash) + { + uint32_t rootID = (hash & TrieMapConfig::ROOT_BIT_MASK); + hash >>= TrieMapConfig::ROOT_BIT; + auto root = root_[rootID].load(std::memory_order_acquire); + if (root != nullptr) { + return root; + } else { + Indirect* expected = nullptr; + Indirect* newRoot = new Indirect(); + + if (root_[rootID].compare_exchange_strong(expected, newRoot, + std::memory_order_release, std::memory_order_acquire)) { + return newRoot; + } else { + delete newRoot; + return expected; + } + } + } // All other threads have stopped due to the gc and Clear phases. 
// Therefore, the operations related to atoms in ClearNodeFromGc and Clear use std::memory_order_relaxed, @@ -210,27 +346,34 @@ public: template = 0> bool ClearNodeFromGC(Indirect* parent, int index, const WeakRootVisitor& visitor); + // Iterator - template + template void Range(ReadBarrier&& readBarrier, bool& isValid) { - Iter(std::forward(readBarrier), root_.load(std::memory_order_relaxed), isValid); + for (uint32_t i = 0; i < TrieMapConfig::ROOT_SIZE; i++) { + Iter(std::forward(readBarrier), root_[i].load(std::memory_order_relaxed), isValid); + } } void Clear() { - // The atom replaces the root node with nullptr and obtains the old root node - Indirect* oldRoot = root_.exchange(nullptr, std::memory_order_relaxed); - if (oldRoot != nullptr) { - // Clear the entire HashTreeMap based on the Indirect destructor - delete oldRoot; + for (uint32_t i = 0; i < TrieMapConfig::ROOT_SIZE; i++) { + // The atom replaces the root node with nullptr and obtains the old root node + Indirect* oldRoot = root_[i].exchange(nullptr, std::memory_order_relaxed); + if (oldRoot != nullptr) { + // Clear the entire HashTreeMap based on the Indirect destructor + delete oldRoot; + } } } // ut used - const std::atomic& GetRoot() const + const std::atomic& GetRoot(uint32_t index) const { - return root_; + // UDAV + //ASSERT(index < TrieMapConfig::ROOT_SIZE); + return root_[index]; } void IncreaseInuseCount() @@ -271,16 +414,19 @@ public: isSweeping = false; GetMutex().Unlock(); } - std::atomic root_; + std::atomic root_[TrieMapConfig::ROOT_SIZE] = {}; private: Mutex mu_; std::vector waitFreeEntries_{}; std::atomic inuseCount_{0}; bool isSweeping{false}; template - Node* Expand(Entry* oldEntry, Entry* newEntry, uint32_t newHash, uint32_t hashShift, Indirect* parent); + Node* Expand(Entry* oldEntry, Entry* newEntry, + uint32_t oldHash, uint32_t newHash, uint32_t hashShift, Indirect* parent); + template void Iter(ReadBarrier&& readBarrier, Indirect* node, bool& isValid); + bool CheckWeakRef(const 
WeakRefFieldVisitor& visitor, Entry* entry); bool CheckWeakRef(const WeakRootVisitor& visitor, Entry* entry); diff --git a/common_components/objects/string_table_internal.h b/common_components/objects/string_table_internal.h index 6a61cd84a3f4b5647a35b07dcc77c7cc5a62e00c..67edfcde547822ceadf53708b6c82000947d92a4 100644 --- a/common_components/objects/string_table_internal.h +++ b/common_components/objects/string_table_internal.h @@ -35,7 +35,7 @@ public: return; } #ifndef NDEBUG - BaseRuntime::RequestGC(GcType::ASYNC); // Trigger CMC FULL GC + BaseRuntime::RequestGC(GC_REASON_USER, true, GC_TYPE_FULL); // Trigger CMC FULL GC #endif ThreadStateTransitionScope ts(holder); mtx_.Lock(); @@ -112,7 +112,7 @@ private: IteratorPtr iter_{}; BaseStringTableInternal* stringTable_; std::atomic PendingTaskCount_{0U}; - std::array, TrieMapConfig::INDIRECT_SIZE> waitFreeEntries_{}; + std::array, TrieMapConfig::ROOT_SIZE> waitFreeEntries_{}; Mutex sweepWeakRefMutex_{}; bool sweepWeakRefFinished_{true}; ConditionVariable sweepWeakRefCV_{}; @@ -164,7 +164,7 @@ public: } template = 0> - void SweepWeakRef(const WeakRefFieldVisitor& visitor, uint32_t index, + void SweepWeakRef(const WeakRefFieldVisitor& visitor, uint32_t rootID, std::vector& waitDeleteEntries); template = 0> @@ -173,8 +173,6 @@ public: template = 0> void SweepWeakRef(const WeakRefFieldVisitor& visitor); private: - template = 0> - void SweepWeakRef(const WeakRefFieldVisitor& visitor, uint32_t index); HashTrieMapType stringTable_{}; BaseStringTableCleaner* cleaner_ = nullptr; diff --git a/common_components/objects/tests/BUILD.gn b/common_components/objects/tests/BUILD.gn index e6d55ad7b48d80040124371fc808a3d69a2713c7..f6d6aef286200b053228b029438d395273870142 100755 --- a/common_components/objects/tests/BUILD.gn +++ b/common_components/objects/tests/BUILD.gn @@ -40,12 +40,64 @@ host_unittest_action("Base_String_Test") { ] } +host_unittest_action("Base_String_Table_Test") { + module_out_path = module_output_path + + 
sources = [ + # test file + "base_string_table_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Composite_Base_Class_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "composite_base_class_test.cpp", + "//arkcompiler/ets_runtime/common_components/objects/base_string.cpp", + "//arkcompiler/ets_runtime/common_components/base/utf_helper.cpp", + "base_string_table_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + + group("unittest") { testonly = true # deps file deps = [ ":Base_String_Test", + ":Base_String_Table_Test", + ":Composite_Base_Class_Test", ] } @@ -55,5 +107,7 @@ group("host_unittest") { # deps file deps = [ ":Base_String_TestAction", + ":Base_String_Table_TestAction", + ":Composite_Base_Class_TestAction", ] } diff --git a/common_components/objects/tests/base_string_table_test.cpp b/common_components/objects/tests/base_string_table_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..aa165055f1b83685b0faa961d924c2bb8705d48a --- /dev/null +++ b/common_components/objects/tests/base_string_table_test.cpp @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/tests/test_helper.h" +#include "common_interfaces/objects/base_string_table.h" +#include "common_components/objects/string_table_internal.h" +#include "common_interfaces/thread/mutator_base.h" +#include "common_interfaces/objects/base_string.h" +#include "common_interfaces/thread/thread_holder.h" +#include "common_interfaces/base_runtime.h" +#include "common_interfaces/heap/heap_allocator.h" +#include "common_interfaces/objects/string/base_string-inl2.h" + + +namespace common { + +struct DummyMutator : public MutatorBase { + explicit DummyMutator(LanguageType lang) : lang_(lang) {} + LanguageType lang_; +}; + +class BaseStringTableTest : public common::test::BaseTestWithScope { +protected: + using TableType = BaseStringTableInternal; + BaseRuntime* runtime_; + std::unique_ptr mutator_; + std::unique_ptr table_; + ThreadHolder* threadHolder_; + + void SetUp() override + { + mutator_ = std::make_unique(LanguageType::DYNAMIC); + threadHolder_ = new ThreadHolder(mutator_.get()); + + runtime_ = BaseRuntime::GetInstance(); + ASSERT_TRUE(runtime_ != nullptr); + + runtime_->Init(); + + table_ = std::make_unique(); + } + + void TearDown() override + { + table_.reset(); + + if (runtime_) { + runtime_->Fini(); + } + + mutator_.reset(); + threadHolder_ = nullptr; + } + + BaseString* CreateUtf8String(const char* utf8Data, uint32_t length, bool canBeCompress) + { + auto allocator = [](size_t size, CommonType type) -> BaseString* { + void* mem = reinterpret_cast(HeapAllocator::AllocateInOldOrHuge(size, LanguageType::DYNAMIC)); 
+ if (mem == nullptr) { + return nullptr; + } + return reinterpret_cast(mem); + }; + + BaseString* str = BaseString::CreateFromUtf8(allocator, + reinterpret_cast(utf8Data), length, canBeCompress); + + if (str == nullptr) { + return nullptr; + } + return str; + } + + static ReadOnlyHandle MockHandleCreator(ThreadHolder* holder, BaseString* str) + { + uintptr_t handleValue = reinterpret_cast(str); + return ReadOnlyHandle(handleValue); + } +}; + +HWTEST_F_L0(BaseStringTableTest, SweepWeakRef) +{ + WeakRefFieldVisitor mockVisitor = [](RefField& field) { + return true; + }; + + table_->GetHashTrieMap().StartSweeping(); + table_->SweepWeakRef(mockVisitor); + table_->GetHashTrieMap().FinishSweeping(); + + EXPECT_TRUE(true); +} + +HWTEST_F_L0(BaseStringTableTest, CleanUp) +{ + table_->GetHashTrieMap().Clear(); + EXPECT_TRUE(true); +} + +} \ No newline at end of file diff --git a/common_components/objects/tests/base_string_test.cpp b/common_components/objects/tests/base_string_test.cpp index ae004c97c0017e4b38137c093070a14ab71618d5..495a30af2373d5ca1364719403757f43fced9725 100755 --- a/common_components/objects/tests/base_string_test.cpp +++ b/common_components/objects/tests/base_string_test.cpp @@ -14,6 +14,7 @@ */ #include "common_interfaces/objects/base_string.h" +#include "common_components/platform/string_hash.h" #include "common_components/tests/test_helper.h" using namespace common; @@ -22,6 +23,286 @@ namespace common::test { class BaseStringTest : public common::test::BaseTestWithScope { }; +HWTEST_F_L0(BaseStringTest, ComputeHashForData_TEST0) +{ + const uint8_t data[] = {'a', 'b', 'c'}; + size_t size = sizeof(data) / sizeof(data[0]); + uint32_t hashSeed = 0; + uint32_t expectedHash = hashSeed; + for (uint32_t i = 0; i < size; ++i) { + expectedHash = (expectedHash << static_cast(StringHash::HASH_SHIFT)) - expectedHash + data[i]; + } + + EXPECT_EQ(BaseString::ComputeHashForData(data, size, hashSeed), expectedHash); +} + +HWTEST_F_L0(BaseStringTest, 
ComputeHashForData_TEST1) +{ + std::vector largeData(1000, 'x'); + size_t size = largeData.size(); + uint32_t hashSeed = 0; + + uint32_t result = BaseString::ComputeHashForData(largeData.data(), size, hashSeed); + EXPECT_NE(result, 0); +} + +HWTEST_F_L0(BaseStringTest, ComputeHashcodeUtf8_TEST0) +{ + const uint8_t utf8Data[] = u8"hello"; + size_t utf8Len = sizeof(utf8Data) / sizeof(utf8Data[0]) - 1; + + uint32_t expectedHash = 0; + for (uint32_t i = 0; i < utf8Len; ++i) { + expectedHash = (expectedHash << static_cast(StringHash::HASH_SHIFT)) - expectedHash + utf8Data[i]; + } + + EXPECT_EQ(BaseString::ComputeHashcodeUtf8(utf8Data, utf8Len, true), expectedHash); +} + +HWTEST_F_L0(BaseStringTest, ComputeHashcodeUtf8_TEST1) +{ + const uint8_t utf8Data[] = u8"hello"; + size_t utf8Len = sizeof(utf8Data) / sizeof(utf8Data[0]) - 1; + + uint32_t expectedHash = 0; + for (uint32_t i = 0; i < utf8Len; ++i) { + expectedHash = (expectedHash << static_cast(StringHash::HASH_SHIFT)) - expectedHash + utf8Data[i]; + } + + EXPECT_EQ(BaseString::ComputeHashcodeUtf8(utf8Data, utf8Len, false), expectedHash); +} + +HWTEST_F_L0(BaseStringTest, IsASCIICharacter_TEST0) +{ + const uint16_t num = 0; + bool result = BaseString::IsASCIICharacter(num); + ASSERT_FALSE(result); +} + +HWTEST_F_L0(BaseStringTest, IsASCIICharacter_TEST1) +{ + const uint16_t num = 0x7f; + bool result = BaseString::IsASCIICharacter(num); + ASSERT_TRUE(result); +} + +HWTEST_F_L0(BaseStringTest, CanBeCompressed_TEST0) +{ + uint8_t data[] = {}; + EXPECT_TRUE(BaseString::CanBeCompressed(data, 0)); + + uint8_t data1[] = {1, 1, 1, 1, 0}; + ASSERT_FALSE(BaseString::CanBeCompressed(data1, 5)); + ASSERT_TRUE(BaseString::CanBeCompressed(data1, 2)); + + uint8_t data2[] = {'a', 'b', 'c', 'd'}; + ASSERT_TRUE(BaseString::CanBeCompressed(data2, 4)); + + uint8_t data3[] = {'a', '\0', 'c', 'd'}; + ASSERT_FALSE(BaseString::CanBeCompressed(data3, 4)); +} + +HWTEST_F_L0(BaseStringTest, CanBeCompressed_TEST1) +{ + uint16_t data[] = {}; + 
EXPECT_TRUE(BaseString::CanBeCompressed(data, 0)); + + uint16_t data1[] = {1, 1, 1, 1, 0}; + ASSERT_FALSE(BaseString::CanBeCompressed(data1, 5)); + ASSERT_TRUE(BaseString::CanBeCompressed(data1, 2)); + + uint16_t data2[] = {'a', 'b', 'c', 'd'}; + ASSERT_TRUE(BaseString::CanBeCompressed(data2, 4)); + + uint16_t data3[] = {'a', '\0', 'c', 'd'}; + ASSERT_FALSE(BaseString::CanBeCompressed(data3, 4)); +} + +HWTEST_F_L0(BaseStringTest, IndexOf_TEST0) +{ + const uint8_t lhs[] = {'a', 'b', 'c', 'd', 'e'}; + const uint8_t rhs[] = {'a', 'b', 'c'}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 3); + + EXPECT_EQ(BaseString::IndexOf(lhsSp, rhsSp, 0, 4), 0); +} + +HWTEST_F_L0(BaseStringTest, IndexOf_TEST1) +{ + const uint8_t lhs[] = {'a', 'b', 'c', 'd', 'e'}; + const uint8_t rhs[] = {'c', 'd'}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 2); + + EXPECT_EQ(BaseString::IndexOf(lhsSp, rhsSp, 0, 4), 2); +} + +HWTEST_F_L0(BaseStringTest, IndexOf_TEST2) +{ + const uint8_t lhs[] = {'a', 'b', 'c', 'd', 'e'}; + const uint8_t rhs[] = {'x', 'y', 'z'}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 3); + + EXPECT_EQ(BaseString::IndexOf(lhsSp, rhsSp, 0, 4), -1); +} + +HWTEST_F_L0(BaseStringTest, IndexOf_TEST3) +{ + const uint8_t lhs[] = {'a', 'b', 'a', 'b', 'c'}; + const uint8_t rhs[] = {'a', 'b', 'c'}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 3); + + EXPECT_EQ(BaseString::IndexOf(lhsSp, rhsSp, 0, 4), 2); +} + +HWTEST_F_L0(BaseStringTest, IndexOf_TEST4) +{ + const uint8_t lhs[] = {'a', 'b', 'c', 'd', 'e'}; + const uint8_t rhs[] = {'a', 'b', 'x'}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 3); + + EXPECT_EQ(BaseString::IndexOf(lhsSp, rhsSp, 0, 4), -1); +} + +HWTEST_F_L0(BaseStringTest, CompareStringSpan_TEST0) +{ + const uint8_t lhs[] = {1, 2, 3}; + const uint8_t rhs[] = {1, 2, 3}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 3); + EXPECT_EQ(CompareStringSpan(lhsSp, rhsSp, 3), 0); +} + +HWTEST_F_L0(BaseStringTest, CompareStringSpan_TEST1) +{ + const uint8_t lhs[] = {1, 2, 4}; + const uint8_t rhs[] = {1, 2, 3}; + Span 
lhsSp(lhs, 5); + Span rhsSp(rhs, 3); + EXPECT_EQ(CompareStringSpan(lhsSp, rhsSp, 3), 1); +} + +HWTEST_F_L0(BaseStringTest, IsSubStringAtSpan_TEST1) +{ + const uint8_t lhs[] = {'a', 'b', 'c'}; + const uint8_t rhs[] = {'x', 'y'}; + Span lhsSp(lhs, 3); + Span rhsSp(rhs, 2); + ASSERT_FALSE(IsSubStringAtSpan(lhsSp, rhsSp, 1)); +} + +HWTEST_F_L0(BaseStringTest, IsSubStringAtSpan_TEST2) +{ + const uint8_t lhs[] = {'a', 'b'}; + const uint8_t rhs[] = {'b'}; + Span lhsSp(lhs, 2); + Span rhsSp(rhs, 1); + ASSERT_TRUE(IsSubStringAtSpan(lhsSp, rhsSp, 1)); +} + +HWTEST_F_L0(BaseStringTest, IndexOf_TEST5) +{ + const uint8_t lhs[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs[] = {'a', 'b', 'c'}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp, rhsSp, 0, 4), 0); + + const uint8_t lhs1[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs1[] = {'c', 'd'}; + Span lhsSp1(lhs1, 5); + Span rhsSp1(rhs1, 2); + EXPECT_EQ(BaseString::IndexOf(lhsSp1, rhsSp1, 0, 4), 2); + + const uint8_t lhs2[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs2[] = {'x', 'y', 'z'}; + Span lhsSp2(lhs2, 5); + Span rhsSp2(rhs2, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp2, rhsSp2, 0, 4), -1); + + const uint8_t lhs3[] = {'a', 'b', 'a', 'b', 'c'}; + const uint16_t rhs3[] = {'a', 'b', 'c'}; + Span lhsSp3(lhs3, 5); + Span rhsSp3(rhs3, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp3, rhsSp3, 0, 4), 2); + + const uint8_t lhs4[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs4[] = {'a', 'b', 'x'}; + Span lhsSp4(lhs4, 5); + Span rhsSp4(rhs4, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp4, rhsSp4, 0, 4), -1); +} + +HWTEST_F_L0(BaseStringTest, IndexOf_TEST6) +{ + const uint16_t lhs[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs[] = {'a', 'b', 'c'}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp, rhsSp, 0, 4), 0); + + const uint16_t lhs1[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs1[] = {'c', 'd'}; + Span lhsSp1(lhs1, 5); + Span rhsSp1(rhs1, 2); + 
EXPECT_EQ(BaseString::IndexOf(lhsSp1, rhsSp1, 0, 4), 2); + + const uint16_t lhs2[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs2[] = {'x', 'y', 'z'}; + Span lhsSp2(lhs2, 5); + Span rhsSp2(rhs2, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp2, rhsSp2, 0, 4), -1); + + const uint16_t lhs3[] = {'a', 'b', 'a', 'b', 'c'}; + const uint16_t rhs3[] = {'a', 'b', 'c'}; + Span lhsSp3(lhs3, 5); + Span rhsSp3(rhs3, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp3, rhsSp3, 0, 4), 2); + + const uint16_t lhs4[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs4[] = {'a', 'b', 'x'}; + Span lhsSp4(lhs4, 5); + Span rhsSp4(rhs4, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp4, rhsSp4, 0, 4), -1); +} + +HWTEST_F_L0(BaseStringTest, IndexOf_TEST7) +{ + const uint16_t lhs[] = {'a', 'b', 'c', 'd', 'e'}; + const uint8_t rhs[] = {'a', 'b', 'c'}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp, rhsSp, 0, 4), 0); + + const uint16_t lhs1[] = {'a', 'b', 'c', 'd', 'e'}; + const uint8_t rhs1[] = {'c', 'd'}; + Span lhsSp1(lhs1, 5); + Span rhsSp1(rhs1, 2); + EXPECT_EQ(BaseString::IndexOf(lhsSp1, rhsSp1, 0, 4), 2); + + const uint16_t lhs2[] = {'a', 'b', 'c', 'd', 'e'}; + const uint8_t rhs2[] = {'x', 'y', 'z'}; + Span lhsSp2(lhs2, 5); + Span rhsSp2(rhs2, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp2, rhsSp2, 0, 4), -1); + + const uint16_t lhs3[] = {'a', 'b', 'a', 'b', 'c'}; + const uint8_t rhs3[] = {'a', 'b', 'c'}; + Span lhsSp3(lhs3, 5); + Span rhsSp3(rhs3, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp3, rhsSp3, 0, 4), 2); + + const uint16_t lhs4[] = {'a', 'b', 'c', 'd', 'e'}; + const uint8_t rhs4[] = {'a', 'b', 'x'}; + Span lhsSp4(lhs4, 5); + Span rhsSp4(rhs4, 3); + EXPECT_EQ(BaseString::IndexOf(lhsSp4, rhsSp4, 0, 4), -1); +} + HWTEST_F_L0(BaseStringTest, IsUtf8EqualsUtf16_TEST0) { const uint8_t utf8_01[] = {0xF0, 0xE0, 0xC0}; @@ -106,4 +387,148 @@ HWTEST_F_L0(BaseStringTest, LastIndexOf_TEST4) EXPECT_EQ(BaseString::LastIndexOf(lhsSp, rhsSp, 3), -1); } + +HWTEST_F_L0(BaseStringTest, 
IsSubStringAtSpan_TEST3) +{ + const uint8_t lhs[] = {'a', 'b', 'c'}; + const uint16_t rhs[] = {'x', 'y'}; + Span lhsSp(lhs, 3); + Span rhsSp(rhs, 2); + ASSERT_FALSE(IsSubStringAtSpan(lhsSp, rhsSp, 1)); + + const uint8_t lhs1[] = {'a', 'b'}; + const uint16_t rhs1[] = {'b'}; + Span lhsSp1(lhs1, 2); + Span rhsSp1(rhs1, 1); + ASSERT_TRUE(IsSubStringAtSpan(lhsSp1, rhsSp1, 1)); +} + +HWTEST_F_L0(BaseStringTest, IsSubStringAtSpan_TEST4) +{ + const uint16_t lhs[] = {'a', 'b', 'c'}; + const uint8_t rhs[] = {'x', 'y'}; + Span lhsSp(lhs, 3); + Span rhsSp(rhs, 2); + ASSERT_FALSE(IsSubStringAtSpan(lhsSp, rhsSp, 1)); + + const uint16_t lhs1[] = {'a', 'b'}; + const uint8_t rhs1[] = {'b'}; + Span lhsSp1(lhs1, 2); + Span rhsSp1(rhs1, 1); + ASSERT_TRUE(IsSubStringAtSpan(lhsSp1, rhsSp1, 1)); +} + +HWTEST_F_L0(BaseStringTest, IsSubStringAtSpan_TEST5) +{ + const uint16_t lhs[] = {'a', 'b', 'c'}; + const uint16_t rhs[] = {'x', 'y'}; + Span lhsSp(lhs, 3); + Span rhsSp(rhs, 2); + ASSERT_FALSE(IsSubStringAtSpan(lhsSp, rhsSp, 1)); + + const uint16_t lhs1[] = {'a', 'b'}; + const uint16_t rhs1[] = {'b'}; + Span lhsSp1(lhs1, 2); + Span rhsSp1(rhs1, 1); + ASSERT_TRUE(IsSubStringAtSpan(lhsSp1, rhsSp1, 1)); +} + +HWTEST_F_L0(BaseStringTest, LastIndexOf_TEST5) +{ + const uint8_t lhs[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs[] = {'c', 'd'}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp, rhsSp, 4), 2); + + const uint8_t lhs1[] = {'a', 'b', 'c', 'x', 'e'}; + const uint16_t rhs1[] = {'c', 'd'}; + Span lhsSp1(lhs1, 5); + Span rhsSp1(rhs1, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp1, rhsSp1, 4), -1); + + const uint8_t lhs2[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs2[] = {'c', 'x'}; + Span lhsSp2(lhs2, 5); + Span rhsSp2(rhs2, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp2, rhsSp2, 4), -1); + + const uint8_t lhs3[] = {'a', 'b', 'a', 'b', 'c'}; + const uint16_t rhs3[] = {'a', 'b'}; + Span lhsSp3(lhs3, 5); + Span rhsSp3(rhs3, 2); + 
EXPECT_EQ(BaseString::LastIndexOf(lhsSp3, rhsSp3, 4), 2); + + const uint8_t lhs4[] = {'a', 'b', 'c', 'd'}; + const uint16_t rhs4[] = {'x', 'y'}; + Span lhsSp4(lhs4, 4); + Span rhsSp4(rhs4, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp4, rhsSp4, 3), -1); +} + +HWTEST_F_L0(BaseStringTest, LastIndexOf_TEST6) +{ + const uint16_t lhs[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs[] = {'c', 'd'}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp, rhsSp, 4), 2); + + const uint16_t lhs1[] = {'a', 'b', 'c', 'x', 'e'}; + const uint16_t rhs1[] = {'c', 'd'}; + Span lhsSp1(lhs1, 5); + Span rhsSp1(rhs1, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp1, rhsSp1, 4), -1); + + const uint16_t lhs2[] = {'a', 'b', 'c', 'd', 'e'}; + const uint16_t rhs2[] = {'c', 'x'}; + Span lhsSp2(lhs2, 5); + Span rhsSp2(rhs2, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp2, rhsSp2, 4), -1); + + const uint16_t lhs3[] = {'a', 'b', 'a', 'b', 'c'}; + const uint16_t rhs3[] = {'a', 'b'}; + Span lhsSp3(lhs3, 5); + Span rhsSp3(rhs3, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp3, rhsSp3, 4), 2); + + const uint16_t lhs4[] = {'a', 'b', 'c', 'd'}; + const uint16_t rhs4[] = {'x', 'y'}; + Span lhsSp4(lhs4, 4); + Span rhsSp4(rhs4, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp4, rhsSp4, 3), -1); +} + +HWTEST_F_L0(BaseStringTest, LastIndexOf_TEST7) +{ + const uint16_t lhs[] = {'a', 'b', 'c', 'd', 'e'}; + const uint8_t rhs[] = {'c', 'd'}; + Span lhsSp(lhs, 5); + Span rhsSp(rhs, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp, rhsSp, 4), 2); + + const uint16_t lhs1[] = {'a', 'b', 'c', 'x', 'e'}; + const uint8_t rhs1[] = {'c', 'd'}; + Span lhsSp1(lhs1, 5); + Span rhsSp1(rhs1, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp1, rhsSp1, 4), -1); + + const uint16_t lhs2[] = {'a', 'b', 'c', 'd', 'e'}; + const uint8_t rhs2[] = {'c', 'x'}; + Span lhsSp2(lhs2, 5); + Span rhsSp2(rhs2, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp2, rhsSp2, 4), -1); + + const uint16_t lhs3[] = {'a', 'b', 'a', 'b', 
'c'}; + const uint8_t rhs3[] = {'a', 'b'}; + Span lhsSp3(lhs3, 5); + Span rhsSp3(rhs3, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp3, rhsSp3, 4), 2); + + const uint16_t lhs4[] = {'a', 'b', 'c', 'd'}; + const uint8_t rhs4[] = {'x', 'y'}; + Span lhsSp4(lhs4, 4); + Span rhsSp4(rhs4, 2); + EXPECT_EQ(BaseString::LastIndexOf(lhsSp4, rhsSp4, 3), -1); +} } // namespace common::test diff --git a/common_components/objects/tests/composite_base_class_test.cpp b/common_components/objects/tests/composite_base_class_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d5a1c91b7f19b1f4c39b001150f8cdc4a4e6a2b7 --- /dev/null +++ b/common_components/objects/tests/composite_base_class_test.cpp @@ -0,0 +1,114 @@ +/* +* Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/tests/test_helper.h" +#include "common_interfaces/objects/composite_base_class.h" +#include "common_interfaces/objects/base_object.h" + +#include +#include +#include + +namespace common { +class CompositeBaseClassTest : public test::BaseTestWithScope { +protected: + static CompositeBaseClass* MockAllocator() + { + void* memory = ::operator new(sizeof(uint64_t) * 16); + + auto* baseClass = reinterpret_cast(memory); + baseClass->SetObjectType(CommonType::LINE_STRING); + baseClass->ClearBitField(); + + return reinterpret_cast(baseClass); + } + + void SetUp() override + { + roots_ = std::make_unique(); + } + + void TearDown() override + { + roots_.reset(); + } + + std::unique_ptr roots_; +}; + +HWTEST_F_L0(CompositeBaseClassTest, InitializeOnce) +{ + auto allocator = []() -> CompositeBaseClass* { + return CompositeBaseClassTest::MockAllocator(); + }; + + roots_->InitializeCompositeBaseClass(allocator); + roots_->InitializeCompositeBaseClass(allocator); + + EXPECT_TRUE(true); +} + +HWTEST_F_L0(CompositeBaseClassTest, CreateAndGetType) +{ + auto allocator = []() -> CompositeBaseClass* { + return CompositeBaseClassTest::MockAllocator(); + }; + + roots_->InitializeCompositeBaseClass(allocator); + + auto* baseClass = roots_->GetBaseClass(CommonType::LINE_STRING); + ASSERT_NE(baseClass, nullptr); + EXPECT_EQ(baseClass->GetObjectType(), CommonType::LINE_STRING); +} + +HWTEST_F_L0(CompositeBaseClassTest, GetBaseClassReturnsCorrectType) +{ + auto allocator = []() -> CompositeBaseClass* { + return CompositeBaseClassTest::MockAllocator(); + }; + + roots_->InitializeCompositeBaseClass(allocator); + + auto* lineString = roots_->GetBaseClass(CommonType::LINE_STRING); + auto* slicedString = roots_->GetBaseClass(CommonType::SLICED_STRING); + auto* treeString = roots_->GetBaseClass(CommonType::TREE_STRING); + + ASSERT_NE(lineString, nullptr); + ASSERT_NE(slicedString, nullptr); + ASSERT_NE(treeString, nullptr); + + 
EXPECT_EQ(lineString->GetObjectType(), CommonType::LINE_STRING); + EXPECT_EQ(slicedString->GetObjectType(), CommonType::SLICED_STRING); + EXPECT_EQ(treeString->GetObjectType(), CommonType::TREE_STRING); +} + +HWTEST_F_L0(CompositeBaseClassTest, IterateCompositeBaseClass) +{ + auto allocator = []() -> CompositeBaseClass* { + return CompositeBaseClassTest::MockAllocator(); + }; + + roots_->InitializeCompositeBaseClass(allocator); + + std::vector visited; + + roots_->IterateCompositeBaseClass([&visited](RefField<>& field) { + auto* ptr = reinterpret_cast(const_cast(static_cast(&field))); + visited.push_back(ptr); + }); + + EXPECT_EQ(visited.size(), 3); +} +} \ No newline at end of file diff --git a/common_components/platform/arm64/string_hash_internal.h b/common_components/platform/arm64/string_hash_internal.h index 699fdec49da7c0554ba209e3dc7afbeebbac0386..3f1c9e4266605ac0cccca0af292318c727ee233e 100644 --- a/common_components/platform/arm64/string_hash_internal.h +++ b/common_components/platform/arm64/string_hash_internal.h @@ -27,28 +27,12 @@ namespace common { class StringHashInternal { friend class StringHashHelper; private: -#if ENABLE_NEXT_OPTIMIZATION template static uint32_t ComputeHashForDataOfLongString(const T *data, size_t size, uint32_t hashSeed) { - /** - * process the first {remainder} items of data[] and hashSeed - * for example, if remainder = 2, - * then hash[2] = data[0] * 31^1, hash[3] = data[1] * 31^0; - * hash[0] = hashSeed * 31^{remainder} - * - * the rest elements in data[] will be processed with for loop as follows - * hash[0]: hash[0] * 31^4 + data[i] * 31^3 - * hash[1]: hash[1] * 31^4 + data[i+1] * 31^2 - * hash[2]: hash[2] * 31^4 + data[i+2] * 31^1 - * hash[3]: hash[3] * 31^4 + data[i+3] * 31^0 - * i starts at {remainder} and every time += 4, - * at last, totolHash = hash[0] + hash[1] + hash[2] + hash[3]; - */ - static_assert(std::is_same_v || std::is_same_v); - constexpr size_t blockSize = StringHash::BLOCK_SIZE; - constexpr size_t 
loopSize = StringHash::SIMD_U8_LOOP_SIZE; + constexpr uint32_t blockSize = StringHash::BLOCK_SIZE; + constexpr uint32_t scale = StringHash::BLOCK_MULTIPLY; uint32_t hash[blockSize] = {}; uint32_t index = 0; uint32_t remainder = size & (blockSize - 1); @@ -64,66 +48,88 @@ private: } hash[0] += hashSeed * StringHash::MULTIPLIER[blockSize - 1 - remainder]; - uint32x4_t dataVec; - uint32x4_t hashVec; - uint32x4_t multiplierVec = vld1q_u32(StringHash::MULTIPLIER); - uint32x4_t scaleVec = vdupq_n_u32(StringHash::BLOCK_MULTIPLY); - - if constexpr (std::is_same_v) { - // process 4 elements with for loop if (size-index) % 8 = 4 - if ((size - index) % loopSize == blockSize) { - for (size_t i = 0; i < blockSize; i++) { - hash[i] = hash[i] * StringHash::BLOCK_MULTIPLY + data[index++] * StringHash::MULTIPLIER[i]; - } - } - hashVec = vld1q_u32(hash); - for (; index < size; index += loopSize) { - uint8x8_t dataVec8 = vld1_u8(data + index); - uint16x8_t dataVec16 = vmovl_u8(dataVec8); - dataVec = vmovl_u16(vget_low_u16(dataVec16)); - hashVec = vaddq_u32(vmulq_u32(hashVec, scaleVec), vmulq_u32(dataVec, multiplierVec)); - dataVec = vmovl_u16(vget_high_u16(dataVec16)); - hashVec = vaddq_u32(vmulq_u32(hashVec, scaleVec), vmulq_u32(dataVec, multiplierVec)); - } - } else { - hashVec = vld1q_u32(hash); - for (; index < size; index += blockSize) { - dataVec = vmovl_u16(vld1_u16(data + index)); - hashVec = vaddq_u32(vmulq_u32(hashVec, scaleVec), vmulq_u32(dataVec, multiplierVec)); + uint32_t dataMul[blockSize] = {}; + for (; index < size; index += blockSize) { + for (size_t i = 0; i < blockSize; i++) { + dataMul[i] = data[index + i] * StringHash::MULTIPLIER[i]; + hash[i] = hash[i] * scale + dataMul[i]; } } - return vaddvq_u32(hashVec); + uint32_t hashTotal = 0; + for (size_t i = 0; i < blockSize; i++) { + hashTotal += hash[i]; + } + return hashTotal; } -#else - template - static uint32_t ComputeHashForDataOfLongString(const T *data, size_t size, - uint32_t hashSeed) + + template <> + 
uint32_t ComputeHashForDataOfLongString(const uint8_t *data, + size_t size, uint32_t hashSeed) { - constexpr uint32_t hashShift = static_cast(StringHash::HASH_SHIFT); - constexpr uint32_t blockSize = static_cast(StringHash::BLOCK_SIZE); - uint32_t hash[blockSize] = {0}; - uint32_t index = 0; - uint32x4_t hashVec = vld1q_u32(hash); - uint32x4_t multiplier_vec = vdupq_n_u32(static_cast(StringHash::HASH_MULTIPLY)); - uint32x4_t dataVec; - for (; index + blockSize <= size; index += blockSize) { - dataVec[0] = data[index]; - dataVec[1] = data[index + 1]; // 1: the second element of the block - dataVec[2] = data[index + 2]; // 2: the third element of the block - dataVec[3] = data[index + 3]; // 3: the fourth element of the block - hashVec = vaddq_u32(vmulq_u32(hashVec, multiplier_vec), dataVec); + const uint32x4_t multiplierVec = vld1q_u32(StringHash::MULTIPLIER); + constexpr uint32_t multiplierHash = StringHash::MULTIPLIER[0] * StringHash::MULTIPLIER[2]; + + uint32_t hash = hashSeed; + const uint8_t *dataEnd = data + size; + const uint8_t *vecEnd = data + (size & (~15)); + const uint8_t *p = data; + constexpr size_t UINT8_LOOP_SIZE = 16; // neon 128bit / uint8_t 8bit = 16 + for (; p < vecEnd; p += UINT8_LOOP_SIZE) { + uint8x16_t dataVec8 = vld1q_u8(p); + uint16x8_t dataVec16_1 = vmovl_u8(vget_low_u16(dataVec8)); + uint16x8_t dataVec16_2 = vmovl_u8(vget_high_u16(dataVec8)); + uint32x4_t dataVec32_1 = vmovl_u16(vget_low_u16(dataVec16_1)); + uint32x4_t dataVec32_3 = vmovl_u16(vget_low_u16(dataVec16_2)); + uint32x4_t dataVec32_2 = vmovl_u16(vget_high_u16(dataVec16_1)); + uint32x4_t dataVec32_4 = vmovl_u16(vget_high_u16(dataVec16_2)); + + dataVec32_1 = vmulq_u32(dataVec32_1, multiplierVec); + hash = hash * multiplierHash + vaddvq_u32(dataVec32_1); + + dataVec32_2 = vmulq_u32(dataVec32_2, multiplierVec); + hash = hash * multiplierHash + vaddvq_u32(dataVec32_2); + + dataVec32_3 = vmulq_u32(dataVec32_3, multiplierVec); + hash = hash * multiplierHash + vaddvq_u32(dataVec32_3); + 
+ dataVec32_4 = vmulq_u32(dataVec32_4, multiplierVec); + hash = hash * multiplierHash + vaddvq_u32(dataVec32_4); } - vst1q_u32(hash, hashVec); - for (; index < size; ++index) { - hash[0] = (hash[0] << hashShift) - hash[0] + data[index]; + + for (; p < dataEnd; p++) { + hash = (hash << static_cast(StringHash::HASH_SHIFT)) - hash + *p; + } + return hash; + } + + template <> + uint32_t ComputeHashForDataOfLongString(const uint16_t *data, + size_t size, uint32_t hashSeed) + { + const uint32x4_t multiplierVec = vld1q_u32(StringHash::MULTIPLIER); + constexpr uint32_t multiplierHash = StringHash::MULTIPLIER[0] * StringHash::MULTIPLIER[2]; + + uint32_t hash = hashSeed; + const uint16_t *dataEnd = data + size; + const uint16_t *vecEnd = data + (size & (~7)); + const uint16_t *p = data; + constexpr size_t UINT16_LOOP_SIZE = 8; // neon 128bit / uint16_t 16bit = 8 + for (; p < vecEnd; p += UINT16_LOOP_SIZE) { + uint16x8_t dataVec16 = vld1q_u16(p); + uint32x4_t dataVec32_1 = vmovl_u16(vget_low_u16(dataVec16)); + dataVec32_1 = vmulq_u32(dataVec32_1, multiplierVec); + hash = hash * multiplierHash + vaddvq_u32(dataVec32_1); + + uint32x4_t dataVec32_2 = vmovl_u16(vget_high_u16(dataVec16)); + dataVec32_2 = vmulq_u32(dataVec32_2, multiplierVec); + hash = hash * multiplierHash + vaddvq_u32(dataVec32_2); } - uint32_t totalHash = hashSeed; - for (uint32_t i = 0; i < blockSize; ++i) { - totalHash = (totalHash << hashShift) - totalHash + hash[i]; + + for (; p < dataEnd; p++) { + hash = (hash << static_cast(StringHash::HASH_SHIFT)) - hash + *p; } - return totalHash; + return hash; } -#endif }; } // namespace common #endif // COMMON_COMPONENTS_PLATFORM_STRING_HASH_ARM64_H \ No newline at end of file diff --git a/common_components/platform/common/string_hash_internal.h b/common_components/platform/common/string_hash_internal.h index c1456274ba3fa5b13d7c4921c083c3ca21ebe1cc..fe8442b10f1e783911e8650d3dce40e82953afdd 100644 --- a/common_components/platform/common/string_hash_internal.h +++ 
b/common_components/platform/common/string_hash_internal.h @@ -25,7 +25,6 @@ namespace common { class StringHashInternal { friend class StringHashHelper; private: -#if ENABLE_NEXT_OPTIMIZATION template static uint32_t ComputeHashForDataOfLongString(const T *data, size_t size, uint32_t hashSeed) @@ -60,31 +59,6 @@ private: } return hashTotal; } -#else - template - static uint32_t ComputeHashForDataOfLongString(const T *data, size_t size, - uint32_t hashSeed) - { - constexpr uint32_t hashShift = static_cast(StringHash::HASH_SHIFT); - constexpr uint32_t blockSize = static_cast(StringHash::BLOCK_SIZE); - uint32_t hash[blockSize] = {0}; - uint32_t index = 0; - for (; index + blockSize <= size; index += blockSize) { - hash[0] = (hash[0] << hashShift) - hash[0] + data[index]; - hash[1] = (hash[1] << hashShift) - hash[1] + data[index + 1]; // 1: the second element of the block - hash[2] = (hash[2] << hashShift) - hash[2] + data[index + 2]; // 2: the third element of the block - hash[3] = (hash[3] << hashShift) - hash[3] + data[index + 3]; // 3: the fourth element of the block - } - for (; index < size; ++index) { - hash[0] = (hash[0] << hashShift) - hash[0] + data[index]; - } - uint32_t totalHash = hashSeed; - for (uint32_t i = 0; i < blockSize; ++i) { - totalHash = (totalHash << hashShift) - totalHash + hash[i]; - } - return totalHash; - } -#endif }; } // namespace common #endif // COMMON_COMPONENTS_PLATFORM_STRING_HASH_COMMON_H \ No newline at end of file diff --git a/common_components/platform/unix/tests/BUILD.gn b/common_components/platform/unix/tests/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..645bc239e757bdcf47648a53405ed36f343d7c4d --- /dev/null +++ b/common_components/platform/unix/tests/BUILD.gn @@ -0,0 +1,57 @@ +# Copyright (c) 2025 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//arkcompiler/ets_runtime/common_components/tests/test_helper.gni") + +module_output_path = "ets_runtime" + +host_unittest_action("Map_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "map_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +group("unittest") { + testonly = true + + # deps file + deps = [ + ":Map_Test", + ] +} + +group("host_unittest") { + testonly = true + + # deps file + deps = [ + ":Map_TestAction", + ] +} \ No newline at end of file diff --git a/common_components/platform/unix/tests/map_test.cpp b/common_components/platform/unix/tests/map_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c6f38a5786b90b5718c8e8a3cf46b4a871995fcd --- /dev/null +++ b/common_components/platform/unix/tests/map_test.cpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/tests/test_helper.h" +#include "common_components/platform/map.h" + +#include +#include + +class PageProtectTest : public common::test::BaseTestWithScope { +protected: + void SetUp() override {} + void TearDown() override {} +}; + +HWTEST_F_L0(PageProtectTest, TestPageProtect) +{ + size_t pageSize = getpagesize(); + void* mem = mmap(nullptr, pageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + ASSERT_NE(mem, MAP_FAILED); + + bool success = common::PageProtect(mem, pageSize, PROT_READ); + EXPECT_TRUE(success); + + munmap(mem, pageSize); +} + +HWTEST_F_L0(PageProtectTest, TestNullptrMemory) +{ + bool success = common::PageProtect(nullptr, getpagesize(), -1); + EXPECT_FALSE(success); +} \ No newline at end of file diff --git a/common_components/profiler/heap_profiler_listener.cpp b/common_components/profiler/heap_profiler_listener.cpp index d78845faba505bcd51af966eaa945474f83f6b35..fd9656ed55a427769a897406875089424bc96a40 100644 --- a/common_components/profiler/heap_profiler_listener.cpp +++ b/common_components/profiler/heap_profiler_listener.cpp @@ -13,29 +13,50 @@ * limitations under the License. 
*/ +#include "common_components/log/log.h" +#include "common_components/mutator/mutator_manager.h" #include "common_interfaces/profiler/heap_profiler_listener.h" namespace common { - HeapProfilerListener &HeapProfilerListener::GetInstance() - { - static HeapProfilerListener instance; - return instance; - } +HeapProfilerListener &HeapProfilerListener::GetInstance() +{ + static HeapProfilerListener instance; + return instance; +} - void HeapProfilerListener::RegisterMoveEventCb(const std::function &cb) - { - moveEventCb_ = cb; - } +uint32_t HeapProfilerListener::RegisterMoveEventCb(const std::function &cb) +{ + std::unique_lock lock(mutex_); + moveEventCbs_.emplace(moveEventCbId_, cb); + return moveEventCbId_++; +} - void HeapProfilerListener::UnRegisterMoveEventCb() - { - moveEventCb_ = nullptr; - } +void HeapProfilerListener::UnRegisterMoveEventCb(uint32_t key) +{ + std::unique_lock lock(mutex_); + moveEventCbs_.erase(key); +} - void HeapProfilerListener::OnMoveEvent(uintptr_t fromObj, uintptr_t toObj, size_t size) - { - if (moveEventCb_) { - moveEventCb_(fromObj, toObj, size); +void HeapProfilerListener::OnMoveEvent(uintptr_t fromObj, uintptr_t toObj, size_t size) +{ + std::shared_lock lock(mutex_); + for (const auto &pair : moveEventCbs_) { + if (pair.second) { + pair.second(fromObj, toObj, size); } } +} + +void HeapProfilerListener::RegisterOutOfMemoryEventCb(const std::function &cb) +{ + outOfMemoryEventCb_ = cb; +} +void HeapProfilerListener::OnOutOfMemoryEventCb() +{ + void *thread = nullptr; + if (!IsGcThread()) { + thread = Mutator::GetMutator()->GetThreadHolder()->GetJSThread(); + } + outOfMemoryEventCb_(thread); +} } // namespace common \ No newline at end of file diff --git a/common_components/serialize/serialize_utils.cpp b/common_components/serialize/serialize_utils.cpp index b8514404de40504bb44f0463d8614e533383ec6b..ba9b8b72b1c97e9b22d5cf6f2902f65398d2ce93 100755 --- a/common_components/serialize/serialize_utils.cpp +++ 
b/common_components/serialize/serialize_utils.cpp @@ -21,10 +21,10 @@ namespace common { SerializedBaseObjectSpace SerializeUtils::GetSerializeObjectSpace(uintptr_t obj) { - RegionDesc *info = RegionDesc::GetRegionDescAt(obj); - RegionDesc::RegionType type = info->GetRegionType(); + RegionDesc::RegionType type = RegionDesc::GetAliveRegionType(obj); switch (type) { case RegionDesc::RegionType::THREAD_LOCAL_REGION: + case RegionDesc::RegionType::THREAD_LOCAL_OLD_REGION: case RegionDesc::RegionType::RECENT_FULL_REGION: case RegionDesc::RegionType::FROM_REGION: case RegionDesc::RegionType::LONE_FROM_REGION: @@ -32,10 +32,10 @@ SerializedBaseObjectSpace SerializeUtils::GetSerializeObjectSpace(uintptr_t obj) case RegionDesc::RegionType::TO_REGION: case RegionDesc::RegionType::OLD_REGION: return SerializedBaseObjectSpace::REGULAR_SPACE; - case RegionDesc::RegionType::FULL_PINNED_REGION: - case RegionDesc::RegionType::RECENT_PINNED_REGION: - case RegionDesc::RegionType::FIXED_PINNED_REGION: - case RegionDesc::RegionType::FULL_FIXED_PINNED_REGION: + case RegionDesc::RegionType::FULL_POLYSIZE_NONMOVABLE_REGION: + case RegionDesc::RegionType::RECENT_POLYSIZE_NONMOVABLE_REGION: + case RegionDesc::RegionType::MONOSIZE_NONMOVABLE_REGION: + case RegionDesc::RegionType::FULL_MONOSIZE_NONMOVABLE_REGION: case RegionDesc::RegionType::READ_ONLY_REGION: case RegionDesc::RegionType::APPSPAWN_REGION: return SerializedBaseObjectSpace::PIN_SPACE; @@ -46,9 +46,4 @@ SerializedBaseObjectSpace SerializeUtils::GetSerializeObjectSpace(uintptr_t obj) return SerializedBaseObjectSpace::OTHER; } } - -size_t SerializeUtils::GetRegionSize() -{ - return BaseRuntime::GetInstance()->GetHeapParam().regionSize * KB; -} } // namespace common diff --git a/common_components/serialize/serialize_utils.h b/common_components/serialize/serialize_utils.h index 7eec8691492f006ef5c8ba3594917e37b9a3ecf3..363d6a6531d7ee9e6669ccc3ea6fed17b12ace83 100644 --- a/common_components/serialize/serialize_utils.h +++ 
b/common_components/serialize/serialize_utils.h @@ -32,7 +32,6 @@ enum class SerializedBaseObjectSpace : uint8_t { class SerializeUtils { public: static SerializedBaseObjectSpace GetSerializeObjectSpace(uintptr_t obj); - static size_t GetRegionSize(); }; } // namespace common #endif // COMMON_COMPONENTS_SERIALIZE_UTILS_H diff --git a/common_components/serialize/tests/BUILD.gn b/common_components/serialize/tests/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..bfcdc438399f306cfb36395018127eaa5fa5208f --- /dev/null +++ b/common_components/serialize/tests/BUILD.gn @@ -0,0 +1,57 @@ +# Copyright (c) 2025 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//arkcompiler/ets_runtime/common_components/tests/test_helper.gni") + +module_output_path = "ets_runtime" + +host_unittest_action("Serialize_Utils_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "serialize_utils_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +group("unittest") { + testonly = true + + # deps file + deps = [ + ":Serialize_Utils_Test", + ] +} + +group("host_unittest") { + testonly = true + + # deps file + deps = [ + ":Serialize_Utils_TestAction", + ] +} diff --git a/common_components/serialize/tests/serialize_utils_test.cpp b/common_components/serialize/tests/serialize_utils_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3336be0feddbd1685f38f4b51cf3321b51aa1ff5 --- /dev/null +++ b/common_components/serialize/tests/serialize_utils_test.cpp @@ -0,0 +1,57 @@ +/* +* Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/heap/allocator/region_desc.h" +#include "common_components/heap/allocator/regional_heap.h" +#include "common_components/serialize/serialize_utils.h" +#include "common_components/serialize/serialize_utils.cpp" +#include "common_components/tests/test_helper.h" + +using namespace common; + +namespace common::test { +class SerializeUtilsTest : public common::test::BaseTestWithScope { +protected: + static void SetUpTestCase() + { + BaseRuntime::GetInstance()->Init(); + } + + static void TearDownTestCase() + { + BaseRuntime::GetInstance()->Fini(); + } + void SetUp() override + { + MutatorManager::Instance().CreateRuntimeMutator(ThreadType::GC_THREAD); + } + + void TearDown() override + { + MutatorManager::Instance().DestroyRuntimeMutator(ThreadType::GC_THREAD); + } +}; + +HWTEST_F_L0(SerializeUtilsTest, GetSerializeObjectSpace) +{ + RegionalHeap& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + uintptr_t addr = theAllocator.AllocOldRegion(); + ASSERT_NE(addr, 0); + RegionDesc* region = RegionDesc::GetRegionDescAt(addr); + region->SetRegionType(RegionDesc::RegionType::END_OF_REGION_TYPE); + SerializedBaseObjectSpace spaceType = SerializeUtils::GetSerializeObjectSpace(addr); + EXPECT_EQ(spaceType, SerializedBaseObjectSpace::OTHER); +} +} // namespace common::test \ No newline at end of file diff --git a/common_components/taskpool/runner.cpp b/common_components/taskpool/runner.cpp index ce0c3ca0e28a42847ce6c44d55180501eb85981c..085bbe293b01f14c583f1f7f95b982701456f9c2 100644 --- a/common_components/taskpool/runner.cpp +++ b/common_components/taskpool/runner.cpp @@ -20,10 +20,6 @@ #include "qos.h" #endif -#ifdef ENABLE_RSS -#include "res_sched_client.h" -#endif - namespace common { Runner::Runner(uint32_t threadNum, const std::function prologueHook, const std::function epilogueHook) @@ -86,6 +82,7 @@ void Runner::ForEachTask(const std::function &f) void Runner::SetQosPriority([[maybe_unused]] PriorityMode mode) { #ifdef 
ENABLE_QOS + std::lock_guard guard(mtx_); switch (mode) { case PriorityMode::STW: { for (uint32_t threadId : gcThreadId_) { @@ -112,21 +109,6 @@ void Runner::SetQosPriority([[maybe_unused]] PriorityMode mode) #endif } -void Runner::SetRssPriority([[maybe_unused]] RssPriorityType type) -{ -#ifdef ENABLE_RSS - uint64_t pid = getpid(); - int64_t status = static_cast(type); - for (uint32_t threadId : gcThreadId_) { - std::unordered_map payLoad = { { "pid", std::to_string(pid) }, - { "tid", std::to_string(threadId) } }; - OHOS::ResourceSchedule::ResSchedClient::GetInstance() - .ReportData(OHOS::ResourceSchedule::ResType::RES_TYPE_GC_THREAD_QOS_STATUS_CHANGE, - status, payLoad); - } -#endif -} - void Runner::RecordThreadId() { std::lock_guard guard(mtx_); diff --git a/common_components/taskpool/runner.h b/common_components/taskpool/runner.h index d8ef8534d2806ba43fba430ddb7f1fa8e9cb3e46..a9c1ad4e9697092d09966b7fdb5be0e34bcc9f36 100644 --- a/common_components/taskpool/runner.h +++ b/common_components/taskpool/runner.h @@ -38,11 +38,6 @@ enum class PriorityMode { BACKGROUND }; -enum class RssPriorityType { - COMMON = 0, - KEY = 1 -}; - class Runner { public: explicit Runner(uint32_t threadNum, @@ -66,7 +61,6 @@ public: void PUBLIC_API TerminateThread(); void TerminateTask(int32_t id, TaskType type); void SetQosPriority(PriorityMode mode); - void SetRssPriority(RssPriorityType type); void RecordThreadId(); uint32_t GetTotalThreadNum() const diff --git a/common_components/taskpool/task.h b/common_components/taskpool/task.h index edf2c34c10ed033b1608131d860bf26c0d3718a7..102439db1b5a9594694af2c2bdcfc166ae42a15e 100644 --- a/common_components/taskpool/task.h +++ b/common_components/taskpool/task.h @@ -16,6 +16,7 @@ #ifndef COMMON_COMPONENTS_TASKPOOL_TASK_H #define COMMON_COMPONENTS_TASKPOOL_TASK_H +#include #include #include @@ -68,17 +69,17 @@ private: class TaskPackMonitor { public: - explicit TaskPackMonitor(int running, int maxRunning) : running_(running), 
maxRunning_(maxRunning) + explicit TaskPackMonitor(int posted, int capacity) : posted_(posted), capacity_(capacity) { - DCHECK_CC(running_ >= 0); - DCHECK_CC(running_ <= maxRunning_); + DCHECK_CC(posted_ >= 0); + DCHECK_CC(posted_ <= capacity_); } - ~TaskPackMonitor() = default; + virtual ~TaskPackMonitor() = default; void WaitAllFinished() { std::unique_lock lock(mutex_); - while (running_ > 0) { + while (posted_ > 0) { cv_.wait(lock); } } @@ -87,8 +88,8 @@ public: { std::lock_guard guard(mutex_); DCHECK_CC(running_ >= 0); - if (running_ < maxRunning_) { - ++running_; + if (posted_ < capacity_) { + ++posted_; return true; } return false; @@ -97,16 +98,63 @@ public: void NotifyFinishOne() { std::lock_guard guard(mutex_); + DCHECK_CC(posted_ >= 0); + if (--posted_ == 0) { + cv_.notify_all(); + } + } + + bool WaitNextStepOrFinished() + { + std::unique_lock lock(mutex_); + if (terminated_) { + return false; + } + cv_.wait(lock); + if (terminated_) { + return false; + } + return true; + } + + bool TryStartStep() + { + std::lock_guard guard(mutex_); + if (terminated_) { + return false; + } + ++running_; + DCHECK_CC(running_ <= capacity_ + 1); + return true; + } + + void FinishStep() + { + std::lock_guard guard(mutex_); + DCHECK_CC(!terminated_); + DCHECK_CC(running_ > 0); if (--running_ == 0) { + terminated_ = true; cv_.notify_all(); } } + void WakeUpRunnerApproximately() + { + // This check may fail because is not inside lock, but for an approximate waking up it is ok + size_t current = reinterpret_cast *>(&running_)->load(std::memory_order_relaxed); + if (UNLIKELY_CC(current < posted_)) { + cv_.notify_one(); + } + } + NO_COPY_SEMANTIC_CC(TaskPackMonitor); NO_MOVE_SEMANTIC_CC(TaskPackMonitor); private: - int running_ {0}; - int maxRunning_ {0}; + size_t running_ {0}; + size_t posted_ {0}; + size_t capacity_ {0}; + bool terminated_ {false}; std::condition_variable cv_; std::mutex mutex_; }; diff --git a/common_components/taskpool/taskpool.h 
b/common_components/taskpool/taskpool.h index 0bf345144ae0e52235f28bd574828d2abb180f7d..95b1f0420a8fee9da858380baec182e46f32ab03 100644 --- a/common_components/taskpool/taskpool.h +++ b/common_components/taskpool/taskpool.h @@ -77,11 +77,6 @@ public: runner_->SetQosPriority(mode); } - void SetThreadRssPriority(RssPriorityType type) - { - runner_->SetRssPriority(type); - } - void ForEachTask(const std::function &f); private: diff --git a/common_components/taskpool/tests/BUILD.gn b/common_components/taskpool/tests/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..3c3a88f482efe1b5e3ab7b225a6934db3831d7f9 --- /dev/null +++ b/common_components/taskpool/tests/BUILD.gn @@ -0,0 +1,107 @@ +# Copyright (c) 2025 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//arkcompiler/ets_runtime/common_components/tests/test_helper.gni") + +module_output_path = "ets_runtime" + +host_unittest_action("Runner_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "runner_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Taskpool_Task_Queue_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "task_queue_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +host_unittest_action("Taskpool_Test") { + module_out_path = module_output_path + + sources = [ + # test file + "taskpool_test.cpp", + ] + + configs = [ + "//arkcompiler/ets_runtime/common_components:common_components_test_config", + "//arkcompiler/ets_runtime:icu_path_test_config", + ] + + deps = [ "//arkcompiler/ets_runtime/common_components:libark_common_components_test" ] + + # hiviewdfx libraries + external_deps = [ + "icu:shared_icui18n", + "icu:shared_icuuc", + "zlib:libz", + ] +} + +group("unittest") { + testonly = true + + # deps file + deps = [ + ":Runner_Test", + ":Taskpool_Task_Queue_Test", + ":Taskpool_Test", + ] +} + +group("host_unittest") { + testonly = true + + # deps file + deps = [ + ":Runner_TestAction", + ":Taskpool_Task_Queue_TestAction", + ":Taskpool_TestAction", + ] +} \ No newline at end of file diff --git 
a/ecmascript/jit/rewriter/reloc_rewriter_aarch64.h b/common_components/taskpool/tests/runner_test.cpp similarity index 34% rename from ecmascript/jit/rewriter/reloc_rewriter_aarch64.h rename to common_components/taskpool/tests/runner_test.cpp index ab6081ddae998e586b48e05ebf24ef914621a01b..4e005bdccb77c35977e50caa47511aa4166e141a 100644 --- a/ecmascript/jit/rewriter/reloc_rewriter_aarch64.h +++ b/common_components/taskpool/tests/runner_test.cpp @@ -13,23 +13,53 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_COECMASCRIPT_COMPILER_ASSEMBLER_RELOC_REWRITER_AARCH64_RELOC_REWRITER_H -#define ECMASCRIPT_COECMASCRIPT_COMPILER_ASSEMBLER_RELOC_REWRITER_AARCH64_RELOC_REWRITER_H - -#include "ecmascript/compiler/assembler/assembler.h" -#include "ecmascript/ecma_vm.h" -#include "ecmascript/mem/native_area_allocator.h" -#include "ecmascript/jit/rewriter/reloc_rewriter.h" - -namespace panda::ecmascript::kungfu { - -class PUBLIC_API RelocWriterAArch64 : public RelocWriter { - static const int INSTRUCT_SIZE = 4; - static const int MAX_JUMP_SIZE = 128 * 1024 * 1024; - public: - bool PUBLIC_API RewriteRelocInfo(uint8_t *codeAddr, uint8_t *jitAllocAddr, RelocMap &relocInfo); - virtual ~RelocWriterAArch64() = default; +#include "common_components/tests/test_helper.h" +#include "common_components/taskpool/runner.h" + +#include +#include +#include +#include + +namespace common { +class RunnerTest : public common::test::BaseTestWithScope { +protected: + void SetUp() override {} + void TearDown() override {} + + static std::function CreateMockPrologueHook() + { + return [](native_handle_type handle) {}; + } + + static std::function CreateMockEpilogueHook() + { + return [](native_handle_type handle) {}; + } +}; + +class MockTask : public Task { +public: + explicit MockTask(int32_t id) + : Task(id), executed_(false) {} + + bool Run(uint32_t threadId) override + { + executed_ = true; + return true; + } + + bool IsExecuted() const { return executed_; } + +private: + std::atomic 
executed_; }; -} // namespace panda::ecmascript::kungfu -#endif // ECMASCRIPT_COECMASCRIPT_COMPILER_ASSEMBLER_RELOC_REWRITER_AARCH64_RELOC_REWRITER_H \ No newline at end of file +HWTEST_F_L0(RunnerTest, InitializeRunnerWithThreads) { + constexpr uint32_t threadNum = 4; + Runner runner(threadNum, RunnerTest::CreateMockPrologueHook(), RunnerTest::CreateMockEpilogueHook()); + EXPECT_EQ(runner.GetTotalThreadNum(), threadNum); + + runner.TerminateThread(); +} +} \ No newline at end of file diff --git a/common_components/taskpool/tests/task_queue_test.cpp b/common_components/taskpool/tests/task_queue_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e9d39728913d82279ced0637ae88eb430ceda26c --- /dev/null +++ b/common_components/taskpool/tests/task_queue_test.cpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/tests/test_helper.h" +#include "common_components/taskpool/task_queue.h" +#include "common_components/taskpool/task.h" + +#include +#include + +namespace common { + +class TaskQueueTest : public common::test::BaseTestWithScope { +protected: + void SetUp() override + { + queue_ = new TaskQueue(); + } + + void TearDown() override + { + delete queue_; + queue_ = nullptr; + } + + TaskQueue* queue_; +}; + +class MockTask : public Task { +public: + explicit MockTask(int id) : Task(id), executed_(false) {} + + bool Run(uint32_t threadId) override + { + executed_ = true; + return true; + } + + bool IsExecuted() const { return executed_; } + +private: + mutable bool executed_; +}; + + +HWTEST_F_L0(TaskQueueTest, PopTask_DelayedTaskExpired_CanBeExecuted) +{ + auto task = std::make_unique(2); + queue_->PostDelayedTask(std::move(task), 500); + + usleep(600 * 1000); + + auto poppedTask = queue_->PopTask(); + ASSERT_NE(poppedTask, nullptr); + + MockTask* mockTask = static_cast(poppedTask.get()); + EXPECT_FALSE(mockTask->IsExecuted()); + + (void)poppedTask->Run(0); + EXPECT_TRUE(mockTask->IsExecuted()); +} + +HWTEST_F_L0(TaskQueueTest, PopTask_MultipleDelayedTasks_ExecuteInOrder) +{ + auto task1 = std::make_unique(1); + auto task2 = std::make_unique(2); + auto task3 = std::make_unique(3); + + queue_->PostDelayedTask(std::move(task1), 800); + queue_->PostDelayedTask(std::move(task2), 500); + queue_->PostDelayedTask(std::move(task3), 1000); + + usleep(600 * 1000); + + auto poppedTask = queue_->PopTask(); + ASSERT_NE(poppedTask, nullptr); + EXPECT_EQ(poppedTask->GetId(), 2); + + poppedTask->Run(0); + MockTask* mockTask2 = static_cast(poppedTask.get()); + EXPECT_TRUE(mockTask2->IsExecuted()); + + usleep(300 * 1000); + + poppedTask = queue_->PopTask(); + ASSERT_NE(poppedTask, nullptr); + EXPECT_EQ(poppedTask->GetId(), 1); + + poppedTask->Run(0); + MockTask* mockTask1 = static_cast(poppedTask.get()); + EXPECT_TRUE(mockTask1->IsExecuted()); + + 
usleep(200 * 1000); + + poppedTask = queue_->PopTask(); + ASSERT_NE(poppedTask, nullptr); + EXPECT_EQ(poppedTask->GetId(), 3); + + poppedTask->Run(0); + MockTask* mockTask3 = static_cast(poppedTask.get()); + EXPECT_TRUE(mockTask3->IsExecuted()); +} +} \ No newline at end of file diff --git a/common_components/taskpool/tests/taskpool_test.cpp b/common_components/taskpool/tests/taskpool_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..11e0741a266a50345833a9e3027f231fc32b61cb --- /dev/null +++ b/common_components/taskpool/tests/taskpool_test.cpp @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/tests/test_helper.h" +#include "common_components/taskpool/taskpool.h" +#include "common_components/taskpool/task.h" + +#include +#include +#include + +namespace common { + +class MockTask : public Task { +public: + explicit MockTask(int32_t id) + : Task(id), executed_(false), terminated_(false) {} + + bool Run(uint32_t threadId) override + { + executed_ = true; + return true; + } + + bool IsExecuted() const { return executed_; } + + bool IsTerminate() const + { + return terminated_; + } + + void Terminate() + { + terminated_ = true; + } + +private: + std::atomic executed_; + std::atomic terminated_; +}; + +class TaskpoolTest : public common::test::BaseTestWithScope { +protected: + void SetUp() override {} + void TearDown() override {} + + class ScopedTaskpool { + public: + explicit ScopedTaskpool(int threadNum = DEFAULT_TASKPOOL_THREAD_NUM) + : isInitialized_(false) + { + isInitialized_ = true; + pool_.Initialize(threadNum); + } + + Taskpool* Get() + { + return &pool_; + } + + private: + Taskpool pool_; + bool isInitialized_; + }; +}; + +HWTEST_F_L0(TaskpoolTest, InitializeAndDestroy) { + TaskpoolTest::ScopedTaskpool pool(2); + EXPECT_NE(pool.Get(), nullptr); + EXPECT_GT(pool.Get()->GetTotalThreadNum(), 0U); +} + +HWTEST_F_L0(TaskpoolTest, TerminateTask) { + TaskpoolTest::ScopedTaskpool pool(2); + Taskpool* taskpool = pool.Get(); + ASSERT_NE(taskpool, nullptr); + + auto task1 = std::make_unique(1); + auto task2 = std::make_unique(2); + taskpool->PostTask(std::move(task1)); + taskpool->PostTask(std::move(task2)); + + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + + taskpool->TerminateTask(1, TaskType::ALL); + + bool isTerminated = false; + taskpool->ForEachTask([&isTerminated](Task* task) { + if (task->GetId() == 1 && task->IsTerminate()) { + isTerminated = true; + } + }); + + EXPECT_FALSE(isTerminated); +} +} \ No newline at end of file diff --git a/common_components/tests/ohos_test.xml 
b/common_components/tests/ohos_test.xml index 864e893b2a23f2daf2129c99fce1d82913b6e73b..86f31897cf04778cc523dd4c4ba9cff4c9ea3bbb 100644 --- a/common_components/tests/ohos_test.xml +++ b/common_components/tests/ohos_test.xml @@ -18,16 +18,46 @@