Code fetch completed; the page will refresh automatically. (Translator's note: stray web-UI artifact preceding the patch headers.)
Date: Fri, 14 Mar 2025 07:02:01 +0000
Subject: !1 add port jbolt feature * add port jbolt feature
---
make/hotspot/lib/JvmFeatures.gmk | 2 +
make/hotspot/lib/JvmFlags.gmk | 6 +
make/hotspot/lib/JvmMapfile.gmk | 6 +
src/hotspot/os/linux/os_linux.cpp | 40 +
src/hotspot/os/linux/os_linux.hpp | 30 +-
src/hotspot/share/ci/ciEnv.cpp | 41 +-
src/hotspot/share/code/codeBlob.hpp | 9 +-
src/hotspot/share/code/codeCache.cpp | 33 +
src/hotspot/share/code/codeCache.hpp | 16 +-
src/hotspot/share/code/nmethod.cpp | 19 +
src/hotspot/share/code/nmethod.hpp | 8 +
src/hotspot/share/compiler/compileBroker.cpp | 9 +
src/hotspot/share/compiler/compileBroker.hpp | 3 +
src/hotspot/share/compiler/compileTask.hpp | 12 +
src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 8 +
src/hotspot/share/jbolt/jBoltCallGraph.cpp | 480 ++++++
src/hotspot/share/jbolt/jBoltCallGraph.hpp | 277 ++++
.../share/jbolt/jBoltControlThread.cpp | 220 +++
.../share/jbolt/jBoltControlThread.hpp | 69 +
src/hotspot/share/jbolt/jBoltDcmds.cpp | 221 +++
src/hotspot/share/jbolt/jBoltDcmds.hpp | 129 ++
src/hotspot/share/jbolt/jBoltManager.cpp | 1429 +++++++++++++++++
src/hotspot/share/jbolt/jBoltManager.hpp | 335 ++++
src/hotspot/share/jbolt/jBoltUtils.cpp | 38 +
src/hotspot/share/jbolt/jBoltUtils.hpp | 55 +
src/hotspot/share/jbolt/jBoltUtils.inline.hpp | 38 +
src/hotspot/share/jbolt/jbolt_globals.hpp | 62 +
src/hotspot/share/jfr/metadata/metadata.xml | 2 +
.../share/jfr/periodic/jfrPeriodic.cpp | 7 +
.../periodic/sampling/jfrThreadSampler.cpp | 13 +-
.../jfr/recorder/stacktrace/jfrStackTrace.cpp | 12 +-
.../jfr/recorder/stacktrace/jfrStackTrace.hpp | 29 +
.../stacktrace/jfrStackTraceRepository.cpp | 114 ++
.../stacktrace/jfrStackTraceRepository.hpp | 23 +
src/hotspot/share/logging/logTag.hpp | 1 +
src/hotspot/share/opto/doCall.cpp | 4 +-
src/hotspot/share/opto/parse1.cpp | 2 +-
src/hotspot/share/runtime/flags/allFlags.hpp | 15 +-
src/hotspot/share/runtime/java.cpp | 9 +
src/hotspot/share/runtime/threads.cpp | 19 +
src/hotspot/share/utilities/growableArray.hpp | 7 +
src/hotspot/share/utilities/macros.hpp | 15 +
.../cli/common/CodeCacheCLITestCase.java | 12 +-
.../cli/common/CodeCacheOptions.java | 62 +-
.../codecache/jbolt/JBoltDumpModeTest.java | 187 +++
.../codecache/jbolt/JBoltVMOptionsTest.java | 291 ++++
.../jtreg/compiler/codecache/jbolt/o1.log | 2 +
.../jtreg/compiler/codecache/jbolt/o2.log | 2 +
.../jtreg/compiler/codecache/jbolt/o3.log | 4 +
.../jtreg/compiler/codecache/jbolt/o4.log | 12 +
.../runtime/cds/appcds/ClassLoaderTest.java | 2 +-
test/lib/jdk/test/whitebox/code/BlobType.java | 24 +-
52 files changed, 4428 insertions(+), 37 deletions(-)
create mode 100644 src/hotspot/share/jbolt/jBoltCallGraph.cpp
create mode 100644 src/hotspot/share/jbolt/jBoltCallGraph.hpp
create mode 100644 src/hotspot/share/jbolt/jBoltControlThread.cpp
create mode 100644 src/hotspot/share/jbolt/jBoltControlThread.hpp
create mode 100644 src/hotspot/share/jbolt/jBoltDcmds.cpp
create mode 100644 src/hotspot/share/jbolt/jBoltDcmds.hpp
create mode 100644 src/hotspot/share/jbolt/jBoltManager.cpp
create mode 100644 src/hotspot/share/jbolt/jBoltManager.hpp
create mode 100644 src/hotspot/share/jbolt/jBoltUtils.cpp
create mode 100644 src/hotspot/share/jbolt/jBoltUtils.hpp
create mode 100644 src/hotspot/share/jbolt/jBoltUtils.inline.hpp
create mode 100644 src/hotspot/share/jbolt/jbolt_globals.hpp
create mode 100644 test/hotspot/jtreg/compiler/codecache/jbolt/JBoltDumpModeTest.java
create mode 100644 test/hotspot/jtreg/compiler/codecache/jbolt/JBoltVMOptionsTest.java
create mode 100644 test/hotspot/jtreg/compiler/codecache/jbolt/o1.log
create mode 100644 test/hotspot/jtreg/compiler/codecache/jbolt/o2.log
create mode 100644 test/hotspot/jtreg/compiler/codecache/jbolt/o3.log
create mode 100644 test/hotspot/jtreg/compiler/codecache/jbolt/o4.log
diff --git a/make/hotspot/lib/JvmFeatures.gmk b/make/hotspot/lib/JvmFeatures.gmk
index cbe60fde2..f57406dc1 100644
--- a/make/hotspot/lib/JvmFeatures.gmk
+++ b/make/hotspot/lib/JvmFeatures.gmk
@@ -160,6 +160,8 @@ endif
ifneq ($(call check-jvm-feature, jfr), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_JFR=0
JVM_EXCLUDE_PATTERNS += jfr
+ JVM_CFLAGS_FEATURES += -DINCLUDE_JBOLT=0
+ JVM_EXCLUDE_PATTERNS += jbolt
JVM_EXCLUDE_FILES += compilerEvent.cpp
endif
diff --git a/make/hotspot/lib/JvmFlags.gmk b/make/hotspot/lib/JvmFlags.gmk
index 1fadd5331..b77f1e3a6 100644
--- a/make/hotspot/lib/JvmFlags.gmk
+++ b/make/hotspot/lib/JvmFlags.gmk
@@ -41,6 +41,12 @@ JVM_SRC_DIRS += $(call uniq, $(wildcard $(foreach d, $(JVM_SRC_ROOTS), \
$(JVM_VARIANT_OUTPUTDIR)/gensrc
#
+JVM_ACC_PLUGIN_DIR := $(call FindSrcDirsForLib, java.base, jplugin)
+JVM_ACC_PLUGIN_SRC := $(JVM_ACC_PLUGIN_DIR)/feature
+ifeq ($(wildcard $(JVM_ACC_PLUGIN_SRC)), $(JVM_ACC_PLUGIN_SRC))
+ JVM_SRC_DIRS += $(JVM_ACC_PLUGIN_SRC)
+endif
+
JVM_CFLAGS_INCLUDES += \
$(patsubst %,-I%,$(JVM_SRC_DIRS)) \
-I$(TOPDIR)/src/hotspot/share/precompiled \
diff --git a/make/hotspot/lib/JvmMapfile.gmk b/make/hotspot/lib/JvmMapfile.gmk
index 2808ac2af..916aff4a3 100644
--- a/make/hotspot/lib/JvmMapfile.gmk
+++ b/make/hotspot/lib/JvmMapfile.gmk
@@ -48,6 +48,12 @@ ifneq ($(findstring debug, $(DEBUG_LEVEL)), )
endif
endif
+JVM_ACC_PLUGIN_DIR := $(call FindSrcDirsForLib, java.base, jplugin)
+JVM_ACC_PLUGIN_SYMBOLS_SRC := $(JVM_ACC_PLUGIN_DIR)/make/hotspot-symbols
+ifeq ($(wildcard $(JVM_ACC_PLUGIN_SYMBOLS_SRC)), $(JVM_ACC_PLUGIN_SYMBOLS_SRC))
+ SYMBOLS_SRC += $(JVM_ACC_PLUGIN_SYMBOLS_SRC)/symbols-plugin
+endif
+
################################################################################
# Create a dynamic list of symbols from the built object files. This is highly
# platform dependent.
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index ddfa97ecb..3cb529bc8 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -4582,6 +4582,46 @@ void os::Linux::numa_init() {
}
}
+#if INCLUDE_JBOLT
+os::Linux::jboltLog_precalc_t os::Linux::_jboltLog_precalc;
+os::Linux::jboltLog_do_t os::Linux::_jboltLog_do;
+os::Linux::jboltMerge_judge_t os::Linux::_jboltMerge_judge;
+#endif // INCLUDE_JBOLT
+
+void os::Linux::load_plugin_library() {
+
+#if INCLUDE_JBOLT
+ _jboltLog_precalc = CAST_TO_FN_PTR(jboltLog_precalc_t, dlsym(RTLD_DEFAULT, "JBoltLog_PreCalc"));
+ _jboltLog_do = CAST_TO_FN_PTR(jboltLog_do_t, dlsym(RTLD_DEFAULT, "JBoltLog_DO"));
+ _jboltMerge_judge = CAST_TO_FN_PTR(jboltMerge_judge_t, dlsym(RTLD_DEFAULT, "JBoltMerge_Judge"));
+#endif // INCLUDE_JBOLT
+
+ char path[JVM_MAXPATHLEN];
+ char ebuf[1024];
+ void* handle = NULL;
+ if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "jvm21_Acc") ||
+ os::dll_locate_lib(path, sizeof(path), "/usr/lib64", "jvm21_Acc")) {
+ handle = dlopen(path, RTLD_LAZY);
+ }
+ if (handle != NULL) {
+#if INCLUDE_JBOLT
+ if (_jboltLog_precalc == NULL) {
+ _jboltLog_precalc = CAST_TO_FN_PTR(jboltLog_precalc_t, dlsym(handle, "JBoltLog_PreCalc"));
+ }
+ if (_jboltLog_do == NULL) {
+ _jboltLog_do = CAST_TO_FN_PTR(jboltLog_do_t, dlsym(handle, "JBoltLog_DO"));
+ }
+ if (_jboltMerge_judge == NULL) {
+ _jboltMerge_judge = CAST_TO_FN_PTR(jboltMerge_judge_t, dlsym(handle, "JBoltMerge_Judge"));
+ }
+#endif // INCLUDE_JBOLT
+ }
+
+ JBOLT_ONLY(log_debug(jbolt)("Plugin library for JBolt: %s %s %s", BOOL_TO_STR(_jboltLog_precalc != nullptr),
+ BOOL_TO_STR(_jboltLog_do != nullptr),
+ BOOL_TO_STR(_jboltMerge_judge != nullptr));)
+}
+
#if defined(IA32) && !defined(ZERO)
/*
* Work-around (execute code at a high address) for broken NX emulation using CS limit,
diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp
index 029f2aa7a..4192c90bd 100644
--- a/src/hotspot/os/linux/os_linux.hpp
+++ b/src/hotspot/os/linux/os_linux.hpp
@@ -139,6 +139,7 @@ class os::Linux {
static const char *libc_version() { return _libc_version; }
static const char *libpthread_version() { return _libpthread_version; }
+ static void load_plugin_library();
static void libpthread_init();
static void sched_getcpu_init();
static bool libnuma_init();
@@ -214,7 +215,14 @@ class os::Linux {
typedef void (*numa_set_bind_policy_func_t)(int policy);
typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
typedef int (*numa_distance_func_t)(int node1, int node2);
-
+#if INCLUDE_JBOLT
+ typedef void (*jboltLog_precalc_t)(unsigned int topFrameIndex, unsigned int &max_frames);
+ typedef bool (*jboltLog_do_t)(uintptr_t related_data[], address stacktrace, unsigned int i, int comp_level, address new_func, address *tempfunc);
+ typedef int (*jboltMerge_judge_t)(uintptr_t data_layout[], int candidate, address clusters, address merged, address cluster);
+ static jboltLog_precalc_t _jboltLog_precalc;
+ static jboltLog_do_t _jboltLog_do;
+ static jboltMerge_judge_t _jboltMerge_judge;
+#endif
static sched_getcpu_func_t _sched_getcpu;
static numa_node_to_cpus_func_t _numa_node_to_cpus;
static numa_node_to_cpus_v2_func_t _numa_node_to_cpus_v2;
@@ -431,6 +439,26 @@ class os::Linux {
// otherwise does nothing and returns -2.
static int malloc_info(FILE* stream);
#endif // GLIBC
+
+#if INCLUDE_JBOLT
+ static void jboltLog_precalc(unsigned int topFrameIndex, unsigned int &max_frames) {
+ if (_jboltLog_precalc != nullptr) {
+ _jboltLog_precalc(topFrameIndex, max_frames);
+ }
+ }
+ static bool jboltLog_do(uintptr_t related_data[], address stacktrace, unsigned int i, int comp_level, address new_func, address *tempfunc) {
+ if (_jboltLog_do != nullptr) {
+ return _jboltLog_do(related_data, stacktrace, i, comp_level, new_func, tempfunc);
+ }
+ return false;
+ }
+ static int jboltMerge_judge(uintptr_t data_layout[], int candidate, address clusters, address merged, address cluster) {
+ if (_jboltMerge_judge != nullptr) {
+ return _jboltMerge_judge(data_layout, candidate, clusters, merged, cluster);
+ }
+ return -1;
+ }
+#endif // INCLUDE_JBOLT
};
#endif // OS_LINUX_OS_LINUX_HPP
diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp
index 6112b46ac..0c48b6153 100644
--- a/src/hotspot/share/ci/ciEnv.cpp
+++ b/src/hotspot/share/ci/ciEnv.cpp
@@ -83,6 +83,9 @@
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
+#if INCLUDE_JBOLT
+#include "jbolt/jBoltManager.hpp"
+#endif
// ciEnv
//
@@ -1117,15 +1120,35 @@ void ciEnv::register_method(ciMethod* target,
assert(offsets->value(CodeOffsets::Deopt) != -1, "must have deopt entry");
assert(offsets->value(CodeOffsets::Exceptions) != -1, "must have exception entry");
- nm = nmethod::new_nmethod(method,
- compile_id(),
- entry_bci,
- offsets,
- orig_pc_offset,
- debug_info(), dependencies(), code_buffer,
- frame_words, oop_map_set,
- handler_table, inc_table,
- compiler, CompLevel(task()->comp_level()));
+#if INCLUDE_JBOLT
+ if (UseJBolt && JBoltManager::reorder_phase_is_collecting_or_reordering()) {
+ CodeBlobType code_blob_type = JBoltManager::calc_code_blob_type(method(), task(), THREAD);
+ nm = nmethod::new_nmethod(method,
+ compile_id(),
+ entry_bci,
+ offsets,
+ orig_pc_offset,
+ debug_info(), dependencies(), code_buffer,
+ frame_words, oop_map_set,
+ handler_table, inc_table,
+ compiler, CompLevel(task()->comp_level()),
+#if INCLUDE_JVMCI
+ nullptr, 0, nullptr,
+#endif
+ code_blob_type);
+ } else
+#endif // INCLUDE_JBOLT
+ {
+ nm = nmethod::new_nmethod(method,
+ compile_id(),
+ entry_bci,
+ offsets,
+ orig_pc_offset,
+ debug_info(), dependencies(), code_buffer,
+ frame_words, oop_map_set,
+ handler_table, inc_table,
+ compiler, CompLevel(task()->comp_level()));
+ }
// Free codeBlobs
code_buffer->free_blob();
diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp
index e842bd88a..8392499d3 100644
--- a/src/hotspot/share/code/codeBlob.hpp
+++ b/src/hotspot/share/code/codeBlob.hpp
@@ -44,9 +44,12 @@ class OopMapSet;
enum class CodeBlobType {
MethodNonProfiled = 0, // Execution level 1 and 4 (non-profiled) nmethods (including native nmethods)
MethodProfiled = 1, // Execution level 2 and 3 (profiled) nmethods
- NonNMethod = 2, // Non-nmethods like Buffers, Adapters and Runtime Stubs
- All = 3, // All types (No code cache segmentation)
- NumTypes = 4 // Number of CodeBlobTypes
+ MethodJBoltHot = 2, // Hot methods (determined by JBolt) of level 1 and 4 nmethods
+ MethodJBoltTmp = 3, // Temporary storage of JBolt hot methods
+ NonNMethod = 4, // Non-nmethods like Buffers, Adapters and Runtime Stubs
+ All = 5, // All types (No code cache segmentation)
+ AOT = 6, // AOT methods
+ NumTypes = 7 // Number of CodeBlobTypes
};
// CodeBlob - superclass for all entries in the CodeCache.
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index 9b0bdc364..f63e4f266 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -77,6 +77,9 @@
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif
+#if INCLUDE_JBOLT
+#include "jbolt/jBoltManager.hpp"
+#endif // INCLUDE_JBOLT
// Helper class for printing in CodeCache
class CodeBlob_sizes {
@@ -329,6 +332,16 @@ void CodeCache::initialize_heaps() {
profiled_size = align_down(profiled_size, alignment);
non_profiled_size = align_down(non_profiled_size, alignment);
+#if INCLUDE_JBOLT
+ if (UseJBolt && !JBoltDumpMode) {
+ // We replace the original add-heap logic with the JBolt one; manual dump mode doesn't need it.
+ JBoltManager::init_code_heaps(non_nmethod_size, profiled_size, non_profiled_size, cache_size, ps, alignment);
+ return;
+ }
+ // The following add-heap logic will not be executed if JBolt load mode is on.
+ // If the following logic is modified, remember to modify the JBolt logic accordingly.
+#endif // INCLUDE_JBOLT
+
// Reserve one continuous chunk of memory for CodeHeaps and split it into
// parts for the individual heaps. The memory layout looks like this:
// ---------- high -----------
@@ -384,6 +397,12 @@ ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
// Heaps available for allocation
bool CodeCache::heap_available(CodeBlobType code_blob_type) {
+ if (code_blob_type == CodeBlobType::MethodJBoltHot) {
+ return JBOLT_ONLY(UseJBolt && !JBoltDumpMode) NOT_JBOLT(false);
+ } else if (code_blob_type == CodeBlobType::MethodJBoltTmp) {
+ return JBOLT_ONLY(UseJBolt && !JBoltDumpMode) NOT_JBOLT(false);
+ }
+
if (!SegmentedCodeCache) {
// No segmentation: use a single code heap
return (code_blob_type == CodeBlobType::All);
@@ -411,6 +430,12 @@ const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
case CodeBlobType::MethodProfiled:
return "ProfiledCodeHeapSize";
break;
+ case CodeBlobType::MethodJBoltHot:
+ return "JBoltHotCodeHeapSize";
+ break;
+ case CodeBlobType::MethodJBoltTmp:
+ return "JBoltTmpCodeHeapSize";
+ break;
default:
ShouldNotReachHere();
return nullptr;
@@ -558,6 +583,14 @@ CodeBlob* CodeCache::allocate(int size, CodeBlobType code_blob_type, bool handle
type = CodeBlobType::MethodNonProfiled;
}
break;
+#if INCLUDE_JBOLT
+ case CodeBlobType::MethodJBoltHot:
+ type = CodeBlobType::MethodNonProfiled;
+ break;
+ case CodeBlobType::MethodJBoltTmp:
+ type = CodeBlobType::MethodNonProfiled;
+ break;
+#endif // INCLUDE_JBOLT
default:
break;
}
diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp
index fbcaefd2a..df38664e7 100644
--- a/src/hotspot/share/code/codeCache.hpp
+++ b/src/hotspot/share/code/codeCache.hpp
@@ -48,6 +48,10 @@
// executed at level 2 or 3
// - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
// executed at level 1 or 4 and native methods
+// - JBolt nmethods: sorted non-profiled nmethods that are judged to be hot
+// by JBolt
+// - JBolt tmp nmethods: non-profiled nmethods that are judged to be hot by
+// JBolt but not sorted yet
// - All: Used for code of all types if code cache segmentation is disabled.
//
// In the rare case of the non-nmethod code heap getting full, non-nmethod code
@@ -87,6 +91,10 @@ class CodeCache : AllStatic {
friend class WhiteBox;
friend class CodeCacheLoader;
friend class ShenandoahParallelCodeHeapIterator;
+#if INCLUDE_JBOLT
+ friend class JBoltManager;
+#endif // INCLUDE_JBOLT
+
private:
// CodeHeaps of the cache
static GrowableArray<CodeHeap*>* _heaps;
@@ -266,12 +274,16 @@ class CodeCache : AllStatic {
}
static bool code_blob_type_accepts_compiled(CodeBlobType code_blob_type) {
- bool result = code_blob_type == CodeBlobType::All || code_blob_type <= CodeBlobType::MethodProfiled;
+ // Modified `type <= CodeBlobType::MethodProfiled` to `type < CodeBlobType::NonNMethod`
+ // after adding the JBolt heap. The two logics are still equivalent even without JBolt.
+ bool result = code_blob_type == CodeBlobType::All || code_blob_type < CodeBlobType::NonNMethod;
return result;
}
static bool code_blob_type_accepts_nmethod(CodeBlobType type) {
- return type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled;
+ // Modified `type <= CodeBlobType::MethodProfiled` to `type < CodeBlobType::NonNMethod`
+ // after adding the JBolt heap. The two logics are still equivalent even without JBolt.
+ return type == CodeBlobType::All || type < CodeBlobType::NonNMethod;
}
static bool code_blob_type_accepts_allocable(CodeBlobType type) {
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index 500ddbc2f..24b5139cf 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -85,6 +85,9 @@
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif
+#if INCLUDE_JBOLT
+#include "jbolt/jBoltManager.hpp"
+#endif
#ifdef DTRACE_ENABLED
@@ -554,6 +557,9 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
int speculations_len,
JVMCINMethodData* jvmci_data
#endif
+#if INCLUDE_JBOLT
+ , CodeBlobType code_blob_type // for jbolt
+#endif // INCLUDE_JBOLT
)
{
assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
@@ -577,7 +583,11 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+#if INCLUDE_JBOLT
+ nm = new (nmethod_size, comp_level, code_blob_type)
+#else // INCLUDE_JBOLT
nm = new (nmethod_size, comp_level)
+#endif // INCLUDE_JBOLT
nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets,
orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
oop_maps,
@@ -761,6 +771,15 @@ void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod
return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
}
+#if INCLUDE_JBOLT
+void* nmethod::operator new(size_t size, int nmethod_size, int comp_level, CodeBlobType code_blob_type) throw () {
+ if (code_blob_type < CodeBlobType::All) {
+ return CodeCache::allocate(nmethod_size, code_blob_type);
+ }
+ return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
+}
+#endif // INCLUDE_JBOLT
+
nmethod::nmethod(
Method* method,
CompilerType type,
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index 03b8210c3..ddb8a1274 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -307,6 +307,11 @@ class nmethod : public CompiledMethod {
// findable by nmethod iterators! In particular, they must not contain oops!
void* operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();
+#if INCLUDE_JBOLT
+ // For JBolt. So the code can be allocated in code segments defined by JBolt.
+ void* operator new(size_t size, int nmethod_size, int comp_level, CodeBlobType code_blob_type) throw ();
+#endif // INCLUDE_JBOLT
+
const char* reloc_string_for(u_char* begin, u_char* end);
bool try_transition(int new_state);
@@ -349,6 +354,9 @@ class nmethod : public CompiledMethod {
int speculations_len = 0,
JVMCINMethodData* jvmci_data = nullptr
#endif
+#if INCLUDE_JBOLT
+ , CodeBlobType code_blob_type = CodeBlobType::All // for jbolt
+#endif // INCLUDE_JBOLT
);
// Only used for unit tests.
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index 9c1bf5393..3a59a17eb 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -83,6 +83,9 @@
#include "jvmci/jvmciEnv.hpp"
#include "jvmci/jvmciRuntime.hpp"
#endif
+#if INCLUDE_JBOLT
+#include "jbolt/jBoltManager.hpp"
+#endif // INCLUDE_JBOLT
#ifdef DTRACE_ENABLED
@@ -1959,6 +1962,12 @@ void CompileBroker::compiler_thread_loop() {
task->set_failure_reason("breakpoints are present");
}
+#if INCLUDE_JBOLT
+ if (UseJBolt && JBoltLoadMode) {
+ JBoltManager::check_start_reordering(thread);
+ }
+#endif // INCLUDE_JBOLT
+
if (UseDynamicNumberOfCompilerThreads) {
possibly_add_compiler_threads(thread);
assert(!thread->has_pending_exception(), "should have been handled");
diff --git a/src/hotspot/share/compiler/compileBroker.hpp b/src/hotspot/share/compiler/compileBroker.hpp
index b7f09259f..a2143ad93 100644
--- a/src/hotspot/share/compiler/compileBroker.hpp
+++ b/src/hotspot/share/compiler/compileBroker.hpp
@@ -140,6 +140,9 @@ public:
class CompileBroker: AllStatic {
friend class Threads;
friend class CompileTaskWrapper;
+#if INCLUDE_JBOLT
+ friend class JBoltManager;
+#endif // INCLUDE_JBOLT
public:
enum {
diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp
index f5b628232..aff2df38a 100644
--- a/src/hotspot/share/compiler/compileTask.hpp
+++ b/src/hotspot/share/compiler/compileTask.hpp
@@ -56,6 +56,9 @@ class CompileTask : public CHeapObj<mtCompiler> {
Reason_Whitebox, // Whitebox API
Reason_MustBeCompiled, // Used for -Xcomp or AlwaysCompileLoopMethods (see CompilationPolicy::must_be_compiled())
Reason_Bootstrap, // JVMCI bootstrap
+#if INCLUDE_JBOLT
+ Reason_Reorder, // JBolt reorder
+#endif
Reason_Count
};
@@ -69,6 +72,9 @@ class CompileTask : public CHeapObj<mtCompiler> {
"whitebox",
"must_be_compiled",
"bootstrap"
+#if INCLUDE_JBOLT
+ , "reorder"
+#endif
};
return reason_names[compile_reason];
}
@@ -230,6 +236,12 @@ public:
print_inlining_inner(tty, method, inline_level, bci, msg);
}
static void print_inlining_ul(ciMethod* method, int inline_level, int bci, const char* msg = nullptr);
+
+#if INCLUDE_JBOLT
+ CompileReason compile_reason() { return _compile_reason; }
+ int hot_count() { return _hot_count; }
+ const char* failure_reason() { return _failure_reason; }
+#endif // INCLUDE_JBOLT
};
#endif // SHARE_COMPILER_COMPILETASK_HPP
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 9b6b22a29..67667a08c 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -116,6 +116,9 @@
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
+#if INCLUDE_JBOLT
+#include "jbolt/jBoltManager.hpp"
+#endif // INCLUDE_JBOLT
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
@@ -1645,6 +1648,11 @@ size_t G1CollectedHeap::recalculate_used() const {
}
bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
+#if INCLUDE_JBOLT
+ if (UseJBolt && cause == GCCause::_java_lang_system_gc && JBoltManager::gc_should_sweep_code_heaps_now()) {
+ return true;
+ }
+#endif // INCLUDE_JBOLT
return GCCause::is_user_requested_gc(cause) && ExplicitGCInvokesConcurrent;
}
diff --git a/src/hotspot/share/jbolt/jBoltCallGraph.cpp b/src/hotspot/share/jbolt/jBoltCallGraph.cpp
new file mode 100644
index 000000000..c171215ad
--- /dev/null
+++ b/src/hotspot/share/jbolt/jBoltCallGraph.cpp
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "jbolt/jBoltCallGraph.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/support/jfrMethodLookup.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+#include "oops/method.inline.hpp"
+#include "runtime/os.hpp"
+#include "utilities/defaultStream.hpp"
+#ifdef LINUX
+#include "os_linux.hpp"
+#endif
+
+#define PAGE_SIZE os::vm_page_size()
+
+static GrowableArray<JBoltCluster>* _clusters = NULL;
+static GrowableArray<JBoltCall>* _calls = NULL;
+static GrowableArray<JBoltFunc>* _funcs = NULL;
+
+// (JBolt hfsort optional)sort final clusters by density
+static const bool _jbolt_density_sort = false;
+// (JBolt hfsort optional)freeze merging while exceeding pagesize
+static const bool _jbolt_merge_frozen = false;
+
+void JBoltCallGraph::initialize() {
+ ::_clusters = JBoltCallGraph::callgraph_instance().callgraph_clusters();
+ ::_calls = JBoltCallGraph::callgraph_instance().callgraph_calls();
+ ::_funcs = JBoltCallGraph::callgraph_instance().callgraph_funcs();
+}
+
+void JBoltCallGraph::deinitialize() {
+ ::_clusters = NULL;
+ ::_calls = NULL;
+ ::_funcs = NULL;
+}
+
+int JBoltCallGraph::clear_instance() {
+ delete _clusters;
+ delete _calls;
+ delete _funcs;
+
+ // Reinit default cluster start id
+ _init_cluster_id = 0;
+
+ // Re-allocate
+ _clusters = create_growable_array<JBoltCluster>();
+ _calls = create_growable_array<JBoltCall>();
+ _funcs = create_growable_array<JBoltFunc>();
+
+ // Re-initialize
+ initialize();
+
+ return 0;
+}
+
+static GrowableArray<JBoltCluster>* clusters_copy() {
+ GrowableArray<JBoltCluster>* copy = create_growable_array<JBoltCluster>(_clusters->length());
+ copy->appendAll(_clusters);
+ return copy;
+}
+
+static GrowableArray<JBoltFunc>* funcs_copy() {
+ GrowableArray<JBoltFunc>* copy = create_growable_array<JBoltFunc>(_funcs->length());
+ copy->appendAll(_funcs);
+ return copy;
+}
+
+static int find_func_index(const JBoltFunc* func) {
+ for (int i = 0; i < _funcs->length(); ++i) {
+ JBoltFunc& existing = _funcs->at(i);
+ if (existing == (*func)) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+// Search for the cluster containing the given func, or create a new one if it doesn't exist
+static JBoltCluster* find_cluster(JBoltFunc* func) {
+ for (int i = 0; i < _clusters->length(); ++i) {
+ JBoltCluster& cluster = _clusters->at(i);
+ int index = cluster.func_indexes()->at(0);
+ if (_funcs->at(index) == (*func)) {
+ return &cluster;
+ }
+ }
+ _funcs->append(*func);
+ _clusters->append(JBoltCluster(*func));
+ JBoltCluster& cluster = _clusters->at(_clusters->length() - 1);
+ _funcs->at(_funcs->length() - 1).set_cluster_id(cluster.id());
+ return &cluster;
+}
+
+// Creating a new call in graph or updating the weight if exists
+static void add_call_to_calls(GrowableArray<JBoltCall>* calls, const JBoltCall* call) {
+ for (int i = 0; i < calls->length(); ++i) {
+ JBoltCall& existing_call = calls->at(i);
+ if (existing_call == *call) {
+ if (existing_call.stacktrace_id() == call->stacktrace_id()) {
+ assert(call->call_count() >= existing_call.call_count(), "invariant");
+ existing_call.callee().add_heat(call->call_count() - existing_call.call_count());
+ existing_call.set_call_count(call->call_count());
+ }
+ else {
+ existing_call.callee().add_heat(call->call_count());
+ existing_call.set_call_count(existing_call.call_count() + call->call_count());
+ }
+ return;
+ }
+ }
+
+ calls->append(*call);
+ call->callee().add_heat(call->call_count());
+ call->callee().append_call_index(calls->length() - 1);
+}
+
+// Getting final funcs order from an array of processed clusters
+static GrowableArray<JBoltFunc>* clusters_to_funcs_order(GrowableArray<JBoltCluster>* clusters) {
+ log_debug(jbolt)( "sorted clusters:\n");
+ for (int i = 0; i < clusters->length(); ++i) {
+ log_debug(jbolt)( "cluster id: %d heats: %ld size: %dB density: %f\n", clusters->at(i).id(), clusters->at(i).heats(), clusters->at(i).size(), clusters->at(i).density());
+ for (int j = 0; j < clusters->at(i).get_funcs_count(); ++j) {
+ JBoltFunc& func = _funcs->at(clusters->at(i).func_indexes()->at(j));
+ const Method* const method = JfrMethodLookup::lookup(func.klass(), func.method_id());
+ if (method != NULL) {
+ log_debug(jbolt)( "%d: method signature:%s heat: %ld size: %dB\n",
+ j, method->external_name(), func.heat(), func.size());
+ }
+ }
+ }
+
+ GrowableArray<JBoltFunc>* order = create_growable_array<JBoltFunc>(_funcs->length());
+ // used as a separator between distinct clusters, klass = NULL
+ JBoltFunc seperator_func;
+ order->append(seperator_func);
+ for (int i = 0; i < clusters->length(); ++i) {
+ JBoltCluster& cluster = clusters->at(i);
+ GrowableArray<int>* func_indexes = cluster.func_indexes();
+
+ for (int j = 0; j < func_indexes->length(); ++j) {
+ int index = func_indexes->at(j);
+ order->append(_funcs->at(index));
+ }
+
+ order->append(seperator_func);
+ }
+ return order;
+}
+
+// Comparator used to sort an array of funcs by their weights (in decreasing order)
+static int func_comparator(JBoltFunc* func1, JBoltFunc* func2) {
+ return func1->heat() < func2->heat();
+}
+
+// Comparing cluster needed to sort an array of clusters by their densities (in decreasing order)
+static int cluster_comparator(JBoltCluster* cluster1, JBoltCluster* cluster2) {
+ return _jbolt_density_sort ? (cluster1->density() < cluster2->density()) : (cluster1->heats() < cluster2 -> heats());
+}
+
+// Comparing call indexes needed to sort an array of call indexes by their call counts (in decreasing order)
+static int func_call_indexes_comparator(int* index1, int* index2) {
+ return _calls->at(*index1).call_count() < _calls->at(*index2).call_count();
+}
+
+JBoltCallGraph& JBoltCallGraph::callgraph_instance() {
+ static JBoltCallGraph _call_graph;
+ return _call_graph;
+}
+
+void JBoltCallGraph::add_func(JBoltFunc* func) {
+ if (!(UseJBolt && JBoltManager::reorder_phase_is_profiling_or_waiting())) return;
+ JBoltCluster* cluster = find_cluster(func);
+ assert(cluster != NULL, "invariant");
+}
+
+void JBoltCallGraph::add_call(JBoltCall* call) {
+ if (!(UseJBolt && JBoltManager::reorder_phase_is_profiling_or_waiting())) return;
+ add_call_to_calls(_calls, call);
+}
+
+uintptr_t data_layout_jbolt[] = {
+ (uintptr_t)in_bytes(JBoltCluster::id_offset()),
+ (uintptr_t)in_bytes(JBoltCluster::heats_offset()),
+ (uintptr_t)in_bytes(JBoltCluster::frozen_offset()),
+ (uintptr_t)in_bytes(JBoltCluster::size_offset()),
+ (uintptr_t)in_bytes(JBoltCluster::density_offset()),
+ (uintptr_t)in_bytes(JBoltCluster::func_indexes_offset()),
+
+ (uintptr_t)in_bytes(GrowableArrayView<address>::data_offset()),
+
+ (uintptr_t)JBoltCluster::find_cluster_by_id,
+ (uintptr_t)_jbolt_merge_frozen
+};
+
+static void deal_with_each_func(GrowableArray<JBoltCluster>* clusters, GrowableArray<JBoltFunc>* funcs, GrowableArray<int>* merged) {
+ for (int i = 0; i < funcs->length(); ++i) {
+ JBoltFunc& func = funcs->at(i);
+
+ JBoltCluster* cluster = JBoltCluster::find_cluster_by_id(clusters, func.cluster_id());
+
+ // a cluster larger than the page size should be frozen and not merged with any other cluster
+ if (_jbolt_merge_frozen && cluster->frozen()) continue;
+
+ // find best predecessor
+ func.call_indexes()->sort(&func_call_indexes_comparator);
+
+ int bestPred = -1;
+
+ for (int j = 0; j < func.call_indexes()->length(); ++j) {
+ const JBoltCall& call = _calls->at(func.call_indexes()->at(j));
+
+ bestPred = os::Linux::jboltMerge_judge(data_layout_jbolt, call.caller().cluster_id(), (address)clusters, (address)merged, (address)cluster);
+
+ if (bestPred == -1) continue;
+
+ break;
+ }
+
+ // not merge -- no suitable caller nodes
+ if (bestPred == -1) {
+ continue;
+ }
+
+ JBoltCluster* predCluster = JBoltCluster::find_cluster_by_id(clusters, bestPred);
+
+ // merge callee cluster to caller cluster
+ for (int j = 0; j < cluster->func_indexes()->length(); ++j) {
+ int index = cluster->func_indexes()->at(j);
+ predCluster->append_func_index(index);
+ }
+ predCluster->add_heat(cluster->heats());
+ predCluster->add_size(cluster->size());
+ predCluster->update_density();
+ merged->at(cluster->id()) = bestPred;
+ cluster->clear();
+ }
+}
+
+// Every node is a cluster with funcs
+// Initially each cluster has only one func inside
+// Every node is a cluster with funcs
+// Initially each cluster has only one func inside
+// Runs the HFSort pipeline on copies of the recorded call graph and returns
+// a newly heap-allocated func ordering (caller owns it). Returns NULL unless
+// JBolt is enabled in dump mode or auto mode.
+GrowableArray<JBoltFunc>* JBoltCallGraph::hfsort() {
+  if (!(UseJBolt && (JBoltDumpMode || JBoltManager::auto_mode()))) return NULL;
+  log_debug(jbolt)( "hfsort begin...\n");
+  // Copies are needed for saving initial graph in memory
+  GrowableArray<JBoltCluster>* clusters = clusters_copy();
+  GrowableArray<JBoltFunc>* funcs = funcs_copy();
+
+  // store a map for finding head of merge chain
+  GrowableArray<int>* merged = create_growable_array<int>(clusters->length());
+  for (int i = 0; i < clusters->length(); ++i) {
+    merged->append(-1);
+  }
+
+  // sorted by func(initially a node) weight(now just as 'heat')
+  funcs->sort(&func_comparator);
+
+  // Process each function, and consider merging its cluster with the
+  // one containing its most likely predecessor.
+  deal_with_each_func(clusters, funcs, merged);
+
+  // the set of clusters that are left (id == -1 marks clusters that were
+  // emptied by merging; see JBoltCluster::clear())
+  GrowableArray<JBoltCluster>* sortedClusters = create_growable_array<JBoltCluster>();
+  for (int i = 0; i < clusters->length(); ++i) {
+    if (clusters->at(i).id() != -1) {
+      sortedClusters->append(clusters->at(i));
+    }
+  }
+
+  sortedClusters->sort(&cluster_comparator);
+
+  // Flatten the sorted clusters back into a single func order.
+  GrowableArray<JBoltFunc>* order = clusters_to_funcs_order(sortedClusters);
+
+  // All intermediates are owned here; only `order` escapes to the caller.
+  delete clusters;
+  delete funcs;
+  delete merged;
+  delete sortedClusters;
+  log_debug(jbolt)( "hfsort over...\n");
+
+  return order;
+}
+
+// Default: an invalid func (no klass, cluster -1) with an empty call list.
+JBoltFunc::JBoltFunc() :
+  _klass(NULL),
+  _method_id(0),
+  _heat(0),
+  _size(0),
+  _cluster_id(-1),
+  _method_key(),
+  _call_indexes(create_growable_array<int>()) {}
+
+// Copy: deep-copies the call-index array (each func owns its own array).
+JBoltFunc::JBoltFunc(const JBoltFunc& func) :
+  _klass(func._klass),
+  _method_id(func._method_id),
+  _heat(func._heat),
+  _size(func._size),
+  _cluster_id(func._cluster_id),
+  _method_key(func._method_key),
+  _call_indexes(create_growable_array<int>(func.get_calls_count())) {
+    GrowableArray<int>* array = func.call_indexes();
+    _call_indexes->appendAll(array);
+  }
+
+JBoltFunc::JBoltFunc(const InstanceKlass* klass, traceid method_id, int size, JBoltMethodKey method_key) :
+  _klass(klass),
+  _method_id(method_id),
+  _heat(0),
+  _size(size),
+  _cluster_id(-1),
+  _method_key(method_key),
+  _call_indexes(create_growable_array<int>()) {
+    // not new_symbol, need to inc reference cnt
+    _method_key.klass()->increment_refcount();
+    _method_key.name()->increment_refcount();
+    _method_key.sig()->increment_refcount();
+  }
+
+// Heat updates are mirrored into the owning cluster (global _clusters),
+// which keeps the cluster's density consistent.
+void JBoltFunc::add_heat(int64_t heat) {
+  _heat += heat;
+  assert(_cluster_id != -1, "invariant");
+  _clusters->at(_cluster_id).add_heat(heat);
+  _clusters->at(_cluster_id).update_density();
+}
+
+// Overwrite the heat; the cluster receives only the delta.
+void JBoltFunc::set_heat(int64_t heat) {
+  int64_t diff = heat - _heat;
+  _heat = heat;
+  assert(_cluster_id != -1, "invariant");
+  _clusters->at(_cluster_id).add_heat(diff);
+  _clusters->at(_cluster_id).update_density();
+}
+
+void JBoltFunc::set_cluster_id(int cluster_id) { _cluster_id = cluster_id; }
+
+void JBoltFunc::append_call_index(int index) { _call_indexes->append(index); }
+
+// Heap-allocating factory wrappers.
+JBoltFunc* JBoltFunc::constructor(const InstanceKlass* klass, traceid method_id, int size, JBoltMethodKey method_key) {
+  JBoltFunc *ret = new JBoltFunc(klass, method_id, size, method_key);
+  return ret;
+}
+
+JBoltFunc* JBoltFunc::copy_constructor(const JBoltFunc* func) {
+  JBoltFunc *ret = new JBoltFunc(*func);
+  return ret;
+}
+
+// Default: an invalid cluster (id -1) with no member funcs.
+JBoltCluster::JBoltCluster() :
+  _id(-1),
+  _heats(0),
+  _frozen(false),
+  _size(0),
+  _density(0.0),
+  _func_indexes(create_growable_array<int>()) {}
+
+// Build a fresh single-member cluster around `func`, taking the next global
+// cluster id. Clusters already at or above a code page are frozen so the
+// merge step will not grow them further.
+JBoltCluster::JBoltCluster(const JBoltFunc& func) :
+  _id(_init_cluster_id++),
+  _heats(func.heat()),
+  _frozen(false),
+  _size(func.size()),
+  _density(0.0),
+  _func_indexes(create_growable_array<int>()) {
+    if (_size >= (int) PAGE_SIZE)
+      freeze();
+
+    update_density();
+
+    // The func must already be registered globally (find_func_index).
+    int func_idx = find_func_index(&func);
+    assert(func_idx != -1, "invariant");
+    _func_indexes->append(func_idx);
+  }
+
+// Copy: deep-copies the member-func index array.
+JBoltCluster::JBoltCluster(const JBoltCluster& cluster) :
+  _id(cluster.id()),
+  _heats(cluster.heats()),
+  _frozen(cluster.frozen()),
+  _size(cluster.size()),
+  _density(cluster.density()),
+  _func_indexes(create_growable_array<int>(cluster.get_funcs_count())) {
+    GrowableArray<int>* array = cluster.func_indexes();
+    _func_indexes->appendAll(array);
+  }
+
+void JBoltCluster::add_heat(int64_t heat) { _heats += heat; }
+
+void JBoltCluster::freeze() { _frozen = true; }
+
+void JBoltCluster::add_size(int size) { _size += size; }
+
+// density = heat per byte; used to order clusters in the final layout.
+void JBoltCluster::update_density() { _density = (double)_heats / (double)_size; }
+
+void JBoltCluster::append_func_index(int index) { _func_indexes->append(index); }
+
+// Reset to the invalid state (id -1). Used after this cluster is merged
+// into another; hfsort() skips clusters whose id is -1.
+void JBoltCluster::clear() {
+  _id = -1;
+  _heats = 0;
+  _frozen = false;
+  _size = 0;
+  _density = 0.0;
+  _func_indexes->clear();
+}
+
+// Searching for a cluster by its id
+// Cluster id doubles as the array index; out-of-range ids (including -1,
+// which wraps to a huge u4) yield NULL.
+JBoltCluster* JBoltCluster::find_cluster_by_id(GrowableArray<JBoltCluster>* clusters, u4 id) {
+  if (id >= (u4)clusters->length()) return NULL;
+
+  return &(clusters->at(id));
+}
+
+// Heap-allocating factory wrappers.
+JBoltCluster* JBoltCluster::constructor(const JBoltFunc* func) {
+  JBoltCluster *ret = new JBoltCluster(*func);
+  return ret;
+}
+
+JBoltCluster* JBoltCluster::copy_constructor(const JBoltCluster* cluster) {
+  JBoltCluster *ret = new JBoltCluster(*cluster);
+  return ret;
+}
+
+// Default: an invalid edge (both endpoints -1).
+JBoltCall::JBoltCall() :
+  _caller_index(-1),
+  _callee_index(-1),
+  _call_count(0),
+  _stacktrace_id(0) {}
+
+JBoltCall::JBoltCall(const JBoltCall& call) :
+  _caller_index(call._caller_index),
+  _callee_index(call._callee_index),
+  _call_count(call._call_count),
+  _stacktrace_id(call._stacktrace_id) {}
+
+// Build an edge between two funcs that must already be registered in the
+// global func array (find_func_index); both lookups are asserted.
+JBoltCall::JBoltCall(const JBoltFunc& caller_func, const JBoltFunc& callee_func, u4 call_count, traceid stacktrace_id) :
+  _call_count(call_count),
+  _stacktrace_id(stacktrace_id) {
+    _caller_index = find_func_index(&caller_func);
+    _callee_index = find_func_index(&callee_func);
+    assert(_caller_index != -1, "invariant");
+    assert(_callee_index != -1, "invariant");
+  }
+
+// Endpoints resolve through the global _funcs array.
+JBoltFunc& JBoltCall::caller() const { return _funcs->at(_caller_index); }
+
+JBoltFunc& JBoltCall::callee() const { return _funcs->at(_callee_index); }
+
+void JBoltCall::set_caller_index(int index) { _caller_index = index; }
+
+void JBoltCall::set_callee_index(int index) { _callee_index = index; }
+
+void JBoltCall::set_call_count(u4 call_count) { _call_count = call_count; }
+
+// Heap-allocating factory wrappers.
+JBoltCall* JBoltCall::constructor(const JBoltFunc* caller_func, const JBoltFunc* callee_func, u4 call_count, traceid stacktrace_id) {
+  JBoltCall *ret = new JBoltCall(*caller_func, *callee_func, call_count, stacktrace_id);
+  return ret;
+}
+
+JBoltCall* JBoltCall::copy_constructor(const JBoltCall* call) {
+  JBoltCall *ret = new JBoltCall(*call);
+  return ret;
+}
\ No newline at end of file
diff --git a/src/hotspot/share/jbolt/jBoltCallGraph.hpp b/src/hotspot/share/jbolt/jBoltCallGraph.hpp
new file mode 100644
index 000000000..93115f3ce
--- /dev/null
+++ b/src/hotspot/share/jbolt/jBoltCallGraph.hpp
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_JBOLT_JBOLTCALLGRAPH_HPP
+#define SHARE_JBOLT_JBOLTCALLGRAPH_HPP
+
+#include "jbolt/jbolt_globals.hpp"
+#include "jbolt/jBoltManager.hpp"
+#include "jfr/utilities/jfrTypes.hpp"
+#include "utilities/growableArray.hpp"
+
+class JBoltFunc;
+class JBoltCall;
+class JBoltCluster;
+
+// Allocate a CHeap-backed GrowableArray tagged mtTracing.
+template<typename T>
+static GrowableArray<T>* create_growable_array(int size = 1) {
+  GrowableArray<T>* array = new (mtTracing) GrowableArray<T>(size, mtTracing);
+  assert(array != NULL, "invariant");
+  return array;
+}
+
+// initial cluster id
+// NOTE(review): header-scope `static` -- every .cpp including this header
+// gets its own private counter. Cluster ids stay globally unique only if a
+// single translation unit ever constructs JBoltClusters; confirm, or move
+// the definition into jBoltCallGraph.cpp.
+static u4 _init_cluster_id = 0;
+
+// Singleton call graph built from JFR samples: funcs are nodes, calls are
+// edges, and clusters group funcs that should be laid out together.
+// The three arrays are owned by the graph and freed in the destructor.
+class JBoltCallGraph : public CHeapObj<mtTracing> {
+ private:
+  GrowableArray<JBoltCluster>* _clusters = NULL;
+  GrowableArray<JBoltCall>* _calls = NULL;
+  GrowableArray<JBoltFunc>* _funcs = NULL;
+
+  // Private; obtain the instance via callgraph_instance(). Copy and move
+  // are deleted to enforce the singleton.
+  JBoltCallGraph() {
+    _clusters = create_growable_array<JBoltCluster>();
+    _calls = create_growable_array<JBoltCall>();
+    _funcs = create_growable_array<JBoltFunc>();
+  }
+
+  JBoltCallGraph(const JBoltCallGraph &) = delete;
+  JBoltCallGraph(const JBoltCallGraph &&) = delete;
+
+  // for constructing CG
+  void add_func(JBoltFunc* func); // Node
+  void add_call(JBoltCall* call); // Edge
+
+ public:
+  static JBoltCallGraph& callgraph_instance();
+  // these two funcs initialize and deinitialize homonymous static array pointers in global
+  static void initialize();
+  static void deinitialize();
+
+  GrowableArray<JBoltCluster>* callgraph_clusters() { return _clusters; }
+  GrowableArray<JBoltCall>* callgraph_calls() { return _calls; }
+  GrowableArray<JBoltFunc>* callgraph_funcs() { return _funcs; }
+
+  // Static forwarders so callers need not fetch the instance first.
+  static void static_add_func(JBoltFunc* func) { callgraph_instance().add_func(func); }
+  static void static_add_call(JBoltCall* call) { callgraph_instance().add_call(call); }
+
+  // for dealing with CG
+  // Returns a newly allocated func ordering (caller owns), or NULL when
+  // JBolt is not in a mode that needs ordering.
+  GrowableArray<JBoltFunc>* hfsort();
+
+  int clear_instance();
+
+  virtual ~JBoltCallGraph() {
+    delete _clusters;
+    delete _calls;
+    delete _funcs;
+
+    _clusters = NULL;
+    _calls = NULL;
+    _funcs = NULL;
+  }
+};
+
+// A call-graph node: one compiled Java method, identified both by
+// (InstanceKlass*, JFR method id) and by its JBoltMethodKey.
+// Owns its _call_indexes array (indexes into the global call array).
+class JBoltFunc : public CHeapObj<mtTracing> {
+ private:
+  const InstanceKlass* _klass;
+  traceid _method_id;
+  int64_t _heat;
+  int _size;
+  int _cluster_id;
+  JBoltMethodKey _method_key;
+  GrowableArray<int>* _call_indexes;
+
+ public:
+  JBoltFunc();
+  JBoltFunc(const JBoltFunc& func);
+  JBoltFunc(const InstanceKlass* klass, traceid method_id, int size, JBoltMethodKey method_key);
+
+  virtual ~JBoltFunc() {
+    delete _call_indexes;
+  }
+
+  // Funcs are equal when they denote the same method, either by identity
+  // (klass + method id) or by method key.
+  bool operator==(const JBoltFunc& func) const { return (_klass == func._klass && _method_id == func._method_id) || (_method_key.equals(func._method_key)); }
+  bool operator!=(const JBoltFunc& func) const { return (_klass != func._klass || _method_id != func._method_id) && !(_method_key.equals(func._method_key)); }
+
+  // Deep-copies the call-index array; frees the previous one first.
+  JBoltFunc& operator=(const JBoltFunc& func) {
+    _klass = func._klass;
+    _method_id = func._method_id;
+    _heat = func._heat;
+    _size = func._size;
+    _cluster_id = func._cluster_id;
+    _method_key = func._method_key;
+    if (_call_indexes != nullptr) {
+      delete _call_indexes;
+    }
+    _call_indexes = create_growable_array<int>(func.get_calls_count());
+    _call_indexes->appendAll(func.call_indexes());
+
+    return *this;
+  }
+
+  const InstanceKlass* klass() const { return _klass; }
+  // Note: dropped the meaningless top-level `const` that was on these four
+  // by-value return types (it is ignored and triggers -Wignored-qualifiers).
+  traceid method_id() const { return _method_id; }
+  int64_t heat() const { return _heat; }
+  int size() const { return _size; }
+  int cluster_id() const { return _cluster_id; }
+  JBoltMethodKey method_key() const { return _method_key; }
+  GrowableArray<int>* call_indexes() const { return _call_indexes; }
+  int get_calls_count() const { return _call_indexes->length(); }
+
+  // Heat updates are mirrored into the owning cluster (see the .cpp).
+  void add_heat(int64_t heat);
+  void set_heat(int64_t heat);
+  void set_cluster_id(int cluster_id);
+  void append_call_index(int index);
+
+  // Field offsets consumed by the native (os::Linux) JBolt merge helper.
+  static ByteSize klass_offset() { return byte_offset_of(JBoltFunc, _klass); }
+  static ByteSize method_id_offset() { return byte_offset_of(JBoltFunc, _method_id); }
+  static ByteSize heat_offset() { return byte_offset_of(JBoltFunc, _heat); }
+  static ByteSize size_offset() { return byte_offset_of(JBoltFunc, _size); }
+  static ByteSize cluster_id_offset() { return byte_offset_of(JBoltFunc, _cluster_id); }
+  static ByteSize call_indexes_offset() { return byte_offset_of(JBoltFunc, _call_indexes); }
+
+  // Heap-allocating factory wrappers.
+  static JBoltFunc* constructor(const InstanceKlass* klass, traceid method_id, int size, JBoltMethodKey method_key);
+  static JBoltFunc* copy_constructor(const JBoltFunc* func);
+};
+
+// A group of funcs that should be placed contiguously. Holds indexes into
+// the global func array; owns its _func_indexes array. An id of -1 marks a
+// cleared/merged-away cluster.
+class JBoltCluster : public CHeapObj<mtTracing> {
+ private:
+  int _id;
+  int64_t _heats;
+  bool _frozen;
+  int _size;
+  double _density;
+  GrowableArray<int>* _func_indexes;
+
+ public:
+  JBoltCluster();
+  JBoltCluster(const JBoltFunc& func);
+  JBoltCluster(const JBoltCluster& cluster);
+
+  // Equality is id + exact member list; heat/size/density are derived and
+  // deliberately not compared -- TODO confirm that is intended.
+  bool operator==(const JBoltCluster& cluster) const {
+    if (_id != cluster.id()) return false;
+
+    int count = get_funcs_count();
+    if (count != cluster.get_funcs_count())
+      return false;
+
+    for (int i = 0; i < count; ++i) {
+      if (_func_indexes->at(i) != cluster._func_indexes->at(i)) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  // Deep-copies the member-func index array; frees the previous one first.
+  JBoltCluster& operator=(const JBoltCluster& cluster) {
+    _id = cluster.id();
+    _heats = cluster.heats();
+    _frozen = cluster.frozen();
+    _size = cluster.size();
+    _density = cluster.density();
+    if (_func_indexes != nullptr) {
+      delete _func_indexes;
+    }
+    _func_indexes = create_growable_array<int>(cluster.get_funcs_count());
+    _func_indexes->appendAll(cluster.func_indexes());
+    return *this;
+  }
+
+  virtual ~JBoltCluster() { delete _func_indexes; }
+
+  int id() const { return _id; }
+  int64_t heats() const { return _heats; }
+  bool frozen() const { return _frozen; }
+  int size() const { return _size; }
+  double density() const { return _density; }
+  GrowableArray<int>* func_indexes() const { return _func_indexes; }
+  int get_funcs_count() const { return _func_indexes->length(); }
+
+  void add_heat(int64_t heat);
+  void freeze();
+  void add_size(int size);
+  void update_density();
+  void append_func_index(int index);
+  void clear();
+
+  // id doubles as the array index; returns NULL for out-of-range ids.
+  static JBoltCluster* find_cluster_by_id(GrowableArray<JBoltCluster>* clusters, u4 id);
+
+  // Field offsets consumed by the native (os::Linux) JBolt merge helper;
+  // keep in sync with data_layout_jbolt[] in the .cpp.
+  static ByteSize id_offset() { return byte_offset_of(JBoltCluster, _id); }
+  static ByteSize heats_offset() { return byte_offset_of(JBoltCluster, _heats); }
+  static ByteSize frozen_offset() { return byte_offset_of(JBoltCluster, _frozen); }
+  static ByteSize size_offset() { return byte_offset_of(JBoltCluster, _size); }
+  static ByteSize density_offset() { return byte_offset_of(JBoltCluster, _density); }
+  static ByteSize func_indexes_offset() { return byte_offset_of(JBoltCluster, _func_indexes); }
+
+  // Heap-allocating factory wrappers.
+  static JBoltCluster* constructor(const JBoltFunc* func);
+  static JBoltCluster* copy_constructor(const JBoltCluster* cluster);
+};
+
+// A call-graph edge: caller/callee are indexes into the global func array,
+// weighted by sample count and tied to a JFR stacktrace.
+class JBoltCall : public CHeapObj<mtTracing> {
+ private:
+  int _caller_index;
+  int _callee_index;
+  u4 _call_count;
+  traceid _stacktrace_id;
+
+ public:
+  JBoltCall();
+  JBoltCall(const JBoltCall& call);
+  JBoltCall(const JBoltFunc& caller_func, const JBoltFunc& callee_func, u4 call_count, traceid stacktrace_id);
+
+  // Edge identity is the (caller, callee) pair; count and stacktrace are
+  // payload, not identity.
+  bool operator==(const JBoltCall& call) const {
+    return _caller_index == call._caller_index && _callee_index == call._callee_index;
+  }
+
+  JBoltCall& operator=(const JBoltCall& call) {
+    _caller_index = call._caller_index;
+    _callee_index = call._callee_index;
+    _call_count = call._call_count;
+    _stacktrace_id = call._stacktrace_id;
+    return *this;
+  }
+
+  virtual ~JBoltCall() {}
+
+  int caller_index() const { return _caller_index; }
+  int callee_index() const { return _callee_index; }
+  u4 call_count() const { return _call_count; }
+  traceid stacktrace_id() const { return _stacktrace_id; }
+
+  // Resolve endpoints through the global func array (see the .cpp).
+  JBoltFunc& caller() const;
+  JBoltFunc& callee() const;
+  void set_caller_index(int index);
+  void set_callee_index(int index);
+  void set_call_count(u4 count);
+
+  // Field offsets consumed by the native (os::Linux) JBolt merge helper.
+  static ByteSize caller_offset() { return byte_offset_of(JBoltCall, _caller_index); }
+  // BUG FIX: callee_offset() previously returned the offset of
+  // _caller_index (copy-paste error), so native code reading the callee
+  // through this offset silently got the caller instead.
+  static ByteSize callee_offset() { return byte_offset_of(JBoltCall, _callee_index); }
+  static ByteSize call_count_offset() { return byte_offset_of(JBoltCall, _call_count); }
+  static ByteSize stacktrace_id_offset() { return byte_offset_of(JBoltCall, _stacktrace_id); }
+
+  // Heap-allocating factory wrappers.
+  static JBoltCall* constructor(const JBoltFunc* caller_func, const JBoltFunc* callee_func, u4 call_count, traceid stacktrace_id);
+  static JBoltCall* copy_constructor(const JBoltCall* call);
+};
+
+#endif // SHARE_JBOLT_JBOLTCALLGRAPH_HPP
diff --git a/src/hotspot/share/jbolt/jBoltControlThread.cpp b/src/hotspot/share/jbolt/jBoltControlThread.cpp
new file mode 100644
index 000000000..dc42ca77b
--- /dev/null
+++ b/src/hotspot/share/jbolt/jBoltControlThread.cpp
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "classfile/javaClasses.inline.hpp"
+#include "classfile/vmClasses.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "jbolt/jBoltControlThread.hpp"
+#include "jbolt/jBoltManager.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
+#include "runtime/thread.inline.hpp"
+
+// Static member definitions; see jBoltControlThread.hpp for roles.
+JavaThread* volatile JBoltControlThread::_the_java_thread = nullptr;
+Monitor* JBoltControlThread::_control_wait_monitor = nullptr;
+Monitor* JBoltControlThread::_sample_wait_monitor = nullptr;
+jobject JBoltControlThread::_thread_obj = nullptr;
+int volatile JBoltControlThread::_signal = JBoltControlThread::SIG_NULL;
+bool volatile JBoltControlThread::_abort = false;
+intx volatile JBoltControlThread::_interval = 0;
+
+// True once the first control cycle has completed; later cycles need the
+// extra restart/cleanup work in thread_run/control_schdule.
+static bool not_first = false;
+
+// One-time setup: create the java.lang.Thread object (held via a global JNI
+// handle), the two monitors, and seed the sample interval from the
+// JBoltSampleInterval flag. Must run before start_thread(). CATCH: any
+// exception during VM init is fatal.
+void JBoltControlThread::init(TRAPS) {
+  Handle string = java_lang_String::create_from_str("JBolt Control", CATCH);
+  Handle thread_group(THREAD, Universe::system_thread_group());
+  Handle thread_oop = JavaCalls::construct_new_instance(
+          vmClasses::Thread_klass(),
+          vmSymbols::threadgroup_string_void_signature(),
+          thread_group,
+          string,
+          CATCH);
+  _thread_obj = JNIHandles::make_global(thread_oop);
+  _control_wait_monitor = new Monitor(Mutex::safepoint - 1, "JBoltControlMonitor");
+  _sample_wait_monitor = new Monitor(Mutex::safepoint - 1, "JBoltSampleMonitor");
+  Atomic::release_store(&_interval, JBoltSampleInterval);
+}
+
+// Spawn the daemon control thread at MinPriority. The cmpxchg guarantees
+// this is called at most once (a second call would trip the guarantee).
+void JBoltControlThread::start_thread(TRAPS) {
+  guarantee(Atomic::load_acquire(&_the_java_thread) == nullptr, "sanity");
+  JavaThread* new_thread = new JavaThread(&thread_entry);
+  if (new_thread->osthread() == nullptr) {
+    fatal("Failed to create JBoltControlThread as no os thread!");
+    return;
+  }
+
+  Handle thread_oop(THREAD, JNIHandles::resolve_non_null(_thread_obj));
+  JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, MinPriority);
+  guarantee(Atomic::cmpxchg(&_the_java_thread, (JavaThread*) nullptr, new_thread) == nullptr, "sanity");
+}
+
+// Current sample window length in seconds (updated by notify_control_wait).
+intx JBoltControlThread::sample_interval() {
+  return Atomic::load_acquire(&_interval);
+}
+
+// Work to do before restarting a control schedule, twice and after only
+// Returns false if stale sample data could not be cleared, in which case
+// the caller skips this cycle.
+bool JBoltControlThread::prev_control_schdule(TRAPS) {
+  guarantee(JBoltManager::auto_mode(), "sanity");
+  // Clear obsolete data structures
+  if (JBoltManager::clear_last_sample_datas() != 0) {
+    log_error(jbolt)("Something wrong happened in data clean, not going on...");
+    return false;
+  }
+
+  // Restart JFR
+  bufferedStream output;
+  DCmd::parse_and_execute(DCmd_Source_Internal, &output, "JFR.start name=jbolt-jfr", ' ', THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    ResourceMark rm;
+    log_warning(jbolt)("unable to start jfr jbolt-jfr");
+    log_warning(jbolt)("exception type: %s", PENDING_EXCEPTION->klass()->external_name());
+    // don't unwind this exception
+    CLEAR_PENDING_EXCEPTION;
+  }
+
+  return true;
+}
+
+// One sampling-and-reorder cycle in auto mode: wait out the sample window
+// (or until JBolt.stop/abort wakes us), stop JFR, build the order, and
+// reorder the code heap. Phase transitions are asserted at each step.
+void JBoltControlThread::control_schdule(TRAPS) {
+  guarantee(JBoltManager::auto_mode(), "sanity");
+  { MonitorLocker locker(_sample_wait_monitor);
+    // Perform time wait
+    // NOTE(review): "%lds" assumes intx == long; prefer INTX_FORMAT for
+    // portability -- confirm on all supported targets.
+    log_info(jbolt)("JBolt Starting Sample for %lds!!!", sample_interval());
+    const jlong interval = (jlong) sample_interval();
+    jlong cur_time = os::javaTimeMillis();
+    const jlong end_time = cur_time + (interval * 1000);
+    // Re-check time and signal after every wakeup (spurious wakeups possible).
+    while ((end_time > cur_time) && Atomic::load_acquire(&_signal) != SIG_STOP_PROFILING) {
+      int64_t timeout = (int64_t) (end_time - cur_time);
+      locker.wait(timeout);
+      cur_time = os::javaTimeMillis();
+    }
+  }
+  // Close JFR
+  guarantee(JBoltManager::reorder_phase_profiling_to_waiting(), "sanity");
+  bufferedStream output;
+  DCmd::parse_and_execute(DCmd_Source_Internal, &output, "JFR.stop name=jbolt-jfr", ' ', THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    ResourceMark rm;
+    // JFR.stop maybe failed if a jfr recording is already stopped
+    // but it's nothing worry, jbolt should continue to work normally
+    log_warning(jbolt)("unable to stop jfr jbolt-jfr");
+    log_warning(jbolt)("exception type: %s", PENDING_EXCEPTION->klass()->external_name());
+    // don't unwind this exception
+    CLEAR_PENDING_EXCEPTION;
+  }
+  // Consume a pending abort request (set by notify_sample_wait(true)).
+  if (Atomic::cmpxchg(&_abort, true, false) == /* should abort */ true) {
+    return;
+  }
+
+  size_t total_nmethod_size = 0;
+  // Init structures for load phase
+  JBoltManager::init_auto_transition(&total_nmethod_size, CATCH);
+
+  if (total_nmethod_size > JBoltCodeHeapSize) {
+    log_warning(jbolt)("JBolt reordering not complete because JBolt CodeHeap is too small to place all ordered methods. Please use -XX:JBoltCodeHeapSize to enlarge");
+    log_warning(jbolt)("JBoltCodeHeapSize=" UINTX_FORMAT " B ( need " UINTX_FORMAT " B).", JBoltCodeHeapSize, total_nmethod_size);
+  }
+
+  if (not_first) {
+    // Exchange Hot Segment primary and secondary relationships
+    JBoltManager::swap_semi_jbolt_segs();
+  }
+
+  guarantee(JBoltManager::reorder_phase_waiting_to_reordering(), "sanity");
+  Atomic::release_store(&_signal, SIG_NULL);
+
+  // Start reorder
+  JBoltManager::reorder_all_methods(CATCH);
+}
+
+// Work to do after reordering, twice and after only
+void JBoltControlThread::post_control_schdule(TRAPS) {
+ JBoltManager::clear_secondary_hot_seg(THREAD);
+}
+
+void JBoltControlThread::thread_run(TRAPS) {
+ if (JBoltManager::auto_mode()) {
+ do {
+ Atomic::release_store(&_signal, SIG_NULL);
+ if (not_first && !prev_control_schdule(THREAD)) continue;
+ guarantee(JBoltManager::reorder_phase_available_to_profiling(), "sanity");
+ control_schdule(THREAD);
+ if (!JBoltManager::reorder_phase_reordering_to_available()) {
+ // abort logic
+ guarantee(JBoltManager::reorder_phase_waiting_to_available(), "sanity");
+ guarantee(Atomic::cmpxchg(&_signal, SIG_STOP_PROFILING, SIG_NULL) == SIG_STOP_PROFILING, "sanity");
+ }
+ else if (not_first) {
+ post_control_schdule(THREAD);
+ }
+ not_first = true;
+ MonitorLocker locker(_control_wait_monitor);
+ while (Atomic::load_acquire(&_signal) != SIG_START_PROFILING) {
+ locker.wait(60 * 1000);
+ }
+ JBoltManager::clear_structures();
+ } while(true);
+ } else {
+ guarantee(JBoltManager::can_reorder_now(), "sanity");
+ guarantee(JBoltManager::reorder_phase_collecting_to_reordering(), "sanity");
+ JBoltManager::reorder_all_methods(CATCH);
+ JBoltManager::clear_structures();
+ guarantee(JBoltManager::reorder_phase_reordering_to_end(), "sanity");
+ assert(JBoltLoadMode, "Only manual JBoltLoadMode can reach here");
+ }
+}
+
+// Called by jcmd JBolt.stop (abort=false) or JBolt.abort (abort=true):
+// wake the sampling wait early. Returns false if another signal is already
+// pending (only a SIG_NULL -> SIG_STOP_PROFILING transition succeeds).
+bool JBoltControlThread::notify_sample_wait(bool abort) {
+  int old_sig = Atomic::cmpxchg(&_signal, SIG_NULL, SIG_STOP_PROFILING);
+  if (old_sig == SIG_NULL) {
+    MonitorLocker locker(_sample_wait_monitor);
+    // abort implementation maybe not in order in extreme cases
+    // add fence? or delete abort() if not so useful.
+    Atomic::release_store(&_abort, abort);
+    locker.notify();
+    return true;
+  }
+  return false;
+}
+
+// Called by jcmd JBolt.start: request a new control cycle with the given
+// sample interval (seconds). Returns false if a signal is already pending.
+bool JBoltControlThread::notify_control_wait(intx interval) {
+  int old_sig = Atomic::cmpxchg(&_signal, SIG_NULL, SIG_START_PROFILING);
+  if (old_sig == SIG_NULL) {
+    // this lock will be grabbed by ControlThread until it's waiting
+    MonitorLocker locker(_control_wait_monitor);
+    Atomic::release_store(&_interval, interval);
+    locker.notify();
+    return true;
+  }
+  return false;
+}
+
+JavaThread* JBoltControlThread::get_thread() {
+  return Atomic::load_acquire(&_the_java_thread);
+}
diff --git a/src/hotspot/share/jbolt/jBoltControlThread.hpp b/src/hotspot/share/jbolt/jBoltControlThread.hpp
new file mode 100644
index 000000000..e63dd1ea9
--- /dev/null
+++ b/src/hotspot/share/jbolt/jBoltControlThread.hpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_JBOLT_JBOLTCONTROLTHREAD_HPP
+#define SHARE_JBOLT_JBOLTCONTROLTHREAD_HPP
+
+#include "runtime/thread.hpp"
+
+/**
+ * Control JBolt how to run in this thread.
+ */
+class JBoltControlThread : public AllStatic {
+public:
+  // _signal protocol: SIG_NULL = idle; SIG_START_PROFILING set by jcmd
+  // JBolt.start; SIG_STOP_PROFILING set by jcmd JBolt.stop/abort.
+  static const int SIG_NULL = 0;
+  static const int SIG_START_PROFILING = 1;
+  static const int SIG_STOP_PROFILING = 2;
+
+private:
+  static JavaThread* volatile _the_java_thread;
+  // Can be notified by jcmd JBolt.start, restart a control schedule
+  static Monitor* _control_wait_monitor;
+  // Can be notified by jcmd JBolt.stop/abort, stop a running JFR
+  static Monitor* _sample_wait_monitor;
+  // Global JNI handle to the java.lang.Thread object created in init().
+  static jobject _thread_obj;
+  static int volatile _signal;
+  // Set together with SIG_STOP_PROFILING to request unwinding the cycle.
+  static bool volatile _abort;
+  // Sample window length in seconds.
+  static intx volatile _interval;
+
+  static void thread_entry(JavaThread* thread, TRAPS) { thread_run(thread); }
+  static void thread_run(TRAPS);
+
+  static intx sample_interval();
+  static bool prev_control_schdule(TRAPS);
+  static void control_schdule(TRAPS);
+  static void post_control_schdule(TRAPS);
+
+public:
+  // Must be called (in this order) during VM startup: init() then
+  // start_thread().
+  static void init(TRAPS);
+
+  static void start_thread(TRAPS);
+
+  static bool notify_sample_wait(bool abort = false);
+
+  static bool notify_control_wait(intx interval);
+
+  static JavaThread* get_thread();
+};
diff --git a/src/hotspot/share/jbolt/jBoltDcmds.cpp b/src/hotspot/share/jbolt/jBoltDcmds.cpp
new file mode 100644
index 000000000..0cf1c75b4
--- /dev/null
+++ b/src/hotspot/share/jbolt/jBoltDcmds.cpp
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "jbolt/jBoltDcmds.hpp"
+#include "jbolt/jBoltControlThread.hpp"
+#include "jbolt/jBoltManager.hpp"
+
+bool register_jbolt_dcmds() {
+ uint32_t full_export = DCmd_Source_Internal | DCmd_Source_AttachAPI | DCmd_Source_MBean;
+ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JBoltStartDCmd>(full_export, true, false));
+ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JBoltStopDCmd>(full_export, true, false));
+ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JBoltAbortDCmd>(full_export, true, false));
+ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JBoltDumpDCmd>(full_export, true, false));
+ return true;
+}
+
// Declares the optional "duration" argument (seconds to sample, default 600)
// and registers it with the DCmd argument parser.
JBoltStartDCmd::JBoltStartDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap),
  _duration("duration", "Duration of time(second) in this sample.", "INT", false, "600") {
  _dcmdparser.add_dcmd_option(&_duration);
}
+
+int JBoltStartDCmd::num_arguments() {
+ ResourceMark rm;
+ JBoltStartDCmd* dcmd = new JBoltStartDCmd(NULL, false);
+ if (dcmd != NULL) {
+ DCmdMark mark(dcmd);
+ return dcmd->_dcmdparser.num_arguments();
+ } else {
+ return 0;
+ }
+}
+
+void JBoltStartDCmd::execute(DCmdSource source, TRAPS) {
+ if (!UseJBolt) {
+ output()->print_cr("Unable to execute because \"UseJBolt\" is disabled.");
+ return;
+ }
+
+ if (!JBoltManager::auto_mode()) {
+ output()->print_cr("JBolt JCMD can only be used in auto mode.");
+ return;
+ }
+
+ if (!JBoltManager::reorder_phase_is_available()) {
+ output()->print_cr("Unable to start because it's working now. Stop it first.");
+ return;
+ }
+
+ intx interval = _duration.is_set() ? _duration.value() : JBoltSampleInterval;
+
+ if (interval < 0) {
+ output()->print_cr("duration is set to %ld which is above range, should be in [0, %d]", interval, max_jint);
+ return;
+ }
+
+ if (JBoltControlThread::notify_control_wait(interval)) {
+ output()->print_cr("OK. Start a new JBolt schedule, duration=%lds.", interval);
+ }
+ else {
+ output()->print_cr("It's busy now. Please try again later...");
+ }
+}
+
// Prints the usage text shown by "jcmd <pid> help JBolt.start".
void JBoltStartDCmd::print_help(const char* name) const {
  output()->print_cr(
    "Syntax : %s [options]\n"
    "\n"
    "Options:\n"
    "\n"
    "  duration  (Optional) Duration of time(second) in this sample. (INT, default value=600)\n"
    "\n"
    "Options must be specified using the <key> or <key>=<value> syntax.\n"
    "\n"
    "Example usage:\n"
    " $ jcmd <pid> JBolt.start\n"
    " $ jcmd <pid> JBolt.start duration=900", name);
}
+
/**
 * Handles "jcmd <pid> JBolt.stop": stops the running jbolt-jfr sampling and
 * lets the control thread proceed to the reorder phase.
 * Fails (with a message) if JBolt is off, not in auto mode, or not sampling.
 */
void JBoltStopDCmd::execute(DCmdSource source, TRAPS) {
  if (!UseJBolt) {
    output()->print_cr("Unable to execute because \"UseJBolt\" is disabled.");
    return;
  }

  if (!JBoltManager::auto_mode()) {
    output()->print_cr("JBolt JCMD can only be used in auto mode.");
    return;
  }

  if (!JBoltManager::reorder_phase_is_profiling()) {
    output()->print_cr("Unable to stop because it's not sampling now.");
    return;
  }

  // Default notify_sample_wait() (abort=false) => stop sampling AND reorder.
  if (JBoltControlThread::notify_sample_wait()) {
    output()->print_cr("OK.\"jbolt-jfr\" would be stopped and turn to reorder.");
  } else {
    output()->print_cr("It's busy now. Please try again later...");
  }
}
+
// Prints the usage text shown by "jcmd <pid> help JBolt.stop".
void JBoltStopDCmd::print_help(const char* name) const {
  output()->print_cr(
    "Syntax : %s\n"
    "\n"
    "Example usage:\n"
    " $ jcmd <pid> JBolt.stop", name);
}
+
/**
 * Handles "jcmd <pid> JBolt.abort": stops the running jbolt-jfr sampling
 * WITHOUT triggering a reorder (notify_sample_wait(true)).
 * Fails (with a message) if JBolt is off, not in auto mode, or not sampling.
 */
void JBoltAbortDCmd::execute(DCmdSource source, TRAPS) {
  if (!UseJBolt) {
    output()->print_cr("Unable to execute because \"UseJBolt\" is disabled.");
    return;
  }

  if (!JBoltManager::auto_mode()) {
    output()->print_cr("JBolt JCMD can only be used in auto mode.");
    return;
  }

  if (!JBoltManager::reorder_phase_is_profiling()) {
    output()->print_cr("Unable to abort because it's not sampling now.");
    return;
  }

  if (JBoltControlThread::notify_sample_wait(true)) {
    output()->print_cr("OK.\"jbolt-jfr\" would be aborted.");
  } else {
    output()->print_cr("It's busy now. Please try again later...");
  }
}
+
// Prints the usage text shown by "jcmd <pid> help JBolt.abort".
void JBoltAbortDCmd::print_help(const char* name) const {
  output()->print_cr(
    "Syntax : %s\n"
    "\n"
    "Example usage:\n"
    " $ jcmd <pid> JBolt.abort", name);
}
+
// Declares the mandatory "filename" argument (dump target path) and registers
// it with the DCmd argument parser.
JBoltDumpDCmd::JBoltDumpDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap),
  _filename("filename", "Name of the file to which the flight recording data is dumped", "STRING", true, NULL) {
  _dcmdparser.add_dcmd_option(&_filename);
}
+
+int JBoltDumpDCmd::num_arguments() {
+ ResourceMark rm;
+ JBoltDumpDCmd* dcmd = new JBoltDumpDCmd(NULL, false);
+ if (dcmd != NULL) {
+ DCmdMark mark(dcmd);
+ return dcmd->_dcmdparser.num_arguments();
+ } else {
+ return 0;
+ }
+}
+
+void JBoltDumpDCmd::execute(DCmdSource source, TRAPS) {
+ if (!UseJBolt) {
+ output()->print_cr("Unable to execute because \"UseJBolt\" is disabled.");
+ return;
+ }
+
+ if (!JBoltManager::auto_mode()) {
+ output()->print_cr("JBolt JCMD can only be used in auto mode.");
+ return;
+ }
+
+ const char* path = _filename.value();
+ char buffer[PATH_MAX];
+ char* rp = NULL;
+
+ JBoltErrorCode ec = JBoltManager::dump_order_in_jcmd(path);
+ switch (ec) {
+ case JBoltOrderNULL:
+ output()->print_cr("Failed: No order applied by JBolt now.");
+ break;
+ case JBoltOpenFileError:
+ output()->print_cr("Failed: File open error or NULL: %s", path);
+ break;
+ case JBoltOK:
+ rp = realpath(path, buffer);
+ output()->print_cr("Successful: Dump to %s", buffer);
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+}
+
// Prints the usage text shown by "jcmd <pid> help JBolt.dump".
void JBoltDumpDCmd::print_help(const char* name) const {
  output()->print_cr(
    "Syntax : %s [options]\n"
    "\n"
    "Options:\n"
    "\n"
    "  filename  Name of the file to which the flight recording data is dumped. (STRING, no default value)\n"
    "\n"
    "Options must be specified using the <key> or <key>=<value> syntax.\n"
    "\n"
    "Example usage:\n"
    " $ jcmd <pid> JBolt.dump filename=order.log", name);
}
\ No newline at end of file
diff --git a/src/hotspot/share/jbolt/jBoltDcmds.hpp b/src/hotspot/share/jbolt/jBoltDcmds.hpp
new file mode 100644
index 000000000..f73fc01e6
--- /dev/null
+++ b/src/hotspot/share/jbolt/jBoltDcmds.hpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_JBOLT_JBOLTDCMDS_HPP
+#define SHARE_JBOLT_JBOLTDCMDS_HPP
+
+#include "services/diagnosticCommand.hpp"
+
// "JBolt.start": kicks off a new JBolt sample schedule (auto mode only).
// See JBoltStartDCmd::execute() in jBoltDcmds.cpp for the state checks.
class JBoltStartDCmd : public DCmdWithParser {
 protected:
  // Optional sample duration in seconds; registered as "INT" (jlong-backed)
  // with default "600" in the constructor.
  DCmdArgument<jlong> _duration;
 public:
  JBoltStartDCmd(outputStream* output, bool heap);

  static const char* name() {
    return "JBolt.start";
  }
  static const char* description() {
    return "Starts a new JBolt sample schedule(fail if sampling)";
  }
  static const char* impact() {
    return "Medium: Depending on JFR that JBolt rely on, the impact can range from low to high.";
  }
  static const JavaPermission permission() {
    JavaPermission p = {"java.lang.management.ManagementPermission", "control", NULL};
    return p;
  }
  static int num_arguments();
  virtual void execute(DCmdSource source, TRAPS);
  virtual void print_help(const char* name) const;
};
+
// "JBolt.stop": stops a running sample schedule and reorders immediately.
// Takes no arguments, hence plain DCmd rather than DCmdWithParser.
class JBoltStopDCmd : public DCmd {
 public:
  JBoltStopDCmd(outputStream* output, bool heap) : DCmd(output, heap) {}

  static const char* name() {
    return "JBolt.stop";
  }
  static const char* description() {
    return "Stop a running JBolt sample schedule and reorder immediately(fail if not sampling)";
  }
  static const char* impact() {
    return "Low";
  }
  static const JavaPermission permission() {
    JavaPermission p = {"java.lang.management.ManagementPermission", "control", NULL};
    return p;
  }
  static int num_arguments() {
    return 0;
  }

  virtual void execute(DCmdSource source, TRAPS);
  virtual void print_help(const char* name) const;
};
+
// "JBolt.abort": stops a running sample schedule WITHOUT reordering.
// NOTE(review): this command mutates VM state like JBolt.stop does, yet asks
// for the "monitor" permission while stop asks for "control" -- confirm the
// asymmetry is intentional.
class JBoltAbortDCmd : public DCmd {
 public:
  JBoltAbortDCmd(outputStream* output, bool heap) : DCmd(output, heap) {}

  static const char* name() {
    return "JBolt.abort";
  }
  static const char* description() {
    return "Stop a running JBolt sample schedule but don't reorder(fail if not sampling)";
  }
  static const char* impact() {
    return "Low";
  }
  static const JavaPermission permission() {
    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
    return p;
  }
  static int num_arguments() {
    return 0;
  }

  virtual void execute(DCmdSource source, TRAPS);
  virtual void print_help(const char* name) const;
};
+
// "JBolt.dump": writes the currently applied JBolt order to a file.
class JBoltDumpDCmd : public DCmdWithParser {
 protected:
  // Mandatory target path (registered in the constructor, no default).
  DCmdArgument<char*> _filename;
 public:
  JBoltDumpDCmd(outputStream* output, bool heap);

  static const char* name() {
    return "JBolt.dump";
  }
  static const char* description() {
    return "dump an effective order to file(fail if no order)";
  }
  static const char* impact() {
    return "Low";
  }
  static const JavaPermission permission() {
    JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
    return p;
  }
  static int num_arguments();
  virtual void execute(DCmdSource source, TRAPS);
  virtual void print_help(const char* name) const;
};
+
+bool register_jbolt_dcmds();
+
+#endif // SHARE_JBOLT_JBOLTDCMDS_HPP
\ No newline at end of file
diff --git a/src/hotspot/share/jbolt/jBoltManager.cpp b/src/hotspot/share/jbolt/jBoltManager.cpp
new file mode 100644
index 000000000..4cb6f4d1a
--- /dev/null
+++ b/src/hotspot/share/jbolt/jBoltManager.cpp
@@ -0,0 +1,1429 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "classfile/javaClasses.inline.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "code/codeBlob.hpp"
+#include "code/codeCache.hpp"
+#include "compiler/compileBroker.hpp"
+#include "compiler/compilerDefinitions.inline.hpp"
+#include "jbolt/jBoltCallGraph.hpp"
+#include "jbolt/jBoltControlThread.hpp"
+#include "jbolt/jBoltManager.hpp"
+#include "jbolt/jBoltUtils.inline.hpp"
+#include "jfr/jfr.hpp"
+#include "jfr/support/jfrMethodLookup.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+#include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
+#include "oops/klass.inline.hpp"
+#include "oops/method.inline.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/jniHandles.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
+#include "utilities/formatBuffer.hpp"
+#ifdef LINUX
+#include "os_linux.hpp"
+#endif
+
static constexpr int LINE_BUF_SIZE = 8192; // used to parse JBolt order file
static constexpr int MIN_FRAMESCOUNT = 2; // used as default stacktrace depth
static constexpr int ILL_NM_STATE = -2; // used to present nmethod illegal state

// Renders a bool as "V" (true) / "X" (false) for log output.
#define B_TF(b) (b ? "V" : "X")

// Hot methods read from the order file (load mode), in reorder sequence.
GrowableArray<JBoltMethodKey>* JBoltManager::_hot_methods_sorted = nullptr;
// The same methods keyed in a map for fast membership checks.
JBoltManager::MethodKeyMap* JBoltManager::_hot_methods_vis = nullptr;
int JBoltManager::_reorder_method_threshold_cnt = 0;

volatile int JBoltManager::_reorder_phase = JBoltReorderPhase::Available;
volatile int JBoltManager::_reorderable_method_cnt = 0;
Method* volatile JBoltManager::_cur_reordering_method = nullptr;

Thread* JBoltManager::_start_reordering_thread = nullptr;

// Weak refs to the klass holders of every sampled method; filled by
// log_stacktrace() and consulted by lookup_method().
JBoltManager::StackFrameKeyMap* JBoltManager::_sampled_methods_refs = nullptr;

// True when neither JBoltDumpMode nor JBoltLoadMode was set (see check_mode()).
bool JBoltManager::_auto_mode = false;

// swap between MethodJBoltHot and MethodJBoltTmp
volatile int JBoltManager::_primary_hot_seg = int(CodeBlobType::MethodJBoltHot);
volatile int JBoltManager::_secondary_hot_seg = int(CodeBlobType::MethodJBoltTmp);

volatile int JBoltManager::_gc_should_sweep_code_heaps_now = 0;

// Last computed order, kept so JBolt.dump can write it later.
// NOTE(review): file-scope but not static -- make it static (or a class
// member) unless another translation unit really references it.
GrowableArray<JBoltFunc>* _order_stored = nullptr;

// This is a tmp obj used only in initialization phases.
// We cannot alloc Symbol in phase 1 so we have to parse the order file again
// in phase 2.
// This obj will be freed after initialization.
static FILE* _order_fp = nullptr;

// The threshold to trigger JBolt reorder in load mode.
static const double _jbolt_reorder_threshold = 0.8;
+
// Reads one line from fp into buf (at most buf_len - 1 chars), strips the
// trailing "\r\n"/"\n", and stores the stripped length in *res_len.
// Returns false on EOF or read error.
static bool read_line(FILE* fp, char* buf, int buf_len, int* res_len) {
  char* got = fgets(buf, buf_len, fp);
  if (got == nullptr) {
    return false;
  }
  // Terminate the line at the first CR or LF (if any).
  char* eol = got + strcspn(got, "\r\n");
  *eol = '\0';
  *res_len = (int) (eol - got);
  return true;
}
+
// Parses the leading "<number> " token of buf as a julong into *res.
// The first space is temporarily replaced with '\0' so atojulong sees only
// the number, and is restored on every path before returning.
// Returns false if there is no space or the token is not a valid number.
static bool read_a_size(char* buf, size_t* res) {
  char* t = strchr(buf, ' ');
  if (t == nullptr) return false;
  *t = '\0';
  julong v;
  if (!Arguments::atojulong(buf, &v)) {
    *t = ' ';
    return false;
  }
  *t = ' ';
  *res = (size_t) v;
  return true;
}
+
// Replaces every occurrence of `from` with `to` in NUL-terminated string s,
// in place.
static void replace_all(char* s, char from, char to) {
  for (char* p = strchr(s, from); p != nullptr; p = strchr(p + 1, from)) {
    *p = to;
  }
}
+
// Releases the owned CompileTaskInfo, if one was ever installed.
JBoltMethodValue::~JBoltMethodValue() {
  if (_comp_info != nullptr) delete get_comp_info();
}

// Acquire-load pairs with the cmpxchg publication in set_comp_info().
CompileTaskInfo* JBoltMethodValue::get_comp_info() {
  return Atomic::load_acquire(&_comp_info);
}

// Installs `info` only if no CompileTaskInfo was set before; returns whether
// this call won the race. On failure the caller still owns `info`.
bool JBoltMethodValue::set_comp_info(CompileTaskInfo* info) {
  return Atomic::cmpxchg(&_comp_info, (CompileTaskInfo*) nullptr, info) == nullptr;
}

// Detaches the CompileTaskInfo without deleting it; ownership stays with
// whoever loaded the pointer beforehand.
void JBoltMethodValue::clear_comp_info_but_not_release() {
  Atomic::release_store(&_comp_info, (CompileTaskInfo*) nullptr);
}
+
// Destroys the JNI handle to the method holder, handling both the weak form
// (as created in log_stacktrace()) and a strong form.
JBoltStackFrameValue::~JBoltStackFrameValue() {
  if (_method_holder != nullptr) {
    if (JNIHandles::is_weak_global_handle(_method_holder)) {
      JNIHandles::destroy_weak_global(_method_holder);
    } else {
      JNIHandles::destroy_global(_method_holder);
    }
  }
}

// Returns the JNI handle that keeps the holder klass observable (may be weak).
jobject JBoltStackFrameValue::get_method_holder() { return _method_holder; }

// Gives up ownership of the handle without destroying it; used after the value
// has been copied into the map (see log_stacktrace()).
void JBoltStackFrameValue::clear_method_holder_but_not_release() { _method_holder = nullptr; }
+
// Snapshot of a compile request for later replay during reordering.
// Holds WEAK global refs to the holder klasses so the snapshot does not keep
// classes alive; try_select() upgrades them to strong refs before use.
CompileTaskInfo::CompileTaskInfo(Method* method, int osr_bci, int comp_level, int comp_reason, Method* hot_method, int hot_cnt):
    _method(method), _osr_bci(osr_bci), _comp_level(comp_level), _comp_reason(comp_reason), _hot_method(hot_method), _hot_count(hot_cnt) {
  Thread* thread = Thread::current();

  assert(_method != nullptr, "sanity");
  // _method_holder can be null for boot loader (the null loader)
  _method_holder = JNIHandles::make_weak_global(Handle(thread, _method->method_holder()->klass_holder()));

  // Only track the hot method separately when it differs from _method.
  if (_hot_method != nullptr && _hot_method != _method) {
    _hot_method_holder = JNIHandles::make_weak_global(Handle(thread, _hot_method->method_holder()->klass_holder()));
  } else {
    _hot_method_holder = nullptr;
  }
}
+
// Destroys both holder handles; each may be weak (initial state) or strong
// (after try_select() upgraded them).
CompileTaskInfo::~CompileTaskInfo() {
  if (_method_holder != nullptr) {
    if (JNIHandles::is_weak_global_handle(_method_holder)) {
      JNIHandles::destroy_weak_global(_method_holder);
    } else {
      JNIHandles::destroy_global(_method_holder);
    }
  }
  if (_hot_method_holder != nullptr) {
    if (JNIHandles::is_weak_global_handle(_hot_method_holder)) {
      JNIHandles::destroy_weak_global(_hot_method_holder);
    } else {
      JNIHandles::destroy_global(_hot_method_holder);
    }
  }
}
+
+/**
+ * Set the weak reference to strong reference if the method is not unloaded.
+ * It seems that the life cycle of Method is consistent with that of the Klass and CLD.
+ * @see CompileTask::select_for_compilation()
+ */
+bool CompileTaskInfo::try_select() {
+ NoSafepointVerifier nsv;
+ Thread* thread = Thread::current();
+ // is unloaded
+ if (_method_holder != nullptr && JNIHandles::is_weak_global_handle(_method_holder) && JNIHandles::is_weak_global_cleared(_method_holder)) {
+ if (log_is_enabled(Debug, jbolt)) {
+ log_debug(jbolt)("Some method has been unloaded so skip reordering for it: p=%p.", _method);
+ }
+ return false;
+ }
+
+ assert(_method->method_holder()->is_loader_alive(), "should be alive");
+ Handle method_holder(thread, _method->method_holder()->klass_holder());
+ JNIHandles::destroy_weak_global(_method_holder);
+ _method_holder = JNIHandles::make_global(method_holder);
+
+ if (_hot_method_holder != nullptr) {
+ Handle hot_method_holder(thread, _hot_method->method_holder()->klass_holder());
+ JNIHandles::destroy_weak_global(_hot_method_holder);
+ _hot_method_holder = JNIHandles::make_global(Handle(thread, _hot_method->method_holder()->klass_holder()));
+ }
+ return true;
+}
+
// Exits the VM if `flag` was explicitly set on the command line; used to
// reject JBolt-related flags while UseJBolt is off (check_arguments_not_set()).
static void check_arg_not_set(JVMFlagsEnum flag) {
  if (JVMFlag::is_cmdline(flag)) {
    vm_exit_during_initialization(err_msg("Do not set VM option %s without UseJBolt enabled.",
                                          JVMFlag::flag_from_enum(flag)->name()));
  }
}
+
// Maps a JfrStackFrame frame type to a human-readable label (used in logs).
// NOTE(review): construct_stacktrace() originally compared the ADDRESS of the
// returned literal against "Native"; callers should compare frame types, not
// returned pointers -- literal pooling is not guaranteed by the standard.
static const char *method_type_to_string(u1 type) {
  switch (type) {
    case JfrStackFrame::FRAME_INTERPRETER:
      return "Interpreted";
    case JfrStackFrame::FRAME_JIT:
      return "JIT compiled";
    case JfrStackFrame::FRAME_INLINE:
      return "Inlined";
    case JfrStackFrame::FRAME_NATIVE:
      return "Native";
    default:
      ShouldNotReachHere();
      return "Unknown";
  }
}
+
// Offsets and callback addresses handed to os::Linux::jboltLog_do()/
// jboltLog_precalc() (see construct_stacktrace()), so the OS-side helper can
// walk JfrStackTrace/JfrStackFrame objects and build call-graph nodes without
// compile-time knowledge of the JFR layouts.
// NOTE(review): consumers index this array positionally -- do not reorder.
uintptr_t related_data_jbolt[] = {
  (uintptr_t)in_bytes(JfrStackTrace::hash_offset()),
  (uintptr_t)in_bytes(JfrStackTrace::id_offset()),
  (uintptr_t)in_bytes(JfrStackTrace::hotcount_offset()),
  (uintptr_t)in_bytes(JfrStackTrace::frames_offset()),
  (uintptr_t)in_bytes(JfrStackTrace::frames_count_offset()),

  (uintptr_t)in_bytes(JfrStackFrame::klass_offset()),
  (uintptr_t)in_bytes(JfrStackFrame::methodid_offset()),
  (uintptr_t)in_bytes(JfrStackFrame::bci_offset()),
  (uintptr_t)in_bytes(JfrStackFrame::type_offset()),

  (uintptr_t)JBoltFunc::constructor,
  (uintptr_t)JBoltFunc::copy_constructor,
  (uintptr_t)JBoltCall::constructor,
  (uintptr_t)JBoltCall::copy_constructor,
  (uintptr_t)JBoltCallGraph::static_add_func,
  (uintptr_t)JBoltCallGraph::static_add_call
};
+
/**
 * Invoked in JfrStackTraceRepository::add_jbolt().
 * Each time JFR record a valid stacktrace,
 * we log a weak ptr of each unique method in _sampled_methods_refs.
 */
void JBoltManager::log_stacktrace(const JfrStackTrace& stacktrace) {
  Thread* thread = Thread::current();
  HandleMark hm(thread);

  const JfrStackFrame* frames = stacktrace.get_frames();
  unsigned int framesCount = stacktrace.get_framesCount();

  for (u4 i = 0; i < framesCount; ++i) {
    const JfrStackFrame& frame = frames[i];

    JBoltStackFrameKey stackframe_key(const_cast<InstanceKlass *>(frame.get_klass()), frame.get_methodId());

    // Only the first sighting of a (klass, methodId) pair creates a weak ref.
    if (!_sampled_methods_refs->contains(stackframe_key)) {
      jobject method_holder = JNIHandles::make_weak_global(Handle(thread, frame.get_klass()->klass_holder()));
      JBoltStackFrameValue stackframe_value(method_holder);
      _sampled_methods_refs->put(stackframe_key, stackframe_value);
      // put() transmits method_holder ownership to element in map
      // set the method_holder to nullptr in temp variable stackframe_value, to avoid double free
      stackframe_value.clear_method_holder_but_not_release();
    }
  }
}
+
+methodHandle JBoltManager::lookup_method(InstanceKlass* klass, traceid method_id) {
+ Thread* thread = Thread::current();
+ JBoltStackFrameKey stackframe_key(klass, method_id);
+ JBoltStackFrameValue* stackframe_value = _sampled_methods_refs->get(stackframe_key);
+ if (stackframe_value == nullptr) {
+ return methodHandle();
+ }
+
+ jobject method_holder = stackframe_value->get_method_holder();
+ if (method_holder != nullptr && JNIHandles::is_weak_global_handle(method_holder) && JNIHandles::is_weak_global_cleared(method_holder)) {
+ log_debug(jbolt)("method klass at %p is unloaded", (void*)klass);
+ return methodHandle();
+ }
+
+ const Method* const lookup_method = JfrMethodLookup::lookup(klass, method_id);
+ if (lookup_method == NULL) {
+ // stacktrace obsolete
+ return methodHandle();
+ }
+ assert(lookup_method != NULL, "invariant");
+ methodHandle method(thread, const_cast<Method*>(lookup_method));
+
+ return method;
+}
+
+void JBoltManager::construct_stacktrace(const JfrStackTrace& stacktrace) {
+ NoSafepointVerifier nsv;
+ if (stacktrace.get_framesCount() < MIN_FRAMESCOUNT)
+ return;
+
+ u4 topFrameIndex = 0;
+ u4 max_frames = 0;
+
+ const JfrStackFrame* frames = stacktrace.get_frames();
+ unsigned int framesCount = stacktrace.get_framesCount();
+
+ // Native method subsidence
+ while (topFrameIndex < framesCount) {
+ const JfrStackFrame& frame = frames[topFrameIndex];
+
+ if (reinterpret_cast<uintptr_t>(method_type_to_string(frame.get_type())) != reinterpret_cast<uintptr_t>("Native")) {
+ break;
+ }
+
+ topFrameIndex++;
+ }
+
+ if (framesCount - topFrameIndex < MIN_FRAMESCOUNT) {
+ return;
+ }
+
+ os::Linux::jboltLog_precalc(topFrameIndex, max_frames);
+
+ JBoltFunc **tempfunc = NULL;
+
+ for (u4 i = 0; i < max_frames; ++i) {
+ const JfrStackFrame& frame = frames[topFrameIndex + i];
+
+ methodHandle method = lookup_method(const_cast<InstanceKlass*>(frame.get_klass()), frame.get_methodId());
+ if (method.is_null()) {
+ break;
+ }
+ const CompiledMethod* const compiled = method->code();
+
+ log_trace(jbolt)(
+ "Method id - %lu\n\tBytecode index - %hu\n\tSignature - %s\n\tType - %s\n\tCompiler - %s\n\tCompile Level - %d\n\tSize - %dB\n",
+ frame.get_methodId(),
+ frame.get_byteCodeIndex(),
+ method->external_name(),
+ method_type_to_string(frame.get_type()),
+ compiled != NULL ? compiled->compiler_name() : "None",
+ compiled != NULL ? compiled->comp_level() : -1,
+ compiled != NULL ? compiled->size() : 0);
+
+ if (compiled == NULL) break;
+
+ JBoltMethodKey method_key(method->constants()->pool_holder()->name(), method->name(), method->signature());
+ JBoltFunc* func = JBoltFunc::constructor(frame.get_klass(), frame.get_methodId(), compiled->size(), method_key);
+
+ if (!os::Linux::jboltLog_do(related_data_jbolt, (address)&stacktrace, i, compiled->comp_level(), (address)func, (address*)&tempfunc)) {
+ delete func;
+ func = NULL;
+ break;
+ }
+ }
+
+ log_trace(jbolt)(
+ "StackTrace hash - %u hotcount - %d\n==============================\n", stacktrace.hash(), stacktrace.hotcount());
+}
+
+/**
+ * Invoked in JfrStackTraceRepository::write().
+ * Each time JfrChunkWrite do write and clear stacktrace table,
+ * we update the CG by invoke construct_stacktrace().
+ */
+void JBoltManager::construct_cg_once() {
+ guarantee((UseJBolt && JBoltManager::reorder_phase_is_profiling_or_waiting()), "sanity");
+
+ GrowableArray<JfrStackTrace*>* traces = create_growable_array<JfrStackTrace*>();
+
+ {
+ MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
+ const JfrStackTraceRepository& repository = JfrStackTraceRepository::instance();
+
+ if (repository.get_entries_count_jbolt() == 0) {
+ return;
+ }
+
+ const JfrStackTrace* const * table = repository.get_stacktrace_table_jbolt();
+ for (uint i = 0; i < repository.TABLE_SIZE; ++i) {
+ for (const JfrStackTrace* trace = table[i]; trace != nullptr; trace = trace->next()) {
+ traces->append(const_cast<JfrStackTrace*>(trace));
+ }
+ }
+ }
+
+ Thread* thread = Thread::current();
+ if (thread->is_Java_thread()) {
+ JavaThread* java_thread = JavaThread::current();
+ ThreadInVMfromNative tiv(java_thread);
+ for (int i = 0; i < traces->length(); ++i) {
+ construct_stacktrace(*(traces->at(i)));
+ }
+ } else {
+ for (int i = 0; i < traces->length(); ++i) {
+ construct_stacktrace(*(traces->at(i)));
+ }
+ }
+
+ log_trace(jbolt)(
+ "+++++++ one time log over ++++++\n\n");
+ delete traces;
+}
+
// Serializes a computed order to `fs` in the JBolt order-file format:
//   "M <size> <holder> <name> <signature>" per method, and a bare "C" line
// (a JBoltFunc with a NULL klass) between connected components.
// This is the format load_order_file_phase1/2 parse back in.
static void write_order(const GrowableArray<JBoltFunc>* order, fileStream& fs) {
  assert(order != nullptr, "sanity");
  const char* methodFlag = "M";
  const char* segmentor = "C\n";

  log_debug(jbolt)("+============================+\n\t\t\tORDER\n");

  for (int i = 0; i < order->length(); ++i) {
    const JBoltFunc& func = order->at(i);
    // A NULL klass marks a connected-component separator, not a method.
    if (func.klass() == NULL) {
      fs.write(segmentor, strlen(segmentor));
      continue;
    }

    char* holder_name = func.method_key().klass()->as_C_string();
    char* name = func.method_key().name()->as_C_string();
    char* signature = func.method_key().sig()->as_C_string();
    char size[LINE_BUF_SIZE] = {0};
    snprintf(size, sizeof(size), "%d", func.size());

    log_debug(jbolt)("order %d --- Method - %s %s %s\n", i, holder_name, name, signature);

    fs.write(methodFlag, strlen(methodFlag));
    fs.write(" ", 1);
    fs.write(size, strlen(size));
    fs.write(" ", 1);
    fs.write(holder_name, strlen(holder_name));
    fs.write(" ", 1);
    fs.write(name, strlen(name));
    fs.write(" ", 1);
    fs.write(signature, strlen(signature));
    fs.write("\n", 1);
  }
}
+
/**
 * Invoked in before_exit().
 * Only use in manual mode.
 * Dump the order to JBoltOrderFile before vm exit.
 *
 * Also tears down the sampling data structures since the VM is going away.
 */
void JBoltManager::dump_order_in_manual() {
  guarantee((UseJBolt && JBoltDumpMode), "sanity");
  guarantee(reorder_phase_profiling_to_waiting(), "sanity");
  NoSafepointVerifier nsv;
  ResourceMark rm;
  GrowableArray<JBoltFunc>* order = JBoltCallGraph::callgraph_instance().hfsort();

  fileStream orderFile(JBoltOrderFile, "w+");

  // NOTE(review): vm_exit_during_initialization() is used here although we
  // are on the shutdown path (before_exit) -- confirm this is the intended
  // exit routine.
  if (JBoltOrderFile == NULL || !orderFile.is_open()) {
    log_error(jbolt)("JBoltOrderFile open error");
    vm_exit_during_initialization("JBoltOrderFile open error");
  }

  write_order(order, orderFile);

  log_info(jbolt)("order generate successful !!");
  log_debug(jbolt)("+============================+\n");
  delete order;
  delete _sampled_methods_refs;
  _sampled_methods_refs = nullptr;
  JBoltCallGraph::deinitialize();
}
+
+JBoltErrorCode JBoltManager::dump_order_in_jcmd(const char* filename) {
+ guarantee(UseJBolt, "sanity");
+ NoSafepointVerifier nsv;
+ ResourceMark rm;
+
+ if (_order_stored == nullptr) return JBoltOrderNULL;
+
+ fileStream orderFile(filename, "w+");
+
+ if (filename == NULL || !orderFile.is_open()) return JBoltOpenFileError;
+
+ write_order(_order_stored, orderFile);
+
+ return JBoltOK;
+}
+
/**
 * Do not set the JBolt-related flags manually if UseJBolt is not enabled.
 * Each check exits the VM if the corresponding flag came from the command
 * line (see check_arg_not_set()).
 */
void JBoltManager::check_arguments_not_set() {
  if (UseJBolt) return;

  check_arg_not_set(FLAG_MEMBER_ENUM(JBoltDumpMode));
  check_arg_not_set(FLAG_MEMBER_ENUM(JBoltLoadMode));
  check_arg_not_set(FLAG_MEMBER_ENUM(JBoltOrderFile));
  check_arg_not_set(FLAG_MEMBER_ENUM(JBoltSampleInterval));
  check_arg_not_set(FLAG_MEMBER_ENUM(JBoltCodeHeapSize));
}
+
/**
 * Check which mode is JBolt in.
 * If JBoltDumpMode or JBoltLoadMode is set manually then do nothing, else it will be fully auto sched by JBolt itself.
 */
void JBoltManager::check_mode() {
  // Neither manual flag set => auto mode; JBoltSampleInterval applies.
  if (!(JBoltDumpMode || JBoltLoadMode)) {
    _auto_mode = true;
    return;
  }

  // From here on we are in one of the manual modes.
  if (!FLAG_IS_DEFAULT(JBoltSampleInterval)) {
    log_warning(jbolt)("JBoltSampleInterval is ignored because it is not in auto mode.");
  }

  if (JBoltDumpMode && JBoltLoadMode) {
    vm_exit_during_initialization("Do not set both JBoltDumpMode and JBoltLoadMode!");
  }

  // NOTE(review): after the two checks above exactly one flag is set, so this
  // guarantee is a tautology kept as documentation.
  guarantee((JBoltDumpMode ^ JBoltLoadMode), "Must set either JBoltDumpMode or JBoltLoadMode!");
}
+
/**
 * If in auto mode, JBoltOrderFile will be ignored
 * If in any manual mode, then JBoltOrderFile will be necessary.
 * Check whether the order file exists or is accessable.
 *
 * Dump mode overwrites an existing file; load mode requires it to exist.
 */
void JBoltManager::check_order_file() {
  if (auto_mode()) {
    if (JBoltOrderFile != nullptr) log_warning(jbolt)("JBoltOrderFile is ignored because it is in auto mode.");
    return;
  }

  if (JBoltOrderFile == nullptr) {
    vm_exit_during_initialization("JBoltOrderFile is not set!");
  }

  bool file_exist = (::access(JBoltOrderFile, F_OK) == 0);
  if (file_exist) {
    if (JBoltDumpMode) {
      log_warning(jbolt)("JBoltOrderFile to dump already exists and will be overwritten: file=%s.", JBoltOrderFile);
      ::remove(JBoltOrderFile);
    }
  } else {
    if (JBoltLoadMode) {
      vm_exit_during_initialization(err_msg("JBoltOrderFile does not exist or cannot be accessed! file=\"%s\".", JBoltOrderFile));
    }
  }
}
+
+void JBoltManager::check_dependency() {
+ if (FLAG_IS_CMDLINE(FlightRecorder) ? !FlightRecorder : false) {
+ vm_exit_during_initialization("JBolt depends on JFR!");
+ }
+
+ if (!CompilerConfig::is_c2_enabled()) {
+ vm_exit_during_initialization("JBolt depends on C2!");
+ }
+
+ if (!SegmentedCodeCache) {
+ vm_exit_during_initialization("JBolt depends on SegmentedCodeCache!");
+ }
+}
+
// Rounds one nmethod size up to CodeCacheSegmentSize granularity, matching
// how the code cache will actually place it.
size_t JBoltManager::calc_nmethod_size_with_padding(size_t nmethod_size) {
  return align_up(nmethod_size, (size_t) CodeCacheSegmentSize);
}
+
// Clamps the estimated hot-segment size to at least one code-cache page, then
// rounds it DOWN to page granularity.
// NOTE(review): align_down can shrink the phase-1 estimate -- confirm that
// undersizing the hot segment is acceptable here.
size_t JBoltManager::calc_segment_size_with_padding(size_t segment_size) {
  size_t page_size = CodeCache::page_size();
  if (segment_size < page_size) return page_size;
  return align_down(segment_size, page_size);
}
+
/**
 * We have to parse the file twice because SymbolTable is not inited in phase 1...
 *
 * Phase 1 only counts the "M <size> ..." lines and accumulates the padded
 * nmethod sizes, producing the method count and an estimated hot-segment
 * size. The FILE* is intentionally left open in _order_fp for phase 2.
 */
void JBoltManager::load_order_file_phase1(int* method_cnt, size_t* segment_size) {
  assert(JBoltOrderFile != nullptr, "sanity");

  _order_fp = os::fopen(JBoltOrderFile, "r");
  if (_order_fp == nullptr) {
    vm_exit_during_initialization(err_msg("Cannot open file JBoltOrderFile! file=\"%s\".", JBoltOrderFile));
  }

  int mth_cnt = 0;
  size_t seg_size = 0;

  char line[LINE_BUF_SIZE];
  int len = -1;
  while (read_line(_order_fp, line, sizeof(line), &len)) {
    // Only method lines ("M <size> ...") matter in this phase.
    if (len <= 2) continue;
    if (line[0] != 'M' || line[1] != ' ') continue;
    char* left_start = line + 2;

    // parse nmethod size
    size_t nmethod_size;
    if (!read_a_size(left_start, &nmethod_size)) {
      vm_exit_during_initialization(err_msg("Wrong format of JBolt order line! line=\"%s\".", line));
    }
    ++mth_cnt;
    seg_size += calc_nmethod_size_with_padding(nmethod_size);
  }

  *method_cnt = mth_cnt;
  *segment_size = seg_size;
  log_trace(jbolt)("Read order file method_cnt=%d, estimated_segment_size=" SIZE_FORMAT ".", mth_cnt, seg_size);
}
+
+/**
+ * Parses one "M <size> <class> <method> <signature>" line in phase 2:
+ * skips the size field (consumed in phase 1), converts dotted class names
+ * to slashed form, interns the three symbols and records the method key.
+ * Duplicated keys abort VM startup. Returns false on a malformed line.
+ */
+bool JBoltManager::parse_method_line_phase2(char* const line, const int len) {
+  // Skip "M ".
+  char* left_start = line + 2;
+
+  // Skip nmethod size (has parsed in phase1).
+  {
+    char* t = strchr(left_start, ' ');
+    if (t == nullptr) return false;
+    left_start = t + 1;
+  }
+
+  // Modify "java.lang.Obj" to "java/lang/Obj".
+  replace_all(left_start, '.', '/');
+
+  // Parse the three symbols: class name, method name, signature.
+  Symbol* three_symbols[3];
+  for (int i = 0; i < 2; ++i) {
+    char* t = strchr(left_start, ' ');
+    if (t == nullptr) return false;
+    Symbol* sym = SymbolTable::new_symbol(left_start, t - left_start);
+    three_symbols[i] = sym;
+    left_start = t + 1;
+  }
+  // The signature is the rest of the line.
+  Symbol* sym = SymbolTable::new_symbol(left_start, line + len - left_start);
+  three_symbols[2] = sym;
+  if (log_is_enabled(Trace, jbolt)) {
+    log_trace(jbolt)("HotMethod init: key={%s %s %s}",
+                     three_symbols[0]->as_C_string(),
+                     three_symbols[1]->as_C_string(),
+                     three_symbols[2]->as_C_string());
+  }
+
+  // Add to data structure.
+  JBoltMethodKey method_key(three_symbols[0], three_symbols[1], three_symbols[2]);
+  _hot_methods_sorted->append(method_key);
+  JBoltMethodValue method_value;
+  bool put = _hot_methods_vis->put(method_key, method_value);
+  if (!put) {
+    vm_exit_during_initialization(err_msg("Duplicated method: {%s %s %s}!",
+        three_symbols[0]->as_C_string(),
+        three_symbols[1]->as_C_string(),
+        three_symbols[2]->as_C_string()));
+  }
+
+  return true;
+}
+
+bool JBoltManager::parse_connected_component_line_phase2(char* const line, const int len) { return true; }
+
+/**
+ * Phase-2 pass: rewind the file left open by phase 1 and parse each line by
+ * its leading tag ('#' comment, 'M' method, 'C' connected component).
+ * Closes and clears _order_fp on completion.
+ */
+void JBoltManager::load_order_file_phase2(TRAPS) {
+  guarantee(_order_fp != nullptr, "sanity");
+
+  // re-scan
+  fseek(_order_fp, 0, SEEK_SET);
+
+  char line[LINE_BUF_SIZE];
+  int len = -1;
+  while (read_line(_order_fp, line, sizeof(line), &len)) {
+    if (len <= 0) continue;
+    bool success = false;
+    switch (line[0]) {
+      case '#': success = true; break; // ignore comments
+      case 'M': success = parse_method_line_phase2(line, len); break;
+      case 'C': success = parse_connected_component_line_phase2(line, len); break;
+      default: break;
+    }
+    if (!success) {
+      vm_exit_during_initialization(err_msg("Wrong format of JBolt order line! line=\"%s\".", line));
+    }
+  }
+  fclose(_order_fp);
+  _order_fp = nullptr;
+}
+
+/**
+ * Phase-1 init for load/auto mode: allocate the hot-method containers and,
+ * in manual load mode, size JBoltCodeHeapSize from the order file if the
+ * flag was left at its default.
+ */
+void JBoltManager::init_load_mode_phase1() {
+  if (!(auto_mode() || JBoltLoadMode)) return;
+
+  if (auto_mode()) {
+    // auto mode has no order now, initialize as default.
+    _hot_methods_sorted = new (mtCompiler) GrowableArray<JBoltMethodKey>(1, mtCompiler);
+    _hot_methods_vis = new (mtCompiler) MethodKeyMap();
+    log_info(jbolt)("Default set JBoltCodeHeapSize=" UINTX_FORMAT " B (" UINTX_FORMAT " MB).", JBoltCodeHeapSize, JBoltCodeHeapSize / 1024 / 1024);
+    return;
+  }
+  guarantee(reorder_phase_available_to_collecting(), "sanity");
+  size_t total_nmethod_size = 0;
+  int method_cnt = 0;
+  load_order_file_phase1(&method_cnt, &total_nmethod_size);
+
+  _hot_methods_sorted = new (mtCompiler) GrowableArray<JBoltMethodKey>(method_cnt, mtCompiler);
+  _hot_methods_vis = new (mtCompiler) MethodKeyMap();
+
+  if (FLAG_IS_DEFAULT(JBoltCodeHeapSize)) {
+    FLAG_SET_ERGO(JBoltCodeHeapSize, calc_segment_size_with_padding(total_nmethod_size));
+    log_info(jbolt)("Auto set JBoltCodeHeapSize=" UINTX_FORMAT " B (" UINTX_FORMAT " MB).", JBoltCodeHeapSize, JBoltCodeHeapSize / 1024 / 1024);
+  }
+}
+
+// Phase-2 init for manual load mode: parse the order-file symbols and compute
+// the count of reorderable methods that triggers reordering.
+void JBoltManager::init_load_mode_phase2(TRAPS) {
+  // Only manual load mode need load phase2
+  if (!JBoltLoadMode) return;
+
+  load_order_file_phase2(CHECK);
+  _reorderable_method_cnt = 0;
+  _reorder_method_threshold_cnt = _hot_methods_sorted->length() * _jbolt_reorder_threshold;
+}
+
+// Phase-2 init for dump/auto mode: set up the call graph and the sampled-method
+// map, then start JBolt's own JFR recording ("jbolt-jfr") via a diagnostic command.
+void JBoltManager::init_dump_mode_phase2(TRAPS) {
+  if (!(auto_mode() || JBoltDumpMode)) return;
+
+  JBoltCallGraph::initialize();
+  _sampled_methods_refs = new (mtTracing) StackFrameKeyMap();
+
+  // JBolt will create a JFR by itself
+  // In auto mode, will stop in JBoltControlThread::start_thread() after JBoltSampleInterval.
+  // In manual dump mode, won't stop until program exit.
+  log_info(jbolt)("JBolt in dump mode now, start a JFR recording named \"jbolt-jfr\".");
+  bufferedStream output;
+  DCmd::parse_and_execute(DCmd_Source_Internal, &output, "JFR.start name=jbolt-jfr", ' ', THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    ResourceMark rm;
+    // A failed JFR start degrades JBolt but must not take down the VM.
+    log_warning(jbolt)("unable to start jfr jbolt-jfr");
+    log_warning(jbolt)("exception type: %s", PENDING_EXCEPTION->klass()->external_name());
+    // don't unwind this exception
+    CLEAR_PENDING_EXCEPTION;
+  }
+}
+
+// Replace the globally stored hot-function order with a copy of `order`.
+static void update_stored_order(const GrowableArray<JBoltFunc>* order) {
+  if (_order_stored != nullptr) {
+    // use a tmp for releasing space to prevent _order_stored from being a wild pointer
+    GrowableArray<JBoltFunc>* tmp = _order_stored;
+    _order_stored = nullptr;
+    delete tmp;
+  }
+  _order_stored = new (mtTracing) GrowableArray<JBoltFunc>(order->length(), mtTracing);
+  _order_stored->appendAll(order);
+}
+
+// Build a CompileTaskInfo describing how to recompile `method`, derived from
+// its current nmethod. Returns nullptr (with a warning) if the method
+// currently has no compiled code.
+static CompileTaskInfo* create_compile_task_info(methodHandle& method) {
+  CompiledMethod* compiled = method->code();
+  if (compiled == nullptr) {
+    log_warning(jbolt)("Recompilation Task init failed because of null nmethod. func: %s.", method->external_name());
+    return nullptr;
+  }
+  int osr_bci = compiled->is_osr_method() ? compiled->osr_entry_bci() : InvocationEntryBci;
+  int comp_level = compiled->comp_level();
+  // comp_level adaptation for deoptimization: anything above C1-simple is requested at C2 full-opt
+  if (comp_level > CompLevel_simple && comp_level <= CompLevel_full_optimization) comp_level = CompLevel_full_optimization;
+  CompileTask::CompileReason comp_reason = CompileTask::Reason_Reorder;
+  CompileTaskInfo* ret = new CompileTaskInfo(method(), osr_bci, comp_level, (int)comp_reason,
+      nullptr, 0);
+  return ret;
+}
+
+/**
+ * This function is invoked by JBoltControlThread.
+ * Do initialization for converting dump mode to load mode.
+ *
+ * Runs hfsort() over the sampled call graph, stores the resulting order, and
+ * fills _hot_methods_sorted/_hot_methods_vis with recompilation info.
+ * On return, *segment_size holds the padded total size of the ordered nmethods.
+ */
+void JBoltManager::init_auto_transition(size_t* segment_size, TRAPS) {
+  guarantee(UseJBolt && auto_mode(), "sanity");
+  NoSafepointVerifier nsv;
+  ResourceMark rm;
+
+  GrowableArray<JBoltFunc>* order = JBoltCallGraph::callgraph_instance().hfsort();
+  update_stored_order(order);
+
+  size_t seg_size = 0;
+  for (int i = 0; i < order->length(); ++i) {
+    const JBoltFunc& func = order->at(i);
+    // Entries without a class are skipped.
+    if (func.klass() == NULL) {
+      continue;
+    }
+
+    methodHandle method = lookup_method(const_cast<InstanceKlass*>(func.klass()), func.method_id());
+    if (method.is_null()) {
+      continue;
+    }
+
+    CompileTaskInfo* cti = create_compile_task_info(method);
+    if (cti == nullptr) {
+      continue;
+    }
+
+    JBoltMethodKey method_key = func.method_key();
+    JBoltMethodValue method_value;
+    if (!method_value.set_comp_info(cti)) {
+      delete cti;
+      continue;
+    }
+
+    seg_size += calc_nmethod_size_with_padding(func.size());
+    _hot_methods_sorted->append(method_key);
+    bool put = _hot_methods_vis->put(method_key, method_value);
+    if (!put) {
+      vm_exit_during_initialization(err_msg("Duplicated method: {%s %s %s}!",
+          method_key.klass()->as_C_string(),
+          method_key.name()->as_C_string(),
+          method_key.sig()->as_C_string()));
+    }
+    // The map now owns the CompileTaskInfo; drop this local's reference without freeing it.
+    method_value.clear_comp_info_but_not_release();
+  }
+  log_info(jbolt)("order generate successful !!");
+  *segment_size = calc_segment_size_with_padding(seg_size);
+  delete order;
+}
+
+/**
+ * This function must be invoked after CompilerConfig::ergo_initialize() in Arguments::apply_ergo().
+ * This function must be invoked before CodeCache::initialize_heaps() in codeCache_init() in init_globals().
+ * Thread and SymbolTable is not inited now!
+ */
+void JBoltManager::init_phase1() {
+  if (!UseJBolt) return;
+  // Validate flags and environment before touching any JBolt state.
+  check_mode();
+  check_dependency();
+  check_order_file();
+
+  /* dump mode has nothing to do in phase1 */
+  init_load_mode_phase1();
+}
+
+// Phase-2 initialization (threads and SymbolTable are available now):
+// performs per-mode setup and, except for manual dump mode, prepares the
+// JBoltControlThread that drives the reorder schedule.
+void JBoltManager::init_phase2(TRAPS) {
+  if (!UseJBolt) return;
+
+  ResourceMark rm(THREAD);
+  init_dump_mode_phase2(CHECK);
+  init_load_mode_phase2(CHECK);
+
+  // Manual dump mode doesn't need JBoltControlThread, directly go to profiling phase
+  if (JBoltDumpMode) {
+    guarantee(JBoltManager::reorder_phase_available_to_profiling(), "sanity");
+    return;
+  }
+
+  JBoltControlThread::init(CHECK);
+  // Auto mode will start control thread earlier.
+  // Manual load mode start later in check_start_reordering()
+  if (auto_mode()) {
+    JBoltControlThread::start_thread(CHECK_AND_CLEAR);
+  }
+}
+
+/**
+ * Code heaps are initialized between init phase 1 and init phase 2.
+ *
+ * Splits two JBolt heaps (hot + tmp, each JBoltCodeHeapSize bytes) out of the
+ * non-profiled segment and registers all five code heaps with the CodeCache.
+ * Exits the VM if the remaining space cannot hold the requested sizes.
+ */
+void JBoltManager::init_code_heaps(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, size_t ps, size_t alignment) {
+  assert(UseJBolt && !JBoltDumpMode, "sanity");
+  if (!is_aligned(JBoltCodeHeapSize, alignment)) {
+    // SIZE_FORMAT: `alignment` is a size_t; "%ld" is the wrong specifier on LLP64 platforms.
+    vm_exit_during_initialization(err_msg("JBoltCodeHeapSize should be " SIZE_FORMAT " aligned, please adjust", alignment));
+  }
+
+  size_t jbolt_hot_size = JBoltCodeHeapSize;
+  size_t jbolt_tmp_size = JBoltCodeHeapSize;
+  size_t jbolt_total_size = jbolt_hot_size + jbolt_tmp_size;
+  if (non_profiled_size <= jbolt_total_size) {
+    vm_exit_during_initialization(err_msg("Not enough space in non-profiled code heap to split out JBolt heap(s): " SIZE_FORMAT "K <= " SIZE_FORMAT "K", non_profiled_size/K, jbolt_total_size/K));
+  }
+  non_profiled_size -= jbolt_total_size;
+  non_profiled_size = align_down(non_profiled_size, alignment);
+  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);
+
+  // Memory layout with JBolt:
+  // ---------- high -----------
+  //   Non-profiled nmethods
+  //     JBolt tmp nmethods
+  //     JBolt hot nmethods
+  //      Non-nmethods
+  //    Profiled nmethods
+  // ---------- low ------------
+  ReservedCodeSpace rs = CodeCache::reserve_heap_memory(cache_size, ps);
+  ReservedSpace profiled_space = rs.first_part(profiled_size);
+  ReservedSpace r1 = rs.last_part(profiled_size);
+  ReservedSpace non_nmethod_space = r1.first_part(non_nmethod_size);
+  ReservedSpace r2 = r1.last_part(non_nmethod_size);
+  ReservedSpace jbolt_hot_space = r2.first_part(jbolt_hot_size);
+  ReservedSpace r3 = r2.last_part(jbolt_hot_size);
+  ReservedSpace jbolt_tmp_space = r3.first_part(jbolt_tmp_size);
+  ReservedSpace non_profiled_space = r3.last_part(jbolt_tmp_size);
+
+  CodeCache::add_heap(non_nmethod_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
+  CodeCache::add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
+  CodeCache::add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
+  // Record which JBolt heap (if any) failed to get its full reservation.
+  const char* no_space = nullptr;
+  CodeCache::add_heap(jbolt_hot_space, "CodeHeap 'jbolt hot nmethods'", CodeBlobType::MethodJBoltHot);
+  if (jbolt_hot_size != jbolt_hot_space.size()) {
+    no_space = "hot";
+  }
+  CodeCache::add_heap(jbolt_tmp_space, "CodeHeap 'jbolt tmp nmethods'", CodeBlobType::MethodJBoltTmp);
+  if (jbolt_tmp_size != jbolt_tmp_space.size()) {
+    no_space = "tmp";
+  }
+  if (no_space != nullptr) {
+    vm_exit_during_initialization(FormatBuffer<1024>(
+        "No enough space for JBolt %s heap: \n"
+        "Expect: cache_size=" SIZE_FORMAT "K, profiled_size=" SIZE_FORMAT "K, non_nmethod_size=" SIZE_FORMAT "K, jbolt_hot_size=" SIZE_FORMAT "K, non_profiled_size=" SIZE_FORMAT "K, jbolt_tmp_size=" SIZE_FORMAT "K\n"
+        "Actual: cache_size=" SIZE_FORMAT "K, profiled_size=" SIZE_FORMAT "K, non_nmethod_size=" SIZE_FORMAT "K, jbolt_hot_size=" SIZE_FORMAT "K, non_profiled_size=" SIZE_FORMAT "K, jbolt_tmp_size=" SIZE_FORMAT "K\n"
+        "alignment=" SIZE_FORMAT,
+        no_space,
+        cache_size/K, profiled_size/K, non_nmethod_size/K, jbolt_hot_size/K, non_profiled_size/K, jbolt_tmp_size/K,
+        rs.size()/K, profiled_space.size()/K, non_nmethod_space.size()/K, jbolt_hot_space.size()/K, non_profiled_space.size()/K, jbolt_tmp_space.size()/K,
+        alignment));
+  }
+}
+
+// ---- Reorder-phase state machine -------------------------------------------
+// Transitions use CAS so that racing threads can claim a phase change at most
+// once; reads use acquire semantics to pair with the CAS/release stores.
+
+int JBoltManager::reorder_phase() {
+  return Atomic::load_acquire(&_reorder_phase);
+}
+
+// Manual (two-phase) mode: Available -> Collecting.
+bool JBoltManager::reorder_phase_available_to_collecting() {
+  assert(!auto_mode(), "two-phase only");
+  return Atomic::cmpxchg(&_reorder_phase, JBoltReorderPhase::Available, JBoltReorderPhase::Collecting) == JBoltReorderPhase::Available;
+}
+
+// Manual (two-phase) mode: Collecting -> Reordering.
+bool JBoltManager::reorder_phase_collecting_to_reordering() {
+  assert(!auto_mode(), "two-phase only");
+  return Atomic::cmpxchg(&_reorder_phase, JBoltReorderPhase::Collecting, JBoltReorderPhase::Reordering) == JBoltReorderPhase::Collecting;
+}
+
+// Auto (one-phase) mode: Available -> Profiling.
+bool JBoltManager::reorder_phase_available_to_profiling() {
+  assert(auto_mode(), "one-phase only");
+  return Atomic::cmpxchg(&_reorder_phase, JBoltReorderPhase::Available, JBoltReorderPhase::Profiling) == JBoltReorderPhase::Available;
+}
+
+// Auto (one-phase) mode: Profiling -> Reordering.
+bool JBoltManager::reorder_phase_profiling_to_reordering() {
+  assert(auto_mode(), "one-phase only");
+  return Atomic::cmpxchg(&_reorder_phase, JBoltReorderPhase::Profiling, JBoltReorderPhase::Reordering) == JBoltReorderPhase::Profiling;
+}
+
+// Auto (one-phase) mode: Reordering -> Available (ready for another cycle).
+bool JBoltManager::reorder_phase_reordering_to_available() {
+  assert(auto_mode(), "one-phase only");
+  return Atomic::cmpxchg(&_reorder_phase, JBoltReorderPhase::Reordering, JBoltReorderPhase::Available) == JBoltReorderPhase::Reordering;
+}
+
+// Auto (one-phase) mode: Profiling -> Available (cycle abandoned).
+bool JBoltManager::reorder_phase_profiling_to_available() {
+  assert(auto_mode(), "one-phase only");
+  return Atomic::cmpxchg(&_reorder_phase, JBoltReorderPhase::Profiling, JBoltReorderPhase::Available) == JBoltReorderPhase::Profiling;
+}
+
+// Profiling -> Waiting (no mode assert: used by both modes).
+bool JBoltManager::reorder_phase_profiling_to_waiting() {
+  return Atomic::cmpxchg(&_reorder_phase, JBoltReorderPhase::Profiling, JBoltReorderPhase::Waiting) == JBoltReorderPhase::Profiling;
+}
+
+// Auto (one-phase) mode: Waiting -> Reordering.
+bool JBoltManager::reorder_phase_waiting_to_reordering() {
+  assert(auto_mode(), "one-phase only");
+  return Atomic::cmpxchg(&_reorder_phase, JBoltReorderPhase::Waiting, JBoltReorderPhase::Reordering) == JBoltReorderPhase::Waiting;
+}
+
+// Auto (one-phase) mode: Waiting -> Available.
+bool JBoltManager::reorder_phase_waiting_to_available() {
+  assert(auto_mode(), "one-phase only");
+  return Atomic::cmpxchg(&_reorder_phase, JBoltReorderPhase::Waiting, JBoltReorderPhase::Available) == JBoltReorderPhase::Waiting;
+}
+
+// Reordering -> End (terminal state; no mode assert).
+bool JBoltManager::reorder_phase_reordering_to_end() {
+  return Atomic::cmpxchg(&_reorder_phase, JBoltReorderPhase::Reordering, JBoltReorderPhase::End) == JBoltReorderPhase::Reordering;
+}
+
+bool JBoltManager::reorder_phase_is_waiting() {
+  return Atomic::load_acquire(&_reorder_phase) == JBoltReorderPhase::Waiting;
+}
+
+bool JBoltManager::reorder_phase_is_available() {
+  bool res = (Atomic::load_acquire(&_reorder_phase) == JBoltReorderPhase::Available);
+  assert(!res || auto_mode(), "one-phase only");
+  return res;
+}
+
+bool JBoltManager::reorder_phase_is_collecting() {
+  bool res = (Atomic::load_acquire(&_reorder_phase) == JBoltReorderPhase::Collecting);
+  assert(!res || !auto_mode(), "two-phase only");
+  return res;
+}
+
+bool JBoltManager::reorder_phase_is_profiling() {
+  bool res = (Atomic::load_acquire(&_reorder_phase) == JBoltReorderPhase::Profiling);
+  assert(!res || auto_mode(), "for two-phase dump mode & one-phase");
+  return res;
+}
+
+bool JBoltManager::reorder_phase_is_reordering() {
+  return Atomic::load_acquire(&_reorder_phase) == JBoltReorderPhase::Reordering;
+}
+
+bool JBoltManager::reorder_phase_is_collecting_or_reordering() {
+  int p = Atomic::load_acquire(&_reorder_phase);
+  assert(p != JBoltReorderPhase::Collecting || !auto_mode(), "two-phase only");
+  return p == JBoltReorderPhase::Collecting || p == JBoltReorderPhase::Reordering;
+}
+
+bool JBoltManager::reorder_phase_is_profiling_or_waiting() {
+  int p = Atomic::load_acquire(&_reorder_phase);
+  return p == JBoltReorderPhase::Profiling || p == JBoltReorderPhase::Waiting;
+}
+
+// The method currently being recompiled into the hot heap (see calc_code_blob_type()).
+Method* JBoltManager::cur_reordering_method() {
+  return Atomic::load_acquire(&_cur_reordering_method);
+}
+
+void JBoltManager::set_cur_reordering_method(Method* method) {
+  Atomic::release_store(&_cur_reordering_method, method);
+}
+
+int JBoltManager::inc_reorderable_method_cnt() {
+  return Atomic::add(&_reorderable_method_cnt, +1);
+}
+
+bool JBoltManager::can_reorder_now() {
+  return Atomic::load_acquire(&_reorderable_method_cnt) >= _reorder_method_threshold_cnt;
+}
+
+// Exact-equality check so the threshold triggers only once.
+bool JBoltManager::should_reorder_now() {
+  return Atomic::load_acquire(&_reorderable_method_cnt) == _reorder_method_threshold_cnt;
+}
+
+bool JBoltManager::gc_should_sweep_code_heaps_now() {
+  return Atomic::load_acquire(&_gc_should_sweep_code_heaps_now) != 0;
+}
+
+CodeBlobType JBoltManager::primary_hot_seg() {
+  return CodeBlobType(Atomic::load_acquire(&_primary_hot_seg));
+}
+
+CodeBlobType JBoltManager::secondary_hot_seg() {
+  return CodeBlobType(Atomic::load_acquire(&_secondary_hot_seg));
+}
+
+// Re-create the manager's empty containers for a new cycle; the previous
+// containers must already have been freed elsewhere. Always returns 0.
+int JBoltManager::clear_manager() {
+  /* _hot_methods_sorted, _hot_methods_vis and _sampled_methods_refs have been cleared in other pos, don't delete again */
+  guarantee(_hot_methods_sorted == nullptr, "sanity");
+  guarantee(_hot_methods_vis == nullptr, "sanity");
+  guarantee(_sampled_methods_refs == nullptr, "sanity");
+  // Re-allocate them
+  _hot_methods_sorted = new (mtCompiler) GrowableArray<JBoltMethodKey>(1, mtCompiler);
+  _hot_methods_vis = new (mtCompiler) MethodKeyMap();
+  _sampled_methods_refs = new (mtTracing) StackFrameKeyMap();
+
+  return 0;
+}
+
+/**
+ * Invoked in JBoltControlThread::prev_control_schedule().
+ * Expect to only execute in auto mode while JBolt.start triggered.
+ * Clear JBolt related data structures to restore an initial env same as sample never happening.
+ *
+ * NOTE(review): `ret` is overwritten at each step, so only the status of
+ * clear_manager() is returned — confirm whether earlier failures should be
+ * propagated too.
+*/
+int JBoltManager::clear_last_sample_datas() {
+  int ret = 0;
+  // Clear _table_jbolt in JfrStackTraceRepository
+  ret = JfrStackTraceRepository::clear_jbolt();
+  // Clear JBoltCallGraph
+  ret = JBoltCallGraph::callgraph_instance().clear_instance();
+  // Clear JBoltManager
+  ret = clear_manager();
+
+  return ret;
+}
+
+/**
+ * Invoked in JBoltControlThread::prev_control_schedule().
+ * Swap primary hot segment with secondary hot segment
+ */
+void JBoltManager::swap_semi_jbolt_segs() {
+  // The guarantee checks the Waiting phase, so the message must say Waiting (it used to say Profiling).
+  guarantee(reorder_phase_is_waiting(), "swap must happen in reorder phase Waiting.");
+  // NOTE(review): the two xchgs are not atomic as a pair — this assumes no
+  // concurrent caller mutates these fields; confirm only the control thread swaps.
+  int tmp = Atomic::xchg(&_secondary_hot_seg, Atomic::load_acquire(&_primary_hot_seg));
+  Atomic::xchg(&_primary_hot_seg, tmp);
+}
+
+/**
+ * Invoked in JBoltControlThread::post_control_schdule().
+ * Free secondary hot segment space for next reorder: keep the live methods of
+ * that heap alive, recompile them into the non-profiled heap, then force two
+ * GCs with code-heap sweeping enabled to flush the old copies out.
+ */
+void JBoltManager::clear_secondary_hot_seg(TRAPS) {
+  guarantee(reorder_phase_is_available(), "secondary clear must happen in reorder phase Available.");
+  // scan secondary hot seg and recompile alive nmethods to non-profiled
+  ResourceMark rm(THREAD);
+  // We cannot alloc weak handle within CodeCache_lock because of the mutex rank check.
+  // So instead we keep the methods alive only within the scope of this method.
+  JBoltUtils::MetaDataKeepAliveMark mdm(THREAD);
+  const GrowableArray<Metadata*>& to_recompile = mdm.kept();
+
+  {
+    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    CodeHeap* sec_hot = CodeCache::get_code_heap(secondary_hot_seg());
+    for (CodeBlob* cb = (CodeBlob*) sec_hot->first(); cb != nullptr; cb = (CodeBlob*) sec_hot->next(cb)) {
+      nmethod* nm = cb->as_nmethod_or_null();
+      // Check nm before dereferencing: the heap may contain non-nmethod blobs,
+      // and the previous code read nm->method() before the null check.
+      if (nm != nullptr && nm->get_state() == CompiledMethod::in_use && nm->method() != nullptr) {
+        mdm.add(nm->method());
+      }
+    }
+  }
+
+  for (int i = 0; i < to_recompile.length(); ++i) {
+    Method* m = (Method*) to_recompile.at(i);
+    methodHandle method(THREAD, m);
+    CompileTaskInfo* cti = create_compile_task_info(method);
+    if (cti == nullptr) continue;
+    guarantee(cti->try_select(), "method is on stack, should be ok");
+    assert(cti->hot_method() == nullptr, "sanity");
+    methodHandle hot_method;
+
+    bool recompile_result = enqueue_recompile_task(cti, method, hot_method, THREAD);
+    if (recompile_result) {
+      check_compiled_result(method(), CodeBlobType::MethodNonProfiled, THREAD);
+    }
+    delete cti;
+  }
+
+  // Two collections: the first may only mark nmethods not-entrant; the second reclaims them.
+  Atomic::release_store(&_gc_should_sweep_code_heaps_now, 1);
+  Universe::heap()->collect(GCCause::_java_lang_system_gc);
+  Universe::heap()->collect(GCCause::_java_lang_system_gc);
+  Atomic::release_store(&_gc_should_sweep_code_heaps_now, 0);
+  log_info(jbolt)("Sweep secondary segment");
+  print_code_heaps();
+}
+
+/**
+ * Invoked in ciEnv::register_method() in CompilerThread.
+ * Controls where the new nmethod should be allocated.
+ *
+ * Returns CodeBlobType::All if it is not determined by JBolt logic.
+ */
+CodeBlobType JBoltManager::calc_code_blob_type(Method* method, CompileTask* task, TRAPS) {
+  assert(UseJBolt && reorder_phase_is_collecting_or_reordering(), "sanity");
+  const CodeBlobType not_care = CodeBlobType::All;
+
+  // Only cares about non-profiled segment.
+  int lvl = task->comp_level();
+  if (lvl != CompLevel_full_optimization && lvl != CompLevel_simple) {
+    return not_care;
+  }
+
+  // Ignore on-stack-replacement.
+  if (task->osr_bci() != InvocationEntryBci) {
+    return not_care;
+  }
+
+  int cur_reorder_phase = reorder_phase();
+  // Do nothing after reordering.
+  if (cur_reorder_phase != JBoltReorderPhase::Collecting && cur_reorder_phase != JBoltReorderPhase::Reordering) {
+    return not_care;
+  }
+  // Only cares about the current reordering method.
+  if (cur_reorder_phase == JBoltReorderPhase::Reordering) {
+    if (cur_reordering_method() == method) {
+      log_trace(jbolt)("Compiling to JBolt heap: method=%s.", method->name_and_sig_as_C_string());
+      return primary_hot_seg();
+    }
+    return not_care;
+  }
+  // From here on: Collecting phase (manual mode only).
+  guarantee(cur_reorder_phase == JBoltReorderPhase::Collecting, "sanity");
+  assert(!auto_mode(), "sanity");
+
+  JBoltMethodKey method_key(method);
+  JBoltMethodValue* method_value = _hot_methods_vis->get(method_key);
+  if (method_value == nullptr) {
+    return not_care;
+  }
+
+  // Register the method and the compile task.
+  if (method_value->get_comp_info() == nullptr) {
+    CompileTaskInfo* cti = new CompileTaskInfo(method, task->osr_bci(), task->comp_level(), (int) task->compile_reason(),
+        task->hot_method(), task->hot_count());
+    if (method_value->set_comp_info(cti)) {
+      int cnt = inc_reorderable_method_cnt();
+      log_trace(jbolt)("Reorderable method found: cnt=%d, lvl=%d, p=%p, method=%s.",
+          cnt, task->comp_level(), method, method->name_and_sig_as_C_string());
+      // Log progress at power-of-two distances from the threshold.
+      if (is_power_of_2(_reorder_method_threshold_cnt - cnt)) {
+        log_debug(jbolt)("Reorderable cnt: %d/%d/%d", cnt, _reorder_method_threshold_cnt, _hot_methods_sorted->length());
+      }
+      if (cnt == _reorder_method_threshold_cnt) {
+        log_info(jbolt)("Time to reorder: %d/%d/%d", cnt, _reorder_method_threshold_cnt, _hot_methods_sorted->length());
+        // Remember the compiler thread; it will kick off reordering in check_start_reordering().
+        _start_reordering_thread = THREAD;
+      }
+    } else {
+      delete cti;
+    }
+  }
+
+  return secondary_hot_seg();
+}
+
+/**
+ * Check if reordering should start.
+ * The reordering should only start once (for now).
+ * We don't do this check in "if (cnt == _reorder_method_threshold_cnt)" in calc_code_blob_type()
+ * because it will cause an assert error: "Possible safepoint reached by thread that does not allow it".
+ */
+void JBoltManager::check_start_reordering(TRAPS) {
+  // _start_reordering_thread is set and tested in the same thread. No need to be atomic.
+  if (_start_reordering_thread == THREAD) {
+    _start_reordering_thread = nullptr;
+    if (JBoltControlThread::get_thread() == nullptr) {
+      assert(can_reorder_now(), "sanity");
+      log_info(jbolt)("Starting JBoltControlThread to reorder.");
+      JBoltControlThread::start_thread(CHECK_AND_CLEAR);
+    }
+  }
+}
+
+/**
+ * The task will be added to the compile queue and be compiled just like other tasks.
+ *
+ * Builds a blocking CompileTask from the recorded CompileTaskInfo; the caller
+ * is responsible for enqueueing it.
+ */
+CompileTask* JBoltManager::create_a_task_instance(CompileTaskInfo* cti, methodHandle& method, methodHandle& hot_method, TRAPS) {
+  int osr_bci = cti->osr_bci();
+  int comp_level = cti->comp_level();
+  CompileTask::CompileReason comp_reason = (CompileTask::CompileReason) cti->comp_reason();
+  int hot_count = cti->hot_count();
+  bool is_blocking = true;
+
+  // init a task (@see CompileBroker::create_compile_task())
+  CompileTask* task = CompileTask::allocate();
+  int compile_id = CompileBroker::assign_compile_id(method, osr_bci);
+  task->initialize(compile_id, method, osr_bci, comp_level,
+      hot_method, hot_count, comp_reason,
+      is_blocking);
+  return task;
+}
+
+/**
+ * Print the failure reason if something is wrong in recompilation.
+ *
+ * Verifies that `method`'s current nmethod ended up in the expected code heap
+ * (`check_blob_type`); logs a warning otherwise. Diagnostic only.
+ */
+void JBoltManager::check_compiled_result(Method* method, CodeBlobType check_blob_type, TRAPS) {
+  CompiledMethod* cm = method->code();
+  if (cm == nullptr) {
+    log_warning(jbolt)("Recompilation failed because of null nmethod.");
+    return;
+  }
+  nmethod* nm = cm->as_nmethod_or_null();
+  if (nm == nullptr) {
+    log_warning(jbolt)("Recompilation failed because the code is not a nmethod.");
+    return;
+  }
+  CodeBlobType code_blob_type = CodeCache::get_code_blob_type(nm);
+  if (code_blob_type != check_blob_type) {
+    log_warning(jbolt)("Recompilation failed because the nmethod is not in heap [%s]: it's in [%s].",
+        CodeCache::get_code_heap_name(check_blob_type), CodeCache::get_code_heap_name(code_blob_type));
+    return;
+  }
+  log_trace(jbolt)("Recompilation good: code=%p, size=%d, method=%s, heap=%s.",
+      nm, nm->size(), method->name_and_sig_as_C_string(), CodeCache::get_code_heap_name(check_blob_type));
+}
+
+/**
+ * Create the compile task instance and enqueue into compile queue
+ *
+ * Blocks until the compilation completes. Returns false if the method is
+ * already queued or the task could not be created.
+ */
+bool JBoltManager::enqueue_recompile_task(CompileTaskInfo* cti, methodHandle& method, methodHandle& hot_method, TRAPS) {
+  CompileTask* task = nullptr;
+  CompileQueue* queue = CompileBroker::compile_queue(cti->comp_level());
+  { MutexLocker locker(THREAD, MethodCompileQueue_lock);
+    if (CompileBroker::compilation_is_in_queue(method)) {
+      log_warning(jbolt)("JBOLT won't compile as \"compilation is in queue\": method=%s.", method->name_and_sig_as_C_string());
+      return false;
+    }
+
+    task = create_a_task_instance(cti, method, hot_method, CHECK_AND_CLEAR_false);
+    if (task == nullptr) {
+      log_warning(jbolt)("JBOLT won't compile as \"task instance is NULL\": method=%s.", method->name_and_sig_as_C_string());
+      return false;
+    }
+    queue->add(task);
+  }
+
+  // Same waiting logic as CompileBroker::wait_for_completion().
+  { MonitorLocker ml(THREAD, task->lock());
+    while (!task->is_complete() && !CompileBroker::is_compilation_disabled_forever()) {
+      ml.wait();
+    }
+  }
+
+  // NOTE(review): wait_for_completion() also waits on task->lock(), so the
+  // manual wait loop above looks redundant — confirm whether both are needed.
+  CompileBroker::wait_for_completion(task);
+  task = nullptr; // freed
+  return true;
+}
+
+/**
+ * Recompilation is to move the nmethod to _primary_hot_seg.
+ *
+ * Marks `method` as the current reordering method (so calc_code_blob_type()
+ * routes it to the hot heap), enqueues a blocking recompile, then verifies
+ * where the resulting nmethod landed.
+ */
+bool JBoltManager::recompile_one(CompileTaskInfo* cti, methodHandle& method, methodHandle& hot_method, TRAPS) {
+  ResourceMark rm(THREAD);
+
+  if (cti->osr_bci() != InvocationEntryBci) {
+    log_trace(jbolt)("We don't handle on-stack-replacement nmethods: method=%s.", method->name_and_sig_as_C_string());
+    return false;
+  }
+
+  if (log_is_enabled(Trace, jbolt)) {
+    const char* heap_name = nullptr;
+    CompiledMethod* cm = method->code();
+    if (cm == nullptr) heap_name = "<null>";
+    else if (!cm->is_nmethod()) heap_name = "<not-nmethod>";
+    else heap_name = CodeCache::get_code_heap_name(CodeCache::get_code_blob_type(cm));
+    log_trace(jbolt)("Start to recompile & reorder: heap=%s, method=%s.", heap_name, method->name_and_sig_as_C_string());
+  }
+
+  // Add a compilation task.
+  set_cur_reordering_method(method());
+  enqueue_recompile_task(cti, method, hot_method, CHECK_AND_CLEAR_false);
+  check_compiled_result(method(), primary_hot_seg(), CHECK_AND_CLEAR_false);
+
+  return true;
+}
+
+/**
+ * This method is invoked in a new thread JBoltControlThread.
+ * Recompiles the methods in the order list one by one (serially) based on the hot order.
+ * The methods to recompile were almost all in MethodJBoltTmp, and will be installed in
+ * MethodJBoltHot after recompilation.
+ */
+void JBoltManager::reorder_all_methods(TRAPS) {
+  guarantee(UseJBolt && reorder_phase_is_reordering(), "sanity");
+  log_info(jbolt)("Start to reorder!");
+  print_code_heaps();
+
+  ResourceMark rm(THREAD);
+  for (int i = 0; i < _hot_methods_sorted->length(); ++i) {
+    JBoltMethodKey k = _hot_methods_sorted->at(i);
+    JBoltMethodValue* v = _hot_methods_vis->get(k);
+    if (v == nullptr) continue;
+    CompileTaskInfo* cti = v->get_comp_info();
+    if (cti == nullptr) continue;
+    if (!cti->try_select()) continue;
+
+    methodHandle method(THREAD, cti->method());
+    methodHandle hot_method(THREAD, cti->hot_method());
+
+    // A failed recompilation is logged and skipped; the loop keeps going.
+    recompile_one(cti, method, hot_method, THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      Handle ex(THREAD, PENDING_EXCEPTION);
+      CLEAR_PENDING_EXCEPTION;
+      LogTarget(Warning, jbolt) lt;
+      if (lt.is_enabled()) {
+        LogStream ls(lt);
+        ls.print("Failed to recompile the method: %s.", method->name_and_sig_as_C_string());
+        java_lang_Throwable::print(ex(), &ls);
+      }
+    }
+  }
+
+  log_info(jbolt)("JBolt reordering succeeds.");
+  print_code_heaps();
+
+}
+
+// Tear down all JBolt data structures (sampled refs, call graph, hot-method
+// order and map) and reset the current reordering method.
+void JBoltManager::clear_structures() {
+  delete _sampled_methods_refs;
+  _sampled_methods_refs = nullptr;
+  JBoltCallGraph::deinitialize();
+  set_cur_reordering_method(nullptr);
+  delete _hot_methods_sorted;
+  _hot_methods_sorted = nullptr;
+  delete _hot_methods_vis;
+  _hot_methods_vis = nullptr;
+}
+
+// Dump one code heap: each blob's address, size, nmethod-ness, entrant state
+// and holder/name/signature (or the blob name for non-nmethods).
+void JBoltManager::print_code_heap(outputStream& ls, CodeHeap* heap, const char* name) {
+  for (CodeBlob* cb = (CodeBlob*) heap->first(); cb != nullptr; cb = (CodeBlob*) heap->next(cb)) {
+    nmethod* nm = cb->as_nmethod_or_null();
+    Method* m = nm != nullptr ? nm->method() : nullptr;
+    // Use "?" placeholders instead of nullptr for "%s" arguments — passing a
+    // null pointer to "%s" is undefined behavior.
+    ls.print_cr("%s %p %d nmethod=%s, entrant=%s, name=[%s %s %s]",
+        name,
+        cb, cb->size(),
+        B_TF(cb->is_nmethod()),
+        nm != nullptr ? B_TF(!nm->is_not_entrant()) : "?",
+        m != nullptr ? m->method_holder()->name()->as_C_string() : cb->name(),
+        m != nullptr ? m->name()->as_C_string() : "?",
+        m != nullptr ? m->signature()->as_C_string() : "?");
+  }
+}
+
+// Log a code-cache summary at Debug level and a per-blob dump of the two JBolt
+// heaps at Trace level. Note: if Debug logging is disabled the early return in
+// the first scope skips the Trace dump too (Trace implies Debug is enabled).
+void JBoltManager::print_code_heaps() {
+  {
+    LogTarget(Debug, jbolt) lt;
+    if (!lt.is_enabled()) return;
+    LogStream ls(lt);
+    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    CodeCache::print_summary(&ls, true);
+  }
+
+  {
+    LogTarget(Trace, jbolt) lt;
+    if (!lt.is_enabled()) return;
+    LogStream ls(lt);
+    CodeHeap* hot_heap = CodeCache::get_code_heap(CodeBlobType::MethodJBoltHot);
+    CodeHeap* tmp_heap = CodeCache::get_code_heap(CodeBlobType::MethodJBoltTmp);
+
+    ResourceMark rm;
+    if (hot_heap == nullptr) {
+      ls.print_cr("The jbolt hot heap is null.");
+    } else {
+      print_code_heap(ls, hot_heap, "hot");
+    }
+    if (tmp_heap == nullptr) {
+      ls.print_cr("The jbolt tmp heap is null.");
+    } else {
+      print_code_heap(ls, tmp_heap, "tmp");
+    }
+  }
+}
+
+#undef B_TF
\ No newline at end of file
diff --git a/src/hotspot/share/jbolt/jBoltManager.hpp b/src/hotspot/share/jbolt/jBoltManager.hpp
new file mode 100644
index 000000000..c0e752073
--- /dev/null
+++ b/src/hotspot/share/jbolt/jBoltManager.hpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_JBOLT_JBOLTMANAGER_HPP
+#define SHARE_JBOLT_JBOLTMANAGER_HPP
+
+#include "compiler/compileTask.hpp"
+#include "jbolt/jbolt_globals.hpp"
+#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/dcmd/jfrDcmds.hpp"
+#include "memory/allocation.hpp"
+#include "memory/heap.hpp"
+#include "oops/symbol.hpp"
+#include "runtime/handles.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/resourceHash.hpp"
+
+class CompileTask;
+class CompileTaskInfo;
+class Method;
+class Thread;
+
+enum JBoltErrorCode { // result codes for JBoltManager::dump_order_in_jcmd()
+ JBoltOK = 0,
+ JBoltOrderNULL = 1, // no order data to dump (name suggests the order is null/empty -- confirm in jcmd handler)
+ JBoltOpenFileError = 2 // the order file could not be opened
+};
+
+struct JBoltReorderPhase { // phase constants for JBoltManager::_reorder_phase (state machine of the reorder schedule)
+ static const int Waiting = -1; // JBolt logic is waiting for something to be done.
+ static const int Available = 0; // JBolt logic is not working or is done (can be reordered again now).
+ static const int Collecting = 1; // Collecting methods in the order file (this phase is for two-phase only).
+ static const int Profiling = 2; // JFR is working (this phase is for one-phase only).
+ static const int Reordering = 3; // Recompiling and re-laying.
+ static const int End = 4; // JBolt is not available anymore (for two-phase, or error happened on one-phase).
+};
+
+class JBoltMethodKey : public StackObj { // hash key identifying a method by (holder klass, name, signature) symbols
+ Symbol* _klass;
+ Symbol* _name;
+ Symbol* _sig;
+
+ void inc_ref_cnt() { // keep the three symbols alive while this key holds them
+ Symbol* arr[] = { _klass, _name, _sig };
+ for (int i = 0; i < (int) (sizeof(arr) / sizeof(arr[0])); ++i) {
+ if (arr[i] != nullptr) arr[i]->increment_refcount();
+ }
+ }
+
+ void dec_ref_cnt() {
+ Symbol* arr[] = { _klass, _name, _sig };
+ for (int i = 0; i < (int) (sizeof(arr) / sizeof(arr[0])); ++i) {
+ if (arr[i] != nullptr) arr[i]->decrement_refcount();
+ }
+ }
+public:
+
+ JBoltMethodKey(Symbol* klass, Symbol* name, Symbol* sig): _klass(klass), _name(name), _sig(sig) { /* no inc_ref_cnt() here for SymbolTable::new_symbol() */ }
+ JBoltMethodKey(Method* method): _klass(method->method_holder()->name()), _name(method->name()), _sig(method->signature()) { inc_ref_cnt(); }
+ JBoltMethodKey(const JBoltMethodKey& other): _klass(other._klass), _name(other._name), _sig(other._sig) { inc_ref_cnt(); }
+ JBoltMethodKey(): _klass(nullptr), _name(nullptr), _sig(nullptr) {}
+ ~JBoltMethodKey() { dec_ref_cnt(); }
+
+ JBoltMethodKey& operator = (const JBoltMethodKey& other) {
+ if (this != &other) { // self-assignment guard: dec_ref_cnt() first could free the symbols before the re-increment
+ dec_ref_cnt();
+ _klass = other._klass; _name = other._name; _sig = other._sig;
+ inc_ref_cnt();
+ }
+ return *this;
+ }
+
+ unsigned hash() const {
+ int v = primitive_hash(_klass);
+ v = v * 31 + primitive_hash(_name);
+ v = v * 31 + primitive_hash(_sig);
+ return v;
+ }
+ bool equals(const JBoltMethodKey& other) const {
+ return _klass == other._klass && _name == other._name && _sig == other._sig;
+ }
+
+ static unsigned calc_hash(const JBoltMethodKey& k) {
+ return k.hash();
+ }
+ static bool calc_equals(const JBoltMethodKey& k1, const JBoltMethodKey& k2) {
+ return k1.equals(k2);
+ }
+
+ Symbol* klass() const { return _klass; }
+ Symbol* name() const { return _name; }
+ Symbol* sig() const { return _sig; }
+};
+
+class JBoltMethodValue : public StackObj { // value side of MethodKeyMap: owns an optional CompileTaskInfo
+private:
+ CompileTaskInfo* volatile _comp_info; // set/read concurrently -- see set_comp_info()/get_comp_info()
+
+public:
+ JBoltMethodValue(): _comp_info(nullptr) {}
+ ~JBoltMethodValue();
+
+ CompileTaskInfo* get_comp_info();
+ bool set_comp_info(CompileTaskInfo* info); // presumably only succeeds once -- confirm in .cpp
+ void clear_comp_info_but_not_release(); // detach without deleting (ownership transferred to caller)
+};
+
+class CompileTaskInfo : public CHeapObj<mtCompiler> { // snapshot of a compile request, replayed later as a recompile task
+ Method* const _method;
+ jobject _method_holder; // JNI handle, presumably keeping the holder (and thus _method) alive -- confirm in ctor
+ const int _osr_bci;
+ const int _comp_level;
+ const int _comp_reason;
+ Method* const _hot_method; // may differ from _method when inlining triggered the compile
+ jobject _hot_method_holder;
+ const int _hot_count;
+
+public:
+ CompileTaskInfo(Method* method, int osr_bci, int comp_level, int comp_reason, Method* hot_method, int hot_cnt);
+ ~CompileTaskInfo();
+
+ bool try_select(); // checks whether the recorded methods are still usable -- confirm semantics in .cpp
+
+ Method* method() const { return _method; }
+ int osr_bci() const { return _osr_bci; }
+ int comp_level() const { return _comp_level; }
+ int comp_reason() const { return _comp_reason; }
+ Method* hot_method() const { return _hot_method; }
+ int hot_count() const { return _hot_count; }
+};
+
+class JBoltStackFrameKey : public StackObj { // hash key identifying a sampled frame by (klass, JFR method id)
+ InstanceKlass* _klass;
+ traceid _methodid;
+
+public:
+ JBoltStackFrameKey(InstanceKlass* klass, traceid methodid): _klass(klass), _methodid(methodid) {}
+ JBoltStackFrameKey(const JBoltStackFrameKey& other): _klass(other._klass), _methodid(other._methodid) {}
+ JBoltStackFrameKey(): _klass(nullptr), _methodid(0) {} // was NULL: use nullptr for consistency with the rest of this file
+ ~JBoltStackFrameKey() { /* nothing to do as _klass is a softcopy of JfrStackFrame::_klass */ }
+
+
+ JBoltStackFrameKey& operator = (const JBoltStackFrameKey& other) {
+ _klass = other._klass;
+ _methodid = other._methodid;
+ return *this;
+ }
+
+ unsigned hash() const {
+ int v = primitive_hash(_klass);
+ v = v * 31 + primitive_hash(_methodid);
+ return v;
+ }
+
+ bool equals(const JBoltStackFrameKey& other) const {
+ return _klass == other._klass && _methodid == other._methodid;
+ }
+
+ static unsigned calc_hash(const JBoltStackFrameKey& k) {
+ return k.hash();
+ }
+
+ static bool calc_equals(const JBoltStackFrameKey& k1, const JBoltStackFrameKey& k2) {
+ return k1.equals(k2);
+ }
+};
+
+class JBoltStackFrameValue : public StackObj { // value side of StackFrameKeyMap: holds a JNI handle to the method holder
+private:
+ jobject _method_holder; // presumably keeps the holder class alive while sampled -- confirm in .cpp
+
+public:
+ JBoltStackFrameValue(jobject method_holder): _method_holder(method_holder) {}
+ ~JBoltStackFrameValue();
+
+ jobject get_method_holder();
+ void clear_method_holder_but_not_release(); // detach without destroying the handle (ownership moves to caller)
+};
+
+class JBoltManager : public AllStatic { // central coordinator of the JBolt feature (all-static)
+ friend class JBoltControlThread;
+
+ typedef ResourceHashtable<const JBoltMethodKey, JBoltMethodValue, // hot-method table keyed by (klass, name, sig)
+ 15889, AnyObj::C_HEAP, mtCompiler,
+ JBoltMethodKey::calc_hash, JBoltMethodKey::calc_equals
+ > MethodKeyMap;
+
+ typedef ResourceHashtable<const JBoltStackFrameKey, JBoltStackFrameValue, // sampled frames keyed by (klass, method id)
+ 15889, AnyObj::C_HEAP, mtTracing,
+ JBoltStackFrameKey::calc_hash, JBoltStackFrameKey::calc_equals
+ > StackFrameKeyMap;
+
+ static GrowableArray<JBoltMethodKey>* _hot_methods_sorted; // hot methods in their final layout order
+ static MethodKeyMap* _hot_methods_vis; // lookup/membership view of the hot methods
+ static int _reorder_method_threshold_cnt;
+
+ static volatile int _reorder_phase; // one of JBoltReorderPhase; advanced via the reorder_phase_*_to_*() helpers below
+ static volatile int _reorderable_method_cnt;
+ static Method* volatile _cur_reordering_method;
+
+ // the CompilerThread to start the new JBoltControlThread
+ static Thread* _start_reordering_thread;
+
+ static StackFrameKeyMap* _sampled_methods_refs;
+
+ // when not set JBoltDumpMode or JBoltLoadMode, JBolt will be in one-step auto mode.
+ static bool _auto_mode;
+
+ // use MethodJBoltHot and MethodJBoltTmp as two semi hot space.
+ // each time restart a schedule, we exchange primary and secondary
+ static volatile int _primary_hot_seg;
+ static volatile int _secondary_hot_seg;
+
+ static volatile int _gc_should_sweep_code_heaps_now; // flag polled by GC (see g1CollectedHeap changes in this patch)
+
+private:
+ // Used in dump mode.
+ static methodHandle lookup_method(InstanceKlass* klass, traceid method_id);
+ static void construct_stacktrace(const JfrStackTrace &stacktrace);
+
+ // Used in init phase 1.
+ static void check_mode();
+ static void check_order_file();
+ static void check_dependency();
+ static size_t calc_nmethod_size_with_padding(size_t nmethod_size);
+ static size_t calc_segment_size_with_padding(size_t segment_size);
+ static void load_order_file_phase1(int* method_cnt , size_t* total_nmethod_size);
+ static void init_load_mode_phase1();
+
+ // Used in init phase 2.
+ static bool parse_method_line_phase2(char* const line, const int len);
+ static bool parse_connected_component_line_phase2(char* const line, const int len);
+ static void load_order_file_phase2(TRAPS);
+ static void init_load_mode_phase2(TRAPS);
+ static void init_dump_mode_phase2(TRAPS);
+
+ // Used in auto mode.
+ static CodeBlobType primary_hot_seg();
+ static CodeBlobType secondary_hot_seg();
+
+ // Used in auto mode prev_control_schedule
+ static int clear_last_sample_datas();
+ static void swap_semi_jbolt_segs(); // exchange _primary_hot_seg and _secondary_hot_seg
+ static int clear_manager();
+
+ // Used in auto mode control_schedule
+ static void init_auto_transition(size_t* segment_size, TRAPS);
+
+ // Used in auto mode post_control_schedule
+ static void clear_secondary_hot_seg(TRAPS);
+
+ // JBolt phases
+
+ static int reorder_phase();
+
+ static bool reorder_phase_available_to_collecting(); // two-phase (dump/load) transitions
+ static bool reorder_phase_collecting_to_reordering();
+
+ static bool reorder_phase_available_to_profiling(); // one-phase (auto mode) transitions
+ static bool reorder_phase_profiling_to_reordering();
+ static bool reorder_phase_reordering_to_available();
+ static bool reorder_phase_profiling_to_available();
+ static bool reorder_phase_profiling_to_waiting();
+ static bool reorder_phase_waiting_to_reordering();
+ static bool reorder_phase_waiting_to_available();
+
+ static bool reorder_phase_reordering_to_end();
+
+ static Method* cur_reordering_method();
+ static void set_cur_reordering_method(Method* method);
+ static int inc_reorderable_method_cnt();
+
+ // Used in reordering phase.
+ static CompileTask* create_a_task_instance(CompileTaskInfo* cti, methodHandle& method, methodHandle& hot_method, TRAPS);
+ static void check_compiled_result(Method* method, CodeBlobType check_blob_type, TRAPS);
+ static bool enqueue_recompile_task(CompileTaskInfo* cti, methodHandle& method, methodHandle& hot_method, TRAPS);
+ static bool recompile_one(CompileTaskInfo* cti, methodHandle& method, methodHandle& hot_method, TRAPS);
+
+ static void print_code_heap(outputStream& ls, CodeHeap* heap, const char* name);
+public:
+ static void log_stacktrace(const JfrStackTrace &stacktrace);
+ static void construct_cg_once();
+ static void dump_order_in_manual();
+ static JBoltErrorCode dump_order_in_jcmd(const char* filename);
+
+ static void check_arguments_not_set();
+ static void init_phase1(); // before VM init -- confirm exact call site in threads.cpp
+ static void init_phase2(TRAPS);
+ static void init_code_heaps(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, size_t ps, size_t alignment);
+
+ static bool auto_mode() { return _auto_mode; }
+
+ static bool reorder_phase_is_waiting();
+ static bool reorder_phase_is_available();
+ static bool reorder_phase_is_collecting();
+ static bool reorder_phase_is_profiling();
+ static bool reorder_phase_is_reordering();
+ static bool reorder_phase_is_profiling_or_waiting();
+ static bool reorder_phase_is_collecting_or_reordering();
+
+ static bool can_reorder_now();
+ static bool should_reorder_now();
+
+ static bool gc_should_sweep_code_heaps_now();
+
+ static CodeBlobType calc_code_blob_type(Method* method, CompileTask* task, TRAPS);
+
+ static void check_start_reordering(TRAPS);
+ static void reorder_all_methods(TRAPS);
+ static void clear_structures();
+
+ static void print_code_heaps();
+};
+
+#endif // SHARE_JBOLT_JBOLTMANAGER_HPP
diff --git a/src/hotspot/share/jbolt/jBoltUtils.cpp b/src/hotspot/share/jbolt/jBoltUtils.cpp
new file mode 100644
index 000000000..e48d3b046
--- /dev/null
+++ b/src/hotspot/share/jbolt/jBoltUtils.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "jbolt/jBoltUtils.hpp"
+
+JBoltUtils::MetaDataKeepAliveMark::MetaDataKeepAliveMark(Thread* thread) : _thread(thread), _kept() { // RAII: must live on the current thread's stack
+ assert(thread == Thread::current(), "Must be current thread");
+ assert(_thread->is_in_live_stack((address)this), "not on stack?");
+}
+
+JBoltUtils::MetaDataKeepAliveMark::~MetaDataKeepAliveMark() { // unregister everything add() registered, in reverse order
+ for (int i = _kept.length() - 1; i >= 0; --i) {
+ Metadata* md = _kept.at(i);
+ int idx = _thread->metadata_handles()->find_from_end(md);
+ assert(idx != -1, "not in metadata_handles list");
+ _thread->metadata_handles()->remove_at(idx); // find_from_end: removes the most recent registration of md
+ }
+}
diff --git a/src/hotspot/share/jbolt/jBoltUtils.hpp b/src/hotspot/share/jbolt/jBoltUtils.hpp
new file mode 100644
index 000000000..c2da35519
--- /dev/null
+++ b/src/hotspot/share/jbolt/jBoltUtils.hpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_JBOLT_JBOLTUTILS_HPP
+#define SHARE_JBOLT_JBOLTUTILS_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/metadata.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/growableArray.hpp"
+
+class JBoltUtils : public AllStatic { // misc JBolt helpers
+public:
+ /**
+ * Keep the metadata alive.
+ *
+ * @see KeepAliveRegistrar
+ * @see methodHandle
+ */
+ class MetaDataKeepAliveMark : public StackObj {
+ private:
+ Thread* _thread; // owning thread; asserted to be Thread::current() in the ctor
+ GrowableArray<Metadata*> _kept; // everything registered through add(), unregistered in the dtor
+
+ public:
+ MetaDataKeepAliveMark(Thread* thread);
+ ~MetaDataKeepAliveMark();
+
+ void add(Metadata* md); // register md as in-use (see jBoltUtils.inline.hpp)
+
+ const GrowableArray<Metadata*>& kept() { return _kept; }
+ };
+};
+
+#endif // SHARE_JBOLT_JBOLTUTILS_HPP
diff --git a/src/hotspot/share/jbolt/jBoltUtils.inline.hpp b/src/hotspot/share/jbolt/jBoltUtils.inline.hpp
new file mode 100644
index 000000000..abd7501ca
--- /dev/null
+++ b/src/hotspot/share/jbolt/jBoltUtils.inline.hpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_JBOLT_JBOLTUTILS_INLINE_HPP
+#define SHARE_JBOLT_JBOLTUTILS_INLINE_HPP
+
+#include "jbolt/jBoltUtils.hpp"
+
+// Register a metadata as 'in-use' by the thread. It's fine to register a
+// metadata multiple times (though perhaps inefficient).
+inline void JBoltUtils::MetaDataKeepAliveMark::add(Metadata* md) { // register md as in-use by _thread
+ assert(md->is_valid(), "obj is valid");
+ assert(_thread == Thread::current(), "thread must be current");
+ _kept.push(md); // remembered so the dtor can unregister exactly what was added
+ _thread->metadata_handles()->push(md);
+}
+
+#endif // SHARE_JBOLT_JBOLTUTILS_INLINE_HPP
diff --git a/src/hotspot/share/jbolt/jbolt_globals.hpp b/src/hotspot/share/jbolt/jbolt_globals.hpp
new file mode 100644
index 000000000..355e79672
--- /dev/null
+++ b/src/hotspot/share/jbolt/jbolt_globals.hpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_JBOLT_JBOLT_GLOBALS_HPP
+#define SHARE_JBOLT_JBOLT_GLOBALS_HPP
+
+#include "runtime/globals_shared.hpp"
+
+#define JBOLT_FLAGS(develop, \
+ develop_pd, \
+ product, \
+ product_pd, \
+ notproduct, \
+ range, \
+ constraint) \
+ \
+ product(bool, UseJBolt, false, EXPERIMENTAL, \
+ "Enable JBolt feature.") \
+ \
+ product(bool, JBoltDumpMode, false, EXPERIMENTAL, \
+ "Trial run of JBolt. Collect profiling and dump it.") \
+ \
+ product(bool, JBoltLoadMode, false, EXPERIMENTAL, \
+ "Second run of JBolt. Load the profiling and reorder nmethods.") \
+ \
+ product(ccstr, JBoltOrderFile, NULL, EXPERIMENTAL, \
+ "The JBolt method order file to dump or load.") \
+ \
+ product(intx, JBoltSampleInterval, 600, EXPERIMENTAL, \
+ "Sample interval(second) of JBolt dump mode, " \
+ "only useful in auto mode.") \
+ range(0, max_jint) \
+ \
+ product(uintx, JBoltCodeHeapSize, 8*M, EXPERIMENTAL, \
+ "Code heap size of MethodJBoltHot and MethodJBoltTmp heaps.") \
+ \
+
+// end of JBOLT_FLAGS
+
+DECLARE_FLAGS(JBOLT_FLAGS)
+
+#endif // SHARE_JBOLT_JBOLT_GLOBALS_HPP
diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml
index 80ea101a7..3045a46e1 100644
--- a/src/hotspot/share/jfr/metadata/metadata.xml
+++ b/src/hotspot/share/jfr/metadata/metadata.xml
@@ -988,6 +988,8 @@
<Field type="ulong" contentType="bytes" name="nonNMethodSize" label="Non-nmethod Size" />
<Field type="ulong" contentType="bytes" name="profiledSize" label="Profiled Size" />
<Field type="ulong" contentType="bytes" name="nonProfiledSize" label="Non-profiled Size" />
+ <Field type="ulong" contentType="bytes" name="jboltHotSize" label="JBolt Hot Size" />
+ <Field type="ulong" contentType="bytes" name="jboltTmpSize" label="JBolt Tmp Size" />
<Field type="ulong" contentType="bytes" name="expansionSize" label="Expansion size" />
<Field type="ulong" contentType="bytes" name="minBlockLength" label="Minimum Block Length" />
<Field type="ulong" contentType="address" name="startAddress" label="Start Address" />
diff --git a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
index ff52b4104..0582f91eb 100644
--- a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
+++ b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp
@@ -75,6 +75,9 @@
#if INCLUDE_SHENANDOAHGC
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif
+#if INCLUDE_JBOLT
+#include "jbolt/jbolt_globals.hpp"
+#endif // INCLUDE_JBOLT
/**
* JfrPeriodic class
@@ -692,6 +695,10 @@ TRACE_REQUEST_FUNC(CodeCacheConfiguration) {
event.set_nonNMethodSize(NonNMethodCodeHeapSize);
event.set_profiledSize(ProfiledCodeHeapSize);
event.set_nonProfiledSize(NonProfiledCodeHeapSize);
+#if INCLUDE_JBOLT
+ event.set_jboltHotSize(JBoltCodeHeapSize);
+ event.set_jboltTmpSize(JBoltCodeHeapSize);
+#endif
event.set_expansionSize(CodeCacheExpansionSize);
event.set_minBlockLength(CodeCacheMinBlockLength);
event.set_startAddress((u8)CodeCache::low_bound());
diff --git a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp
index bda6acbc8..1270a5c63 100644
--- a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp
+++ b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp
@@ -46,6 +46,9 @@
#include "runtime/threadCrashProtection.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/systemMemoryBarrier.hpp"
+#if INCLUDE_JBOLT
+#include "jbolt/jBoltManager.hpp"
+#endif
enum JfrSampleType {
NO_SAMPLE = 0,
@@ -273,7 +276,15 @@ bool JfrThreadSampleClosure::sample_thread_in_java(JavaThread* thread, JfrStackF
return false;
}
EventExecutionSample *event = &_events[_added_java - 1];
- traceid id = JfrStackTraceRepository::add(sampler.stacktrace());
+ traceid id = 0;
+#if INCLUDE_JBOLT
+ if (UseJBolt && JBoltManager::reorder_phase_is_profiling()) {
+ id = JfrStackTraceRepository::add_jbolt(sampler.stacktrace());
+ } else
+#endif
+ {
+ id = JfrStackTraceRepository::add(sampler.stacktrace());
+ }
assert(id != 0, "Stacktrace id should not be 0");
event->set_stackTrace(id);
return true;
diff --git a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp
index 48db2fd87..a68d6ea22 100644
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp
@@ -62,7 +62,11 @@ JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) :
_frames_ownership(false),
_reached_root(false),
_lineno(false),
- _written(false) {}
+ _written(false)
+#if INCLUDE_JBOLT
+ , _hotcount(1)
+#endif
+ {}
JfrStackTrace::JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next) :
_next(next),
@@ -74,7 +78,11 @@ JfrStackTrace::JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrSt
_frames_ownership(true),
_reached_root(trace._reached_root),
_lineno(trace._lineno),
- _written(false) {
+ _written(false)
+#if INCLUDE_JBOLT
+ , _hotcount(trace._hotcount)
+#endif
+{
copy_frames(&_frames, trace._nr_of_frames, trace._frames);
}
diff --git a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp
index 49e7d7a19..a89b09359 100644
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp
@@ -52,6 +52,17 @@ class JfrStackFrame {
void write(JfrCheckpointWriter& cpw) const;
void resolve_lineno() const;
+#if INCLUDE_JBOLT
+ const InstanceKlass* get_klass() const { return _klass; }
+ traceid get_methodId() const { return _methodid; }
+ int get_byteCodeIndex() const { return _bci; }
+ u1 get_type() const { return _type; }
+
+ static ByteSize klass_offset() { return byte_offset_of(JfrStackFrame, _klass ); }
+ static ByteSize methodid_offset() { return byte_offset_of(JfrStackFrame, _methodid ); }
+ static ByteSize bci_offset() { return byte_offset_of(JfrStackFrame, _bci ); }
+ static ByteSize type_offset() { return byte_offset_of(JfrStackFrame, _type ); }
+#endif
enum {
FRAME_INTERPRETER = 0,
FRAME_JIT,
@@ -68,6 +79,10 @@ class JfrStackTrace : public JfrCHeapObj {
friend class ObjectSampler;
friend class OSThreadSampler;
friend class StackTraceResolver;
+#if INCLUDE_JBOLT
+ friend class JBoltManager;
+#endif
+
private:
const JfrStackTrace* _next;
JfrStackFrame* _frames;
@@ -79,6 +94,9 @@ class JfrStackTrace : public JfrCHeapObj {
bool _reached_root;
mutable bool _lineno;
mutable bool _written;
+#if INCLUDE_JBOLT
+ u4 _hotcount;
+#endif
const JfrStackTrace* next() const { return _next; }
@@ -107,6 +125,17 @@ class JfrStackTrace : public JfrCHeapObj {
public:
unsigned int hash() const { return _hash; }
traceid id() const { return _id; }
+#if INCLUDE_JBOLT
+ u4 hotcount() const { return _hotcount; }
+ const JfrStackFrame* get_frames() const { return _frames; }
+ u4 get_framesCount() const { return _nr_of_frames; }
+
+ static ByteSize hash_offset() { return byte_offset_of(JfrStackTrace, _hash ); }
+ static ByteSize id_offset() { return byte_offset_of(JfrStackTrace, _id ); }
+ static ByteSize hotcount_offset() { return byte_offset_of(JfrStackTrace, _hotcount ); }
+ static ByteSize frames_offset() { return byte_offset_of(JfrStackTrace, _frames ); }
+ static ByteSize frames_count_offset() { return byte_offset_of(JfrStackTrace, _nr_of_frames ); }
+#endif
};
#endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
diff --git a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp
index 7caada5ab..250535803 100644
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp
@@ -29,6 +29,9 @@
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "runtime/mutexLocker.hpp"
+#if INCLUDE_JBOLT
+#include "jbolt/jBoltManager.hpp"
+#endif
/*
* There are two separate repository instances.
@@ -51,9 +54,16 @@ static JfrStackTraceRepository& leak_profiler_instance() {
return *_leak_profiler_instance;
}
+#if INCLUDE_JBOLT
+JfrStackTraceRepository::JfrStackTraceRepository() : _last_entries(0), _entries(0), _last_entries_jbolt(0), _entries_jbolt(0) {
+ memset(_table, 0, sizeof(_table));
+ memset(_table_jbolt, 0, sizeof(_table_jbolt));
+}
+#else
JfrStackTraceRepository::JfrStackTraceRepository() : _last_entries(0), _entries(0) {
memset(_table, 0, sizeof(_table));
}
+#endif
JfrStackTraceRepository* JfrStackTraceRepository::create() {
assert(_instance == nullptr, "invariant");
@@ -98,6 +108,11 @@ bool JfrStackTraceRepository::is_modified() const {
}
size_t JfrStackTraceRepository::write(JfrChunkWriter& sw, bool clear) {
+#if INCLUDE_JBOLT
+ if (clear && (UseJBolt && JBoltManager::reorder_phase_is_profiling_or_waiting())) {
+ JBoltManager::construct_cg_once();
+ }
+#endif
MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
if (_entries == 0) {
return 0;
@@ -120,6 +135,21 @@ size_t JfrStackTraceRepository::write(JfrChunkWriter& sw, bool clear) {
if (clear) {
memset(_table, 0, sizeof(_table));
_entries = 0;
+#if INCLUDE_JBOLT
+ for (u4 i = 0; i < TABLE_SIZE; ++i) {
+ JfrStackTrace* stacktrace = _table_jbolt[i];
+ while (stacktrace != NULL) {
+ JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
+ delete stacktrace;
+ stacktrace = next;
+ }
+ }
+ memset(_table_jbolt, 0, sizeof(_table_jbolt));
+ _entries_jbolt = 0;
+ }
+ _last_entries_jbolt = _entries_jbolt;
+ {
+#endif
}
_last_entries = _entries;
return count;
@@ -142,6 +172,21 @@ size_t JfrStackTraceRepository::clear(JfrStackTraceRepository& repo) {
const size_t processed = repo._entries;
repo._entries = 0;
repo._last_entries = 0;
+#if INCLUDE_JBOLT
+ if (repo._entries_jbolt != 0) {
+ for (u4 i = 0; i < TABLE_SIZE; ++i) {
+ JfrStackTrace* stacktrace = repo._table_jbolt[i];
+ while (stacktrace != NULL) {
+ JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
+ delete stacktrace;
+ stacktrace = next;
+ }
+ }
+ memset(repo._table_jbolt, 0, sizeof(repo._table_jbolt));
+ repo._entries_jbolt = 0;
+ repo._last_entries_jbolt = 0;
+ }
+#endif
return processed;
}
@@ -233,6 +278,75 @@ const JfrStackTrace* JfrStackTraceRepository::lookup_for_leak_profiler(unsigned
return trace;
}
+#if INCLUDE_JBOLT
+size_t JfrStackTraceRepository::clear_jbolt(JfrStackTraceRepository& repo) { // free the jbolt side table; returns entries cleared
+ MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
+ if (repo._entries_jbolt == 0) {
+ return 0;
+ }
+
+ for (u4 i = 0; i < TABLE_SIZE; ++i) {
+ JfrStackTrace* stacktrace = repo._table_jbolt[i];
+ while (stacktrace != NULL) { // walk and delete each hash chain
+ JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
+ delete stacktrace;
+ stacktrace = next;
+ }
+ }
+ memset(repo._table_jbolt, 0, sizeof(repo._table_jbolt));
+ const size_t processed = repo._entries_jbolt; // bug fix: was repo._entries (the main table's count)
+ repo._entries_jbolt = 0;
+ repo._last_entries_jbolt = 0;
+
+ return processed;
+}
+
+size_t JfrStackTraceRepository::clear_jbolt() { // clear both repositories; only the main instance's count is returned
+ clear_jbolt(leak_profiler_instance());
+ return clear_jbolt(instance());
+}
+
+traceid JfrStackTraceRepository::add_jbolt(JfrStackTraceRepository& repo, const JfrStackTrace& stacktrace) { // mirrors the plain add(): retry once after resolving line numbers
+ traceid tid = repo.add_trace_jbolt(stacktrace);
+ if (tid == 0) {
+ stacktrace.resolve_linenos();
+ tid = repo.add_trace_jbolt(stacktrace);
+ }
+ assert(tid != 0, "invariant");
+ return tid;
+}
+
+traceid JfrStackTraceRepository::add_jbolt(const JfrStackTrace& stacktrace) { // entry point used by the JFR thread sampler during JBolt profiling
+ JBoltManager::log_stacktrace(stacktrace);
+ return add_jbolt(instance(), stacktrace);
+}
+
+traceid JfrStackTraceRepository::add_trace_jbolt(const JfrStackTrace& stacktrace) { // add to the main table, then mirror into _table_jbolt
+ traceid id = add_trace(stacktrace); // main-table insertion first (takes and releases the lock internally -- confirm)
+ MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag); // re-acquire for the jbolt side table
+ const size_t index = stacktrace._hash % TABLE_SIZE;
+
+ if (UseJBolt && JBoltManager::reorder_phase_is_profiling()) {
+ const JfrStackTrace* table_jbolt_entry = _table_jbolt[index];
+ while (table_jbolt_entry != NULL) { // chain scan for an existing equal trace
+ if (table_jbolt_entry->equals(stacktrace)) {
+ // [jbolt]: each time add an old trace, inc its hotcount
+ const_cast<JfrStackTrace*>(table_jbolt_entry)->_hotcount++;
+ return table_jbolt_entry->id();
+ }
+ table_jbolt_entry = table_jbolt_entry->next();
+ }
+ }
+
+ if (id != 0 && UseJBolt && JBoltManager::reorder_phase_is_profiling()) { // id == 0 means the main add failed (linenos unresolved)
+ _table_jbolt[index] = new JfrStackTrace(id, stacktrace, _table_jbolt[index]); // prepend a copy to the chain
+ ++_entries_jbolt;
+ }
+
+ return id;
+}
+#endif
+
void JfrStackTraceRepository::clear_leak_profiler() {
clear(leak_profiler_instance());
}
diff --git a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp
index ba0f966ed..dbedb947f 100644
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp
@@ -42,6 +42,9 @@ class JfrStackTraceRepository : public JfrCHeapObj {
friend class RecordStackTrace;
friend class StackTraceBlobInstaller;
friend class StackTraceRepository;
+#if INCLUDE_JBOLT
+ friend class JBoltManager;
+#endif
private:
static const u4 TABLE_SIZE = 2053;
@@ -49,6 +52,19 @@ class JfrStackTraceRepository : public JfrCHeapObj {
u4 _last_entries;
u4 _entries;
+#if INCLUDE_JBOLT
+ // [jbolt]: an exclusive table for jbolt. It should be a subset of _table
+ JfrStackTrace* _table_jbolt[TABLE_SIZE];
+ u4 _last_entries_jbolt;
+ u4 _entries_jbolt;
+
+ static size_t clear_jbolt();
+ static size_t clear_jbolt(JfrStackTraceRepository& repo);
+ traceid add_trace_jbolt(const JfrStackTrace& stacktrace);
+ static traceid add_jbolt(JfrStackTraceRepository& repo, const JfrStackTrace& stacktrace);
+ static traceid add_jbolt(const JfrStackTrace& stacktrace);
+#endif
+
JfrStackTraceRepository();
static JfrStackTraceRepository& instance();
static JfrStackTraceRepository* create();
@@ -71,6 +87,13 @@ class JfrStackTraceRepository : public JfrCHeapObj {
public:
static traceid record(Thread* current_thread, int skip = 0);
+#if INCLUDE_JBOLT
+ const JfrStackTrace* const * get_stacktrace_table() const { return _table; }
+ u4 get_entries_count() const { return _entries; }
+
+ const JfrStackTrace* const * get_stacktrace_table_jbolt() const { return _table_jbolt; }
+ u4 get_entries_count_jbolt() const { return _entries_jbolt; }
+#endif
};
#endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACEREPOSITORY_HPP
diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp
index 99adff484..4ea67e3ff 100644
--- a/src/hotspot/share/logging/logTag.hpp
+++ b/src/hotspot/share/logging/logTag.hpp
@@ -98,6 +98,7 @@ class outputStream;
LOG_TAG(install) \
LOG_TAG(interpreter) \
LOG_TAG(itables) \
+ JBOLT_ONLY(LOG_TAG(jbolt)) \
LOG_TAG(jfr) \
LOG_TAG(jit) \
LOG_TAG(jni) \
diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp
index c852316f8..9a0d64a33 100644
--- a/src/hotspot/share/opto/doCall.cpp
+++ b/src/hotspot/share/opto/doCall.cpp
@@ -1053,8 +1053,8 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
- if( CountCompiledCalls ) {
- if( at_method_entry ) {
+ if(CountCompiledCalls) {
+ if(at_method_entry) {
// bump invocation counter if top method (for statistics)
if (CountCompiledCalls && depth() == 1) {
const TypePtr* addr_type = TypeMetadataPtr::make(method());
diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp
index 4d71e552e..6e8fb2e67 100644
--- a/src/hotspot/share/opto/parse1.cpp
+++ b/src/hotspot/share/opto/parse1.cpp
@@ -1216,7 +1216,7 @@ void Parse::do_method_entry() {
set_parse_bci(InvocationEntryBci); // Pseudo-BCP
set_sp(0); // Java Stack Pointer
- NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
+ NOT_PRODUCT(count_compiled_calls(true/* at_method_entry */, false/* is_inline */);)
if (C->env()->dtrace_method_probes()) {
make_dtrace_method_entry(method());
diff --git a/src/hotspot/share/runtime/flags/allFlags.hpp b/src/hotspot/share/runtime/flags/allFlags.hpp
index 03a51891e..19292dcb2 100644
--- a/src/hotspot/share/runtime/flags/allFlags.hpp
+++ b/src/hotspot/share/runtime/flags/allFlags.hpp
@@ -31,6 +31,9 @@
#include "gc/shared/tlab_globals.hpp"
#include "runtime/flags/debug_globals.hpp"
#include "runtime/globals.hpp"
+#if INCLUDE_JBOLT
+#include "jbolt/jbolt_globals.hpp"
+#endif // INCLUDE_JBOLT
// Put LP64/ARCH/JVMCI/COMPILER1/COMPILER2 at the top,
// as they are processed by jvmFlag.cpp in that order.
@@ -148,7 +151,17 @@
product_pd, \
notproduct, \
range, \
- constraint)
+ constraint) \
+ \
+ JBOLT_ONLY( \
+ JBOLT_FLAGS( \
+ develop, \
+ develop_pd, \
+ product, \
+ product_pd, \
+ notproduct, \
+ range, \
+ constraint))
#define ALL_CONSTRAINTS(f) \
COMPILER_CONSTRAINTS(f) \
diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp
index a92a1c5d2..52551446c 100644
--- a/src/hotspot/share/runtime/java.cpp
+++ b/src/hotspot/share/runtime/java.cpp
@@ -97,6 +97,9 @@
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif
+#if INCLUDE_JBOLT
+#include "jbolt/jBoltManager.hpp"
+#endif
GrowableArray<Method*>* collected_profiled_methods;
@@ -522,6 +525,12 @@ void before_exit(JavaThread* thread, bool halt) {
// Note: we don't wait until it actually dies.
os::terminate_signal_thread();
+#if INCLUDE_JBOLT
+ if (UseJBolt && JBoltDumpMode) {
+ JBoltManager::dump_order_in_manual();
+ }
+#endif
+
print_statistics();
Universe::heap()->print_tracing_info();
diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp
index ac435317d..8b7058405 100644
--- a/src/hotspot/share/runtime/threads.cpp
+++ b/src/hotspot/share/runtime/threads.cpp
@@ -113,6 +113,10 @@
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif
+#if INCLUDE_JBOLT
+#include "jbolt/jBoltDcmds.hpp"
+#include "jbolt/jBoltManager.hpp"
+#endif // INCLUDE_JBOLT
// Initialization after module runtime initialization
void universe_post_module_init(); // must happen after call_initPhase2
@@ -547,6 +551,14 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
ObjectMonitor::Initialize();
ObjectSynchronizer::initialize();
+#if INCLUDE_JBOLT
+ if (UseJBolt) {
+ JBoltManager::init_phase1();
+ } else {
+ JBoltManager::check_arguments_not_set();
+ }
+#endif // INCLUDE_JBOLT
+
// Initialize global modules
jint status = init_globals();
if (status != JNI_OK) {
@@ -818,6 +830,13 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
ShouldNotReachHere();
}
+#if INCLUDE_JBOLT
+ register_jbolt_dcmds();
+ if (UseJBolt) {
+ JBoltManager::init_phase2(CATCH);
+ }
+#endif // INCLUDE_JBOLT
+
return JNI_OK;
}
diff --git a/src/hotspot/share/utilities/growableArray.hpp b/src/hotspot/share/utilities/growableArray.hpp
index 63dbf668e..4731624cf 100644
--- a/src/hotspot/share/utilities/growableArray.hpp
+++ b/src/hotspot/share/utilities/growableArray.hpp
@@ -31,6 +31,9 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "utilities/powerOfTwo.hpp"
+#if INCLUDE_JBOLT
+#include "utilities/sizes.hpp"
+#endif // INCLUDE_JBOLT
// A growable array.
@@ -126,6 +129,10 @@ protected:
public:
const static GrowableArrayView EMPTY;
+#if INCLUDE_JBOLT
+ static ByteSize data_offset() { return byte_offset_of(GrowableArrayView, _data); }
+#endif // INCLUDE_JBOLT
+
bool operator==(const GrowableArrayView& rhs) const {
if (_len != rhs._len)
return false;
diff --git a/src/hotspot/share/utilities/macros.hpp b/src/hotspot/share/utilities/macros.hpp
index 244b18ecd..8526d95fb 100644
--- a/src/hotspot/share/utilities/macros.hpp
+++ b/src/hotspot/share/utilities/macros.hpp
@@ -129,6 +129,18 @@
#define NOT_CDS_RETURN_(code) { return code; }
#endif // INCLUDE_CDS
+#ifndef INCLUDE_JBOLT
+#define INCLUDE_JBOLT 1
+#endif
+
+#if INCLUDE_JBOLT
+#define JBOLT_ONLY(x) x
+#define NOT_JBOLT(x)
+#else
+#define JBOLT_ONLY(x)
+#define NOT_JBOLT(x) x
+#endif // INCLUDE_JBOLT
+
#ifndef INCLUDE_MANAGEMENT
#define INCLUDE_MANAGEMENT 1
#endif // INCLUDE_MANAGEMENT
@@ -263,6 +275,9 @@
#define JFR_ONLY(code)
#define NOT_JFR_RETURN() {}
#define NOT_JFR_RETURN_(code) { return code; }
+#if INCLUDE_JBOLT
+#define INCLUDE_JBOLT 0 // INCLUDE_JBOLT depends on INCLUDE_JFR
+#endif
#endif
#ifndef INCLUDE_JVMCI
diff --git a/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheCLITestCase.java b/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheCLITestCase.java
index eca5c70e0..39f633361 100644
--- a/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheCLITestCase.java
+++ b/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheCLITestCase.java
@@ -68,12 +68,12 @@ public class CodeCacheCLITestCase {
* Verifies that in interpreted mode PrintCodeCache output contains
* the whole code cache. Int mode disables SegmentedCodeCache with a warning.
*/
- INT_MODE(ONLY_SEGMENTED, EnumSet.of(BlobType.All), USE_INT_MODE),
+ INT_MODE(ONLY_SEGMENTED, EnumSet.copyOf(CodeCacheOptions.NON_SEGMENTED_HEAPS), USE_INT_MODE),
/**
* Verifies that with disabled SegmentedCodeCache PrintCodeCache output
* contains only CodeCache's entry.
*/
- NON_SEGMENTED(options -> !options.segmented, EnumSet.of(BlobType.All),
+ NON_SEGMENTED(options -> !options.segmented, EnumSet.copyOf(CodeCacheOptions.NON_SEGMENTED_HEAPS),
CommandLineOptionTest.prepareBooleanFlag(SEGMENTED_CODE_CACHE,
false)),
/**
@@ -82,7 +82,7 @@ public class CodeCacheCLITestCase {
* profiled-nmethods heap and non-segmented CodeCache.
*/
NON_TIERED(ONLY_SEGMENTED,
- EnumSet.of(BlobType.NonNMethod, BlobType.MethodNonProfiled),
+ EnumSet.copyOf(CodeCacheOptions.SEGMENTED_HEAPS_WO_PROFILED),
CommandLineOptionTest.prepareBooleanFlag(TIERED_COMPILATION,
false)),
/**
@@ -91,7 +91,7 @@ public class CodeCacheCLITestCase {
* heaps only.
*/
TIERED_LEVEL_0(SEGMENTED_SERVER,
- EnumSet.of(BlobType.All),
+ EnumSet.copyOf(CodeCacheOptions.NON_SEGMENTED_HEAPS),
CommandLineOptionTest.prepareBooleanFlag(TIERED_COMPILATION,
true),
CommandLineOptionTest.prepareNumericFlag(TIERED_STOP_AT, 0)),
@@ -101,7 +101,7 @@ public class CodeCacheCLITestCase {
* heaps only.
*/
TIERED_LEVEL_1(SEGMENTED_SERVER,
- EnumSet.of(BlobType.NonNMethod, BlobType.MethodNonProfiled),
+ EnumSet.copyOf(CodeCacheOptions.SEGMENTED_HEAPS_WO_PROFILED),
CommandLineOptionTest.prepareBooleanFlag(TIERED_COMPILATION,
true),
CommandLineOptionTest.prepareNumericFlag(TIERED_STOP_AT, 1)),
@@ -110,7 +110,7 @@ public class CodeCacheCLITestCase {
* contain information about all three code heaps.
*/
TIERED_LEVEL_4(SEGMENTED_SERVER,
- EnumSet.complementOf(EnumSet.of(BlobType.All)),
+ EnumSet.copyOf(CodeCacheOptions.ALL_SEGMENTED_HEAPS),
CommandLineOptionTest.prepareBooleanFlag(TIERED_COMPILATION,
true),
CommandLineOptionTest.prepareNumericFlag(TIERED_STOP_AT, 4));
diff --git a/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheOptions.java b/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheOptions.java
index f5243aaa4..1830911a9 100644
--- a/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheOptions.java
+++ b/test/hotspot/jtreg/compiler/codecache/cli/common/CodeCacheOptions.java
@@ -33,20 +33,27 @@ import java.util.List;
public class CodeCacheOptions {
public static final String SEGMENTED_CODE_CACHE = "SegmentedCodeCache";
- private static final EnumSet<BlobType> NON_SEGMENTED_HEAPS
+ public static final EnumSet<BlobType> NON_SEGMENTED_HEAPS
= EnumSet.of(BlobType.All);
- private static final EnumSet<BlobType> ALL_SEGMENTED_HEAPS
- = EnumSet.complementOf(NON_SEGMENTED_HEAPS);
- private static final EnumSet<BlobType> SEGMENTED_HEAPS_WO_PROFILED
+ public static final EnumSet<BlobType> JBOLT_HEAPS
+ = EnumSet.of(BlobType.MethodJBoltHot, BlobType.MethodJBoltTmp);
+ public static final EnumSet<BlobType> ALL_SEGMENTED_HEAPS
+ = EnumSet.complementOf(union(NON_SEGMENTED_HEAPS, JBOLT_HEAPS));
+ public static final EnumSet<BlobType> ALL_SEGMENTED_HEAPS_WITH_JBOLT
+ = union(ALL_SEGMENTED_HEAPS, JBOLT_HEAPS);
+ public static final EnumSet<BlobType> SEGMENTED_HEAPS_WO_PROFILED
= EnumSet.of(BlobType.NonNMethod, BlobType.MethodNonProfiled);
- private static final EnumSet<BlobType> ONLY_NON_METHODS_HEAP
+ public static final EnumSet<BlobType> ONLY_NON_METHODS_HEAP
= EnumSet.of(BlobType.NonNMethod);
public final long reserved;
public final long nonNmethods;
public final long nonProfiled;
public final long profiled;
+ public final long jboltHot;
+ public final long jboltTmp;
public final boolean segmented;
+ public final boolean useJBolt;
public static long mB(long val) {
return CodeCacheOptions.kB(val) * 1024L;
@@ -56,12 +63,21 @@ public class CodeCacheOptions {
return val * 1024L;
}
+ public static <E extends Enum<E>> EnumSet<E> union(EnumSet<E> e1, EnumSet<E> e2) {
+ EnumSet<E> res = EnumSet.copyOf(e1);
+ res.addAll(e2);
+ return res;
+ }
+
public CodeCacheOptions(long reserved) {
this.reserved = reserved;
this.nonNmethods = 0;
this.nonProfiled = 0;
this.profiled = 0;
+ this.jboltHot = 0;
+ this.jboltTmp = 0;
this.segmented = false;
+ this.useJBolt = false;
}
public CodeCacheOptions(long reserved, long nonNmethods, long nonProfiled,
@@ -70,7 +86,25 @@ public class CodeCacheOptions {
this.nonNmethods = nonNmethods;
this.nonProfiled = nonProfiled;
this.profiled = profiled;
+ this.jboltHot = 0;
+ this.jboltTmp = 0;
this.segmented = true;
+ this.useJBolt = false;
+ }
+
+ /**
+ * No tests for JBolt yet as the related VM options are experimental now.
+ */
+ public CodeCacheOptions(long reserved, long nonNmethods, long nonProfiled,
+ long profiled, long jboltHot, long jboltTmp) {
+ this.reserved = reserved;
+ this.nonNmethods = nonNmethods;
+ this.nonProfiled = nonProfiled;
+ this.profiled = profiled;
+ this.jboltHot = jboltHot;
+ this.jboltTmp = jboltTmp;
+ this.segmented = true;
+ this.useJBolt = true;
}
public long sizeForHeap(BlobType heap) {
@@ -83,6 +117,10 @@ public class CodeCacheOptions {
return this.nonProfiled;
case MethodProfiled:
return this.profiled;
+ case MethodJBoltHot:
+ return this.jboltHot;
+ case MethodJBoltTmp:
+ return this.jboltTmp;
default:
throw new Error("Unknown heap: " + heap.name());
}
@@ -107,14 +145,26 @@ public class CodeCacheOptions {
CommandLineOptionTest.prepareNumericFlag(
BlobType.MethodProfiled.sizeOptionName, profiled));
}
+
+ if (useJBolt) {
+ Collections.addAll(options,
+ CommandLineOptionTest.prepareNumericFlag(
+ BlobType.MethodJBoltHot.sizeOptionName, jboltHot),
+ CommandLineOptionTest.prepareNumericFlag(
+ BlobType.MethodJBoltTmp.sizeOptionName, jboltTmp));
+ }
+
return options.toArray(new String[options.size()]);
}
public CodeCacheOptions mapOptions(EnumSet<BlobType> involvedCodeHeaps) {
if (involvedCodeHeaps.isEmpty()
|| involvedCodeHeaps.equals(NON_SEGMENTED_HEAPS)
- || involvedCodeHeaps.equals(ALL_SEGMENTED_HEAPS)) {
+ || involvedCodeHeaps.equals(ALL_SEGMENTED_HEAPS_WITH_JBOLT)) {
return this;
+ } else if (involvedCodeHeaps.equals(ALL_SEGMENTED_HEAPS)) {
+ return new CodeCacheOptions(reserved, nonNmethods,
+ nonProfiled + jboltHot + jboltTmp, profiled);
} else if (involvedCodeHeaps.equals(SEGMENTED_HEAPS_WO_PROFILED)) {
return new CodeCacheOptions(reserved, nonNmethods,
profiled + nonProfiled, 0L);
diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltDumpModeTest.java b/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltDumpModeTest.java
new file mode 100644
index 000000000..b5bdb19bc
--- /dev/null
+++ b/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltDumpModeTest.java
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test JBolt dump mode functions.
+ * @library /test/lib
+ * @requires vm.flagless
+ *
+ * @run driver compiler.codecache.jbolt.JBoltDumpModeTest
+ */
+
+package compiler.codecache.jbolt;
+
+import java.io.File;
+import java.io.IOException;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.Utils;
+
+public class JBoltDumpModeTest {
+ public static final String SRC_DIR = Utils.TEST_SRC;
+ public static final String ORDER_FILE = SRC_DIR + "/order.log";
+
+ private static void createOrderFile() {
+ try {
+ File order = new File(ORDER_FILE);
+ if (!order.exists()) {
+ order.createNewFile();
+ }
+ }
+ catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private static void clearOrderFile() {
+ File order = new File(ORDER_FILE);
+ if (order.exists()) {
+ order.delete();
+ }
+ }
+
+ private static void OrderFileShouldExist() throws Exception {
+ File order = new File(ORDER_FILE);
+ if (order.exists()) {
+ order.delete();
+ }
+ else {
+ throw new RuntimeException(ORDER_FILE + " doesn't exist as expect.");
+ }
+ }
+
+ private static void OrderFileShouldNotExist() throws Exception {
+ File order = new File(ORDER_FILE);
+ if (order.exists()) {
+ throw new RuntimeException(ORDER_FILE + " exists while expect not.");
+ }
+ }
+
+ private static void testNormalUse() throws Exception {
+ ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:JBoltOrderFile=" + ORDER_FILE,
+ "-XX:+JBoltDumpMode",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+
+ ProcessBuilder pb2 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:JBoltOrderFile=" + ORDER_FILE,
+ "-XX:+JBoltDumpMode",
+ "-XX:StartFlightRecording",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+
+ ProcessBuilder pb3 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:JBoltOrderFile=" + ORDER_FILE,
+ "-XX:+JBoltDumpMode",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+
+ clearOrderFile();
+
+ String stdout;
+
+ OutputAnalyzer out1 = new OutputAnalyzer(pb1.start());
+ stdout = out1.getStdout();
+ if (!stdout.contains("JBolt in dump mode now, start a JFR recording named \"jbolt-jfr\".")) {
+ throw new RuntimeException(stdout);
+ }
+ out1.shouldHaveExitValue(0);
+ OrderFileShouldExist();
+
+ OutputAnalyzer out2 = new OutputAnalyzer(pb2.start());
+ stdout = out2.getStdout();
+ if (!stdout.contains("JBolt in dump mode now, start a JFR recording named \"jbolt-jfr\".")) {
+ throw new RuntimeException(stdout);
+ }
+ out2.shouldHaveExitValue(0);
+ OrderFileShouldExist();
+
+ createOrderFile();
+ OutputAnalyzer out3 = new OutputAnalyzer(pb3.start());
+ stdout = out3.getStdout();
+ if (!stdout.contains("JBoltOrderFile to dump already exists and will be overwritten:")) {
+ throw new RuntimeException(stdout);
+ }
+ out3.shouldHaveExitValue(0);
+ OrderFileShouldExist();
+ }
+
+ private static void testUnabletoRun() throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:JBoltOrderFile=" + ORDER_FILE,
+ "-XX:+JBoltDumpMode",
+ "-XX:-FlightRecorder",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+
+ clearOrderFile();
+
+ String stdout;
+ OutputAnalyzer out = new OutputAnalyzer(pb.start());
+
+ stdout = out.getStdout();
+ if (!stdout.contains("JBolt depends on JFR!")) {
+ throw new RuntimeException(stdout);
+ }
+ OrderFileShouldNotExist();
+ }
+
+ private static void testFatalError() throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:JBoltOrderFile=" + ORDER_FILE,
+ "-XX:+JBoltDumpMode",
+ "-XX:foo",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+
+ clearOrderFile();
+
+ OutputAnalyzer out = new OutputAnalyzer(pb.start());
+
+ out.stderrShouldContain("Could not create the Java Virtual Machine");
+ OrderFileShouldNotExist();
+ }
+
+ public static void main(String[] args) throws Exception {
+ testNormalUse();
+ testUnabletoRun();
+ testFatalError();
+ }
+}
\ No newline at end of file
diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltVMOptionsTest.java b/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltVMOptionsTest.java
new file mode 100644
index 000000000..4b45a585b
--- /dev/null
+++ b/test/hotspot/jtreg/compiler/codecache/jbolt/JBoltVMOptionsTest.java
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test JBolt VM options.
+ * @library /test/lib
+ * @requires vm.flagless
+ *
+ * @run driver compiler.codecache.jbolt.JBoltVMOptionsTest
+ */
+
+package compiler.codecache.jbolt;
+
+import java.io.File;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.Utils;
+
+public class JBoltVMOptionsTest {
+ public static final String SRC_DIR = Utils.TEST_SRC;
+ public static final String TEMP_FILE = SRC_DIR + "/tmp.log";
+
+ public static void main(String[] args) throws Exception {
+ test1();
+ test2();
+ test3();
+ test4();
+ }
+
+ private static void clearTmpFile() {
+ File tmp = new File(TEMP_FILE);
+ if (tmp.exists()) {
+ tmp.delete();
+ }
+ }
+
+ private static void test1() throws Exception {
+ ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:+JBoltDumpMode",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+ ProcessBuilder pb2 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:+JBoltLoadMode",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+ ProcessBuilder pb3 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:+JBoltLoadMode",
+ "-XX:+JBoltDumpMode",
+ "-XX:JBoltOrderFile=" + SRC_DIR + "/o1.log",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+ ProcessBuilder pb4 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:JBoltOrderFile=" + TEMP_FILE,
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+
+ OutputAnalyzer out1 = new OutputAnalyzer(pb1.start());
+ OutputAnalyzer out2 = new OutputAnalyzer(pb2.start());
+ OutputAnalyzer out3 = new OutputAnalyzer(pb3.start());
+ OutputAnalyzer out4 = new OutputAnalyzer(pb4.start());
+
+ String stdout;
+
+ stdout = out1.getStdout();
+ if (!stdout.contains("JBoltOrderFile is not set!")) {
+ throw new RuntimeException(stdout);
+ }
+
+ stdout = out2.getStdout();
+ if (!stdout.contains("JBoltOrderFile is not set!")) {
+ throw new RuntimeException(stdout);
+ }
+
+ stdout = out3.getStdout();
+ if (!stdout.contains("Do not set both JBoltDumpMode and JBoltLoadMode!")) {
+ throw new RuntimeException(stdout);
+ }
+
+ stdout = out4.getStdout();
+ if (!stdout.contains("JBoltOrderFile is ignored because it is in auto mode.")) {
+ throw new RuntimeException(stdout);
+ }
+ }
+
+ private static void test2() throws Exception {
+ ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:+PrintFlagsFinal",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+ ProcessBuilder pb2 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:+JBoltDumpMode",
+ "-XX:JBoltOrderFile=" + TEMP_FILE,
+ "-XX:+PrintFlagsFinal",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+ ProcessBuilder pb3 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:+JBoltLoadMode",
+ "-XX:JBoltOrderFile=" + SRC_DIR + "/o1.log",
+ "-XX:+PrintFlagsFinal",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+
+ OutputAnalyzer out1 = new OutputAnalyzer(pb1.start());
+ OutputAnalyzer out2 = new OutputAnalyzer(pb2.start());
+ OutputAnalyzer out3 = new OutputAnalyzer(pb3.start());
+
+ String stdout;
+
+ stdout = out1.getStdout().replaceAll(" +", "");
+ if (!stdout.contains("JBoltDumpMode=false") || !stdout.contains("JBoltLoadMode=false")) {
+ throw new RuntimeException(stdout);
+ }
+
+ stdout = out2.getStdout().replaceAll(" +", "");
+ if (!stdout.contains("JBoltDumpMode=true") || !stdout.contains("JBoltLoadMode=false")) {
+ throw new RuntimeException(stdout);
+ }
+
+ clearTmpFile();
+
+ stdout = out3.getStdout().replaceAll(" +", "");
+ if (!stdout.contains("JBoltDumpMode=false") || !stdout.contains("JBoltLoadMode=true")) {
+ throw new RuntimeException(stdout);
+ }
+ }
+
+ private static void test3() throws Exception {
+ ProcessBuilder pbF0 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:+JBoltLoadMode",
+ "-XX:JBoltOrderFile=" + TEMP_FILE,
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+ ProcessBuilder pbF1 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:+JBoltLoadMode",
+ "-XX:JBoltOrderFile=" + SRC_DIR + "/o1.log",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+ ProcessBuilder pbF2 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:+JBoltLoadMode",
+ "-XX:JBoltOrderFile=" + SRC_DIR + "/o2.log",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+ ProcessBuilder pbF3 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:+JBoltLoadMode",
+ "-XX:JBoltOrderFile=" + SRC_DIR + "/o3.log",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+ ProcessBuilder pbF4 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UseJBolt",
+ "-XX:+JBoltLoadMode",
+ "-XX:JBoltOrderFile=" + SRC_DIR + "/o4.log",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+
+ OutputAnalyzer outF0 = new OutputAnalyzer(pbF0.start());
+ OutputAnalyzer outF1 = new OutputAnalyzer(pbF1.start());
+ OutputAnalyzer outF2 = new OutputAnalyzer(pbF2.start());
+ OutputAnalyzer outF3 = new OutputAnalyzer(pbF3.start());
+ OutputAnalyzer outF4 = new OutputAnalyzer(pbF4.start());
+
+ String stdout;
+
+ stdout = outF0.getStdout();
+ if (!stdout.contains("JBoltOrderFile does not exist or cannot be accessed!")) {
+ throw new RuntimeException(stdout);
+ }
+
+ stdout = outF1.getStdout();
+ if (!stdout.contains("Wrong format of JBolt order line! line=\"X 123 aa bb cc\".")) {
+ throw new RuntimeException(stdout);
+ }
+
+ stdout = outF2.getStdout();
+ if (!stdout.contains("Wrong format of JBolt order line! line=\"M aa/bb/C dd ()V\".")) {
+ throw new RuntimeException(stdout);
+ }
+
+ stdout = outF3.getStdout();
+ if (!stdout.contains("Duplicated method: {aa/bb/CC dd ()V}!")) {
+ throw new RuntimeException(stdout);
+ }
+
+ stdout = outF4.getStdout();
+ if (stdout.contains("Error occurred during initialization of VM")) {
+ throw new RuntimeException(stdout);
+ }
+ outF4.shouldHaveExitValue(0);
+
+ clearTmpFile();
+ }
+
+ private static void test4() throws Exception {
+ ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+JBoltDumpMode",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+ ProcessBuilder pb2 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+JBoltLoadMode",
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+ ProcessBuilder pb3 = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:JBoltOrderFile=" + TEMP_FILE,
+ "-Xlog:jbolt*=trace",
+ "--version"
+ );
+
+ OutputAnalyzer out1 = new OutputAnalyzer(pb1.start());
+ OutputAnalyzer out2 = new OutputAnalyzer(pb2.start());
+ OutputAnalyzer out3 = new OutputAnalyzer(pb3.start());
+
+ String stdout;
+
+ stdout = out1.getStdout();
+ if (!stdout.contains("Do not set VM option JBoltDumpMode without UseJBolt enabled.")) {
+ throw new RuntimeException(stdout);
+ }
+
+ stdout = out2.getStdout();
+ if (!stdout.contains("Do not set VM option JBoltLoadMode without UseJBolt enabled.")) {
+ throw new RuntimeException(stdout);
+ }
+
+ stdout = out3.getStdout();
+ if (!stdout.contains("Do not set VM option JBoltOrderFile without UseJBolt enabled.")) {
+ throw new RuntimeException(stdout);
+ }
+
+ clearTmpFile();
+ }
+}
diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/o1.log b/test/hotspot/jtreg/compiler/codecache/jbolt/o1.log
new file mode 100644
index 000000000..f0ef01586
--- /dev/null
+++ b/test/hotspot/jtreg/compiler/codecache/jbolt/o1.log
@@ -0,0 +1,2 @@
+M 123 aa/bb/C dd ()V
+X 123 aa bb cc
diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/o2.log b/test/hotspot/jtreg/compiler/codecache/jbolt/o2.log
new file mode 100644
index 000000000..ef348a6ab
--- /dev/null
+++ b/test/hotspot/jtreg/compiler/codecache/jbolt/o2.log
@@ -0,0 +1,2 @@
+M aa/bb/C dd ()V
+M 123 aa/bb/CC dd ()V
\ No newline at end of file
diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/o3.log b/test/hotspot/jtreg/compiler/codecache/jbolt/o3.log
new file mode 100644
index 000000000..fe6906b47
--- /dev/null
+++ b/test/hotspot/jtreg/compiler/codecache/jbolt/o3.log
@@ -0,0 +1,4 @@
+# this is a comment
+C
+M 123 aa/bb/CC dd ()V
+M 123 aa/bb/CC dd ()V
\ No newline at end of file
diff --git a/test/hotspot/jtreg/compiler/codecache/jbolt/o4.log b/test/hotspot/jtreg/compiler/codecache/jbolt/o4.log
new file mode 100644
index 000000000..13e96dbab
--- /dev/null
+++ b/test/hotspot/jtreg/compiler/codecache/jbolt/o4.log
@@ -0,0 +1,12 @@
+M 123 aa/bb/CC dd ()V
+# asdfadsfadfs
+C
+M 456 aa/bb/CC ddd ()V
+M 456 aa/bb/CCC dd ()V
+
+C
+
+
+
+
+M 456 aa/bb/CCCCCC ddddddd ()V
diff --git a/test/hotspot/jtreg/runtime/cds/appcds/ClassLoaderTest.java b/test/hotspot/jtreg/runtime/cds/appcds/ClassLoaderTest.java
index 4fba6584f..6f4cc5a83 100644
--- a/test/hotspot/jtreg/runtime/cds/appcds/ClassLoaderTest.java
+++ b/test/hotspot/jtreg/runtime/cds/appcds/ClassLoaderTest.java
@@ -57,7 +57,7 @@ public class ClassLoaderTest {
String bootClassPath = "-Xbootclasspath/a:" + appJar +
File.pathSeparator + whiteBoxJar;
- TestCommon.dump(appJar, appClasses, bootClassPath);
+ TestCommon.dump(appJar, appClasses, bootClassPath).shouldHaveExitValue(0);
TestCommon.run(
"-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
diff --git a/test/lib/jdk/test/whitebox/code/BlobType.java b/test/lib/jdk/test/whitebox/code/BlobType.java
index 24ce9d96a..59039bbbe 100644
--- a/test/lib/jdk/test/whitebox/code/BlobType.java
+++ b/test/lib/jdk/test/whitebox/code/BlobType.java
@@ -46,8 +46,24 @@ public enum BlobType {
|| type == BlobType.MethodNonProfiled;
}
},
+ // Execution hot non-profiled nmethods
+ MethodJBoltHot(2, "CodeHeap 'jbolt hot nmethods'", "JBoltCodeHeapSize") {
+ @Override
+ public boolean allowTypeWhenOverflow(BlobType type) {
+ return super.allowTypeWhenOverflow(type)
+ || type == BlobType.MethodNonProfiled;
+ }
+ },
+ // Execution tmp non-profiled nmethods
+ MethodJBoltTmp(3, "CodeHeap 'jbolt tmp nmethods'", "JBoltCodeHeapSize") {
+ @Override
+ public boolean allowTypeWhenOverflow(BlobType type) {
+ return super.allowTypeWhenOverflow(type)
+ || type == BlobType.MethodNonProfiled;
+ }
+ },
// Non-nmethods like Buffers, Adapters and Runtime Stubs
- NonNMethod(2, "CodeHeap 'non-nmethods'", "NonNMethodCodeHeapSize") {
+ NonNMethod(4, "CodeHeap 'non-nmethods'", "NonNMethodCodeHeapSize") {
@Override
public boolean allowTypeWhenOverflow(BlobType type) {
return super.allowTypeWhenOverflow(type)
@@ -56,7 +72,7 @@ public enum BlobType {
}
},
// All types (No code cache segmentation)
- All(3, "CodeCache", "ReservedCodeCacheSize");
+ All(5, "CodeCache", "ReservedCodeCacheSize");
public final int id;
public final String sizeOptionName;
@@ -99,6 +115,10 @@ public enum BlobType {
// there is no MethodProfiled in non tiered world or pure C1
result.remove(MethodProfiled);
}
+ if (!whiteBox.getBooleanVMFlag("UseJBolt") || whiteBox.getBooleanVMFlag("JBoltDumpMode")) {
+ result.remove(MethodJBoltHot);
+ result.remove(MethodJBoltTmp);
+ }
return result;
}
--
2.47.0.windows.2