diff --git a/Add-Dynamic-Max-Heap-feature-for-G1GC.patch b/Add-Dynamic-Max-Heap-feature-for-G1GC.patch new file mode 100644 index 0000000000000000000000000000000000000000..53cf4233cf56b2bc63fb5ce441fb905194d708c2 --- /dev/null +++ b/Add-Dynamic-Max-Heap-feature-for-G1GC.patch @@ -0,0 +1,1761 @@ +From 4911cc717fa97fac6314bf9cb054ed686f725e90 Mon Sep 17 00:00:00 2001 +Date: Thu, 22 May 2025 19:48:09 +0800 +Subject: [PATCH 2/4] Add Dynamic Max Heap feature for G1GC + +--- + .../sun/jvm/hotspot/runtime/VMOps.java | 1 + + hotspot/src/os/linux/vm/os_linux.cpp | 31 +++- + hotspot/src/os/linux/vm/os_linux.hpp | 38 ++++- + .../gc_implementation/g1/g1CollectedHeap.cpp | 112 ++++++++++---- + .../gc_implementation/g1/g1CollectedHeap.hpp | 31 +++- + .../g1/g1CollectorPolicy.cpp | 6 +- + .../g1/g1MonitoringSupport.cpp | 14 ++ + .../g1/g1MonitoringSupport.hpp | 4 +- + .../g1/heapRegionManager.cpp | 14 ++ + .../g1/heapRegionManager.hpp | 16 +- + .../gc_implementation/g1/vm_operations_g1.cpp | 134 +++++++++++++++++ + .../gc_implementation/g1/vm_operations_g1.hpp | 13 ++ + .../parallelScavenge/parallelScavengeHeap.hpp | 6 + + .../shared/dynamicMaxHeap.cpp | 138 ++++++++++++++++++ + .../shared/dynamicMaxHeap.hpp | 71 +++++++++ + .../shared/generationCounters.cpp | 16 +- + .../shared/generationCounters.hpp | 5 + + .../share/vm/gc_interface/collectedHeap.cpp | 1 + + .../share/vm/gc_interface/collectedHeap.hpp | 10 ++ + hotspot/src/share/vm/gc_interface/gcCause.cpp | 3 + + hotspot/src/share/vm/gc_interface/gcCause.hpp | 1 + + .../src/share/vm/memory/collectorPolicy.cpp | 3 +- + .../src/share/vm/memory/collectorPolicy.hpp | 6 + + .../src/share/vm/memory/genCollectedHeap.hpp | 5 + + .../src/share/vm/memory/referencePolicy.cpp | 3 + + hotspot/src/share/vm/memory/universe.cpp | 1 + + hotspot/src/share/vm/memory/universe.hpp | 7 + + .../vm/prims/wbtestmethods/parserTests.cpp | 53 +++++-- + .../vm/prims/wbtestmethods/parserTests.hpp | 2 +- + hotspot/src/share/vm/prims/whitebox.cpp | 2 +- + 
hotspot/src/share/vm/runtime/arguments.cpp | 16 +- + hotspot/src/share/vm/runtime/globals.hpp | 6 + + hotspot/src/share/vm/runtime/os.cpp | 1 + + hotspot/src/share/vm/runtime/thread.cpp | 11 ++ + .../src/share/vm/runtime/vm_operations.hpp | 1 + + .../share/vm/services/diagnosticCommand.cpp | 49 ++++++- + .../share/vm/services/diagnosticCommand.hpp | 23 ++- + .../share/vm/services/diagnosticFramework.cpp | 9 +- + hotspot/test/serviceability/ParserTest.java | 23 ++- + .../whitebox/sun/hotspot/WhiteBox.java | 2 +- + .../sun/hotspot/parser/DiagnosticCommand.java | 11 ++ + test/lib/sun/hotspot/WhiteBox.java | 2 +- + 42 files changed, 834 insertions(+), 67 deletions(-) + create mode 100644 hotspot/src/share/vm/gc_implementation/shared/dynamicMaxHeap.cpp + create mode 100644 hotspot/src/share/vm/gc_implementation/shared/dynamicMaxHeap.hpp + +diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java +index 84f535285..7df340b5e 100644 +--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java ++++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java +@@ -82,5 +82,6 @@ public enum VMOps { + JFRCheckpoint, + Exit, + LinuxDllLoad, ++ DynamicMaxHeap, + Terminating + } +diff --git a/hotspot/src/os/linux/vm/os_linux.cpp b/hotspot/src/os/linux/vm/os_linux.cpp +index ec4222d42..a6525944a 100644 +--- a/hotspot/src/os/linux/vm/os_linux.cpp ++++ b/hotspot/src/os/linux/vm/os_linux.cpp +@@ -5546,8 +5546,33 @@ os::Linux::heap_dict_free_t os::Linux::_heap_dict_free; + os::Linux::heap_vector_add_t os::Linux::_heap_vector_add; + os::Linux::heap_vector_get_next_t os::Linux::_heap_vector_get_next; + os::Linux::heap_vector_free_t os::Linux::_heap_vector_free; ++os::Linux::dmh_g1_can_shrink_t os::Linux::_dmh_g1_can_shrink; ++os::Linux::dmh_g1_get_region_limit_t os::Linux::_dmh_g1_get_region_limit; + +-void os::Linux::load_plugin_library() { ++void 
os::Linux::load_ACC_library_before_ergo() { ++ _dmh_g1_can_shrink = CAST_TO_FN_PTR(dmh_g1_can_shrink_t, dlsym(RTLD_DEFAULT, "DynamicMaxHeap_G1CanShrink")); ++ _dmh_g1_get_region_limit = CAST_TO_FN_PTR(dmh_g1_get_region_limit_t, dlsym(RTLD_DEFAULT, "DynamicMaxHeap_G1GetRegionLimit")); ++ ++ char path[JVM_MAXPATHLEN]; ++ char ebuf[1024]; ++ void* handle = NULL; ++ if (os::dll_build_name(path, sizeof(path), Arguments::get_dll_dir(), "jvm8_kunpeng")) { ++ handle = dlopen(path, RTLD_LAZY); ++ } ++ if (handle == NULL && os::dll_build_name(path, sizeof(path), "/usr/lib64", "jvm8_kunpeng")) { ++ handle = dlopen(path, RTLD_LAZY); ++ } ++ if (handle != NULL) { ++ if (_dmh_g1_can_shrink == NULL) { ++ _dmh_g1_can_shrink = CAST_TO_FN_PTR(dmh_g1_can_shrink_t, dlsym(handle, "DynamicMaxHeap_G1CanShrink")); ++ } ++ if (_dmh_g1_get_region_limit == NULL) { ++ _dmh_g1_get_region_limit = CAST_TO_FN_PTR(dmh_g1_get_region_limit_t, dlsym(handle, "DynamicMaxHeap_G1GetRegionLimit")); ++ } ++ } ++} ++ ++void os::Linux::load_ACC_library() { + _heap_dict_add = CAST_TO_FN_PTR(heap_dict_add_t, dlsym(RTLD_DEFAULT, "HeapDict_Add")); + _heap_dict_lookup = CAST_TO_FN_PTR(heap_dict_lookup_t, dlsym(RTLD_DEFAULT, "HeapDict_Lookup")); + _heap_dict_free = CAST_TO_FN_PTR(heap_dict_free_t, dlsym(RTLD_DEFAULT, "HeapDict_Free")); +@@ -5581,7 +5606,7 @@ void os::Linux::load_plugin_library() { + _heap_vector_get_next = CAST_TO_FN_PTR(heap_vector_get_next_t, dlsym(handle, "HeapVector_GetNext")); + } + if(_heap_vector_free == NULL) { +- _heap_vector_free= CAST_TO_FN_PTR(heap_vector_free_t, dlsym(handle, "HeapVector_Free")); ++ _heap_vector_free = CAST_TO_FN_PTR(heap_vector_free_t, dlsym(handle, "HeapVector_Free")); + } + } + } +@@ -5658,7 +5683,7 @@ jint os::init_2(void) + Linux::is_floating_stack() ? 
"floating stack" : "fixed stack"); + } + +- Linux::load_plugin_library(); ++ Linux::load_ACC_library(); + + if (UseNUMA) { + if (!Linux::libnuma_init()) { +diff --git a/hotspot/src/os/linux/vm/os_linux.hpp b/hotspot/src/os/linux/vm/os_linux.hpp +index d6866c67e..7a73268d7 100644 +--- a/hotspot/src/os/linux/vm/os_linux.hpp ++++ b/hotspot/src/os/linux/vm/os_linux.hpp +@@ -197,7 +197,8 @@ class Linux { + // stack or fixed stack. + static bool is_floating_stack() { return _is_floating_stack; } + +- static void load_plugin_library(); ++ static void load_ACC_library(); ++ static void load_ACC_library_before_ergo(); + static void libpthread_init(); + static void parse_numa_nodes(); + static bool libnuma_init(); +@@ -304,6 +305,8 @@ private: + typedef void* (*heap_vector_add_t)(void* val, void* heap_vector, bool &_inserted); + typedef void* (*heap_vector_get_next_t)(void* heap_vector, void* heap_vector_node, int &_cnt, void** &_items); + typedef void (*heap_vector_free_t)(void* heap_vector); ++ typedef bool (*dmh_g1_can_shrink_t)(double used_after_gc_d, size_t _new_max_heap, double maximum_used_percentage, size_t max_heap_size); ++ typedef uint (*dmh_g1_get_region_limit_t)(size_t _new_max_heap, size_t region_size); + static heap_dict_add_t _heap_dict_add; + static heap_dict_lookup_t _heap_dict_lookup; + static heap_dict_free_t _heap_dict_free; +@@ -330,6 +333,8 @@ private: + static numa_bitmask_equal_func_t _numa_bitmask_equal; + static numa_set_membind_func_t _numa_set_membind; + static numa_bitmask_free_func_t _numa_bitmask_free; ++ static dmh_g1_can_shrink_t _dmh_g1_can_shrink; ++ static dmh_g1_get_region_limit_t _dmh_g1_get_region_limit; + + static unsigned long* _numa_all_nodes; + static struct bitmask* _numa_all_nodes_ptr; +@@ -502,7 +507,7 @@ public: + // If we only have mallinfo(), values may be 32-bit truncated, which is signaled via + // "ok_but_possibly_wrapped". 
+ static mallinfo_retval_t get_mallinfo(glibc_mallinfo2* out); +- ++ + // Calls out to GNU extension malloc_info if available + // otherwise does nothing and returns -2. + static int malloc_info(FILE* stream); +@@ -583,8 +588,35 @@ public: + _heap_vector_free(heap_vector); + } + } +-}; ++ static bool dmh_g1_can_shrink(double used_after_gc_d, ++ size_t _new_max_heap, ++ double maximum_used_percentage, ++ size_t max_heap_size, ++ bool &is_valid, ++ bool just_check = false) { ++ is_valid = false; ++ bool result = false; ++ if (just_check) { ++ is_valid = (_dmh_g1_can_shrink != NULL); ++ } else if (_dmh_g1_can_shrink != NULL) { ++ is_valid = true; ++ result = _dmh_g1_can_shrink(used_after_gc_d, _new_max_heap, maximum_used_percentage, max_heap_size); ++ } ++ return result; ++ } + ++ static uint dmh_g1_get_region_limit(size_t _new_max_heap, size_t region_size, bool &is_valid, bool just_check = false) { ++ is_valid = false; ++ uint result = 0; ++ if (just_check) { ++ is_valid = (_dmh_g1_get_region_limit != NULL); ++ } else if (_dmh_g1_get_region_limit != NULL) { ++ is_valid = true; ++ result = _dmh_g1_get_region_limit(_new_max_heap, region_size); ++ } ++ return result; ++ } ++}; + + class PlatformEvent : public CHeapObj { + private: +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +index aeec4e576..72200f228 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +@@ -51,6 +51,7 @@ + #include "gc_implementation/g1/heapRegionRemSet.hpp" + #include "gc_implementation/g1/heapRegionSet.inline.hpp" + #include "gc_implementation/g1/vm_operations_g1.hpp" ++#include "gc_implementation/shared/dynamicMaxHeap.hpp" + #include "gc_implementation/shared/gcHeapSummary.hpp" + #include "gc_implementation/shared/gcTimer.hpp" + #include "gc_implementation/shared/gcTrace.hpp" +@@ -770,31 +771,7 @@ HeapWord* 
G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationCo + // Policy: We could not find enough regions for the humongous object in the + // free list. Look through the heap to find a mix of free and uncommitted regions. + // If so, try expansion. +- first = _hrm.find_contiguous_empty_or_unavailable(obj_regions); +- if (first != G1_NO_HRM_INDEX) { +- // We found something. Make sure these regions are committed, i.e. expand +- // the heap. Alternatively we could do a defragmentation GC. +- ergo_verbose1(ErgoHeapSizing, +- "attempt heap expansion", +- ergo_format_reason("humongous allocation request failed") +- ergo_format_byte("allocation request"), +- word_size * HeapWordSize); +- +- _hrm.expand_at(first, obj_regions); +- g1_policy()->record_new_heap_size(num_regions()); +- +-#ifdef ASSERT +- for (uint i = first; i < first + obj_regions; ++i) { +- HeapRegion* hr = region_at(i); +- assert(hr->is_free(), "sanity"); +- assert(hr->is_empty(), "sanity"); +- assert(is_on_master_free_list(hr), "sanity"); +- } +-#endif +- _hrm.allocate_free_regions_starting_at(first, obj_regions); +- } else { +- // Policy: Potentially trigger a defragmentation GC. +- } ++ first = attempt_expansion_for_humongous_allocation(obj_regions, word_size); + } + + HeapWord* result = NULL; +@@ -814,6 +791,43 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationCo + return result; + } + ++uint G1CollectedHeap::attempt_expansion_for_humongous_allocation(uint obj_regions, size_t word_size) { ++ // Policy: We could not find enough regions for the humongous object in the ++ // free list. Look through the heap to find a mix of free and uncommitted regions. ++ // If so, try expansion. ++ uint first = _hrm.find_contiguous_empty_or_unavailable(obj_regions); ++ if (first != G1_NO_HRM_INDEX) { ++ // We found something. Make sure these regions are committed, i.e. expand ++ // the heap. Alternatively we could do a defragmentation GC. 
++ ergo_verbose1(ErgoHeapSizing, ++ "attempt heap expansion", ++ ergo_format_reason("humongous allocation request failed") ++ ergo_format_byte("allocation request"), ++ word_size * HeapWordSize); ++ ++ _hrm.expand_at(first, obj_regions); ++ g1_policy()->record_new_heap_size(num_regions()); ++ if (Universe::is_dynamic_max_heap_enable()) { ++ // expand might fail, search continous only empty again ++ first = _hrm.find_contiguous_only_empty(obj_regions); ++ } ++ if (first != G1_NO_HRM_INDEX) { ++#ifdef ASSERT ++ for (uint i = first; i < first + obj_regions; ++i) { ++ HeapRegion* hr = region_at(i); ++ assert(hr->is_free(), "sanity"); ++ assert(hr->is_empty(), "sanity"); ++ assert(is_on_master_free_list(hr), "sanity"); ++ } ++#endif ++ _hrm.allocate_free_regions_starting_at(first, obj_regions); ++ } ++ } else { ++ // Policy: Potentially trigger a defragmentation GC. ++ } ++ return first; ++} ++ + HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) { + assert_heap_not_locked_and_not_at_safepoint(); + assert(!isHumongous(word_size), "we do not allow humongous TLABs"); +@@ -1421,7 +1435,11 @@ size_t G1CollectedHeap::full_collection_resize_amount(bool& expand) { + const double minimum_used_percentage = 1.0 - maximum_free_percentage; + + const size_t min_heap_size = collector_policy()->min_heap_byte_size(); +- const size_t max_heap_size = collector_policy()->max_heap_byte_size(); ++ size_t max_heap_size = collector_policy()->max_heap_byte_size(); ++ if (Universe::is_dynamic_max_heap_enable()) { ++ max_heap_size = current_max_heap_size(); ++ guarantee(max_heap_size >= min_heap_size, "must be"); ++ } + + // We have to be careful here as these two calculations can overflow + // 32-bit size_t's. 
+@@ -1488,6 +1506,27 @@ size_t G1CollectedHeap::full_collection_resize_amount(bool& expand) { + return shrink_bytes; + } + ++ size_t exp_size = exp_dynamic_max_heap_size(); ++ if (Universe::is_dynamic_max_heap_enable() && ++ (exp_size > 0) && ++ (exp_size < capacity()) && ++ (exp_size >= minimum_desired_capacity)) { ++ // shrink to exp_dynamic_max_heap_size when ++ // 1. exp_dynamic_max_heap_size smaller than capacity ++ // 2. exp_dynamic_max_heap_size bigger than minimum_desired_capacity ++ size_t shrink_bytes = capacity() - exp_size; ++ ergo_verbose3(ErgoHeapSizing, ++ "attempt heap shrinking for dynamic max heap", ++ ergo_format_reason("capacity higher than " ++ "expected dynamic max heap after Full GC") ++ ergo_format_byte("capacity") ++ ergo_format_byte("occupancy") ++ ergo_format_byte("expected dynamic max heap"), ++ capacity_after_gc, used_after_gc, exp_size); ++ expand = false; ++ return shrink_bytes; ++ } ++ + expand = true; + return 0; + } +@@ -1811,6 +1850,9 @@ jint G1CollectedHeap::initialize() { + + size_t init_byte_size = collector_policy()->initial_heap_byte_size(); + size_t max_byte_size = collector_policy()->max_heap_byte_size(); ++ if (Universe::is_dynamic_max_heap_enable()) { ++ max_byte_size = collector_policy()->max_heap_byte_size_limit(); ++ } + size_t heap_alignment = collector_policy()->heap_alignment(); + + if (G1Uncommit) { +@@ -2749,6 +2791,12 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { + } + + size_t G1CollectedHeap::max_capacity() const { ++ // Dynamic Max Heap ++ if (Universe::is_dynamic_max_heap_enable()) { ++ size_t cur_size = current_max_heap_size(); ++ guarantee(cur_size <= _hrm.reserved().byte_size(), "must be"); ++ return cur_size; ++ } + return _hrm.reserved().byte_size(); + } + +@@ -6520,7 +6568,7 @@ public: + } + }; + +-void G1CollectedHeap::rebuild_region_sets(bool free_list_only) { ++void G1CollectedHeap::rebuild_region_sets(bool free_list_only, bool is_dynamic_max_heap_shrink) { + 
assert_at_safepoint(true /* should_be_vm_thread */); + + if (!free_list_only) { +@@ -6533,7 +6581,8 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) { + if (!free_list_only) { + _allocator->set_used(cl.total_used()); + } +- assert(_allocator->used_unlocked() == recalculate_used(), ++ // don`t do this assert if is_dynamic_max_heap_shrink ++ assert(is_dynamic_max_heap_shrink || _allocator->used_unlocked() == recalculate_used(), + err_msg("inconsistent _allocator->used_unlocked(), " + "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT, + _allocator->used_unlocked(), recalculate_used())); +@@ -6847,3 +6896,10 @@ void G1CollectedHeap::rebuild_strong_code_roots() { + RebuildStrongCodeRootClosure blob_cl(this); + CodeCache::blobs_do(&blob_cl); + } ++ ++bool G1CollectedHeap::change_max_heap(size_t new_size) { ++ assert_heap_not_locked(); ++ G1_ChangeMaxHeapOp op(new_size); ++ VMThread::execute(&op); ++ return op.resize_success(); ++} +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +index c6e3c5d7b..23dd4217f 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +@@ -227,6 +227,8 @@ class G1CollectedHeap : public SharedHeap { + // Testing classes. + friend class G1CheckCSetFastTableClosure; + ++ friend class G1_ChangeMaxHeapOp; ++ + private: + // The one and only G1CollectedHeap, so static functions can find it. + static G1CollectedHeap* _g1h; +@@ -266,7 +268,7 @@ private: + // free_list_only is true, it will only rebuild the master free + // list. It is called after a Full GC (free_list_only == false) or + // after heap shrinking (free_list_only == true). +- void rebuild_region_sets(bool free_list_only); ++ void rebuild_region_sets(bool free_list_only, bool is_dynamic_max_heap_shrink = false); + + // Callback for region mapping changed events. 
+ G1RegionMappingChangedListener _listener; +@@ -1150,7 +1152,11 @@ public: + // But G1CollectedHeap doesn't yet support this. + + virtual bool is_maximal_no_gc() const { +- return _hrm.available() == 0; ++ return _hrm.available() == 0 || (Universe::is_dynamic_max_heap_enable() && _hrm.dynamic_available() == 0); ++ } ++ ++ HeapRegionManager* hrm() { ++ return &_hrm; + } + + // The current number of regions in the heap. +@@ -1243,6 +1249,7 @@ public: + void reset_free_regions_coming(); + bool free_regions_coming() { return _free_regions_coming; } + void wait_while_free_regions_coming(); ++ uint attempt_expansion_for_humongous_allocation(uint obj_regions, size_t word_size); + + // Determine whether the given region is one that we are using as an + // old GC alloc region. +@@ -1673,6 +1680,26 @@ public: + + protected: + size_t _max_heap_capacity; ++ ++private: ++ // Dynamic Max Heap ++ // expected DynamicMaxHeap size during full gc(temp value) ++ // 0 means do not adjust ++ // min_gen_size <= _expected_dynamic_max_heap_size <= _reserved size. ++ // will be cleared after DynamicMaxHeap VM operation. 
++ size_t _exp_dynamic_max_heap_size; ++public: ++ virtual bool change_max_heap(size_t new_size); ++ size_t exp_dynamic_max_heap_size() const { return _exp_dynamic_max_heap_size; } ++ void set_exp_dynamic_max_heap_size(size_t size) { ++ guarantee(size <= _reserved.byte_size(), "must be"); ++ _exp_dynamic_max_heap_size = size; ++ } ++ void update_gen_max_counter(size_t size) { ++ guarantee(Universe::is_dynamic_max_heap_enable(), "must be"); ++ _g1mm->young_collection_counters()->update_max_size(size); ++ _g1mm->old_collection_counters()->update_max_size(size); ++ } + }; + + #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp +index dc05454ad..cbc2d6f5d 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp +@@ -188,7 +188,11 @@ G1CollectorPolicy::G1CollectorPolicy() : + // the region size on the heap size, but the heap size should be + // aligned with the region size. To get around this we use the + // unaligned values for the heap. 
+- HeapRegion::setup_heap_region_size(MaxHeapSize); ++ if (Universe::is_dynamic_max_heap_enable()) { ++ HeapRegion::setup_heap_region_size(DynamicMaxHeapSizeLimit); ++ } else { ++ HeapRegion::setup_heap_region_size(MaxHeapSize); ++ } + HeapRegionRemSet::setup_remset_size(); + + G1ErgoVerbose::initialize(); +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp +index 8bbe4ff94..878b4fa2c 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp +@@ -272,3 +272,17 @@ void G1MonitoringSupport::update_eden_size() { + eden_counters()->update_used(eden_space_used()); + } + } ++ ++size_t G1MonitoringSupport::young_gen_max() { ++ if (Universe::is_dynamic_max_heap_enable()) { ++ return _g1h->current_max_heap_size(); ++ } ++ return overall_reserved(); ++} ++ ++size_t G1MonitoringSupport::old_gen_max() { ++ if (Universe::is_dynamic_max_heap_enable()) { ++ return _g1h->current_max_heap_size(); ++ } ++ return overall_reserved(); ++} +\ No newline at end of file +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp +index 03b7300ae..90f451109 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp +@@ -231,14 +231,14 @@ class G1MonitoringSupport : public CHeapObj { + size_t overall_used() { return _overall_used; } + + size_t young_gen_committed() { return _young_gen_committed; } +- size_t young_gen_max() { return overall_reserved(); } ++ size_t young_gen_max(); + size_t eden_space_committed() { return _eden_committed; } + size_t eden_space_used() { return _eden_used; } + size_t survivor_space_committed() { return _survivor_committed; } + size_t survivor_space_used() { return _survivor_used; } + + size_t 
old_gen_committed() { return old_space_committed(); } +- size_t old_gen_max() { return overall_reserved(); } ++ size_t old_gen_max(); + size_t old_space_committed() { return _old_committed; } + size_t old_space_used() { return _old_used; } + }; +diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp +index b9cf3410f..74d6b3836 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp +@@ -82,6 +82,7 @@ void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage, + MemRegion reserved = heap_storage->reserved(); + _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes); + ++ _dynamic_max_heap_length = (uint)_regions.length(); + _available_map.resize(_regions.length(), false); + _available_map.clear(); + _uncommit_list_filled = false; +@@ -249,6 +250,12 @@ uint HeapRegionManager::expand_at(uint start, uint num_regions) { + if (num_regions == 0) { + return 0; + } ++ if (Universe::is_dynamic_max_heap_enable()) { ++ uint available_regions = dynamic_max_heap_length() - length(); ++ guarantee(dynamic_max_heap_length() >= length(), err_msg("The current length %u must not exceed dynamic max heap length %u", length(), dynamic_max_heap_length())); ++ guarantee(available_regions >= 0 && available_regions <= max_length() && available_regions <= dynamic_max_heap_length(), "must be"); ++ num_regions = MIN2(num_regions, available_regions); ++ } + + uint cur = start; + uint idx_last_found = 0; +@@ -269,6 +276,13 @@ uint HeapRegionManager::expand_at(uint start, uint num_regions) { + } + + uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) { ++ if (Universe::is_dynamic_max_heap_enable()) { ++ guarantee(dynamic_max_heap_length() >= length(), err_msg("The current length %u must not exceed dynamic max heap length %u", length(), dynamic_max_heap_length())); ++ 
// No regions left, expand failed. ++ if (dynamic_max_heap_length() == length()) { ++ return 0; ++ } ++ } + uint expand_candidate = UINT_MAX; + for (uint i = 0; i < max_length(); i++) { + if (!can_expand(i)) { +diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp +index 38db9c660..7c4373a45 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp +@@ -119,6 +119,9 @@ class HeapRegionManager: public CHeapObj { + // Internal only. The highest heap region +1 we allocated a HeapRegion instance for. + uint _allocated_heapregions_length; + ++ // The max number of regions controlled by Dynamic Max Heap ++ uint _dynamic_max_heap_length; ++ + HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); } + HeapWord* heap_end() const {return _regions.end_address_mapped(); } + +@@ -161,7 +164,7 @@ public: + + public: + // Empty constructor, we'll initialize it with the initialize() method. +- HeapRegionManager() : _regions(), _heap_mapper(NULL), _num_committed(0), ++ HeapRegionManager() : _regions(), _heap_mapper(NULL), _num_committed(0), _dynamic_max_heap_length(0), + _next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL), + _allocated_heapregions_length(0), _available_map(), + _free_list("Free list", new MasterFreeRegionListMtSafeChecker()), +@@ -233,12 +236,23 @@ public: + // Return the number of available (uncommitted) regions. + uint available() const { return max_length() - length(); } + ++ // Return the number of dynamic available (uncommitted) regions. ++ uint dynamic_available() const { return dynamic_max_heap_length() - length(); } ++ + // Return the number of regions that have been committed in the heap. + uint length() const { return _num_committed; } + + // Return the maximum number of regions in the heap. 
+ uint max_length() const { return (uint)_regions.length(); } + ++ // Return the current maximum number of regions in the heap (dynamic max heap) ++ uint dynamic_max_heap_length() const { return (uint)_dynamic_max_heap_length; } ++ ++ void set_dynamic_max_heap_length(uint len) { ++ guarantee(len <= max_length(), "must be"); ++ _dynamic_max_heap_length = len; ++ } ++ + MemoryUsage get_auxiliary_data_memory_usage() const; + + MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); } +diff --git a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp +index 47267f21d..07886d81c 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp +@@ -257,3 +257,137 @@ void VM_CGC_Operation::doit_epilogue() { + release_and_notify_pending_list_lock(); + } + } ++ ++G1_ChangeMaxHeapOp::G1_ChangeMaxHeapOp(size_t new_max_heap) : ++ VM_ChangeMaxHeapOp(new_max_heap) { ++} ++ ++/* ++ * No need calculate young/old size, shrink will adjust young automatically. ++ * ensure young_list_length, _young_list_max_length, _young_list_target_length align. ++ * ++ * 1. check if need perform gc: new_heap_max >= minimum_desired_capacity ++ * 2. perform full GC if necessary ++ * 3. update new limit ++ * 4. validation ++ */ ++void G1_ChangeMaxHeapOp::doit() { ++ G1CollectedHeap* heap = (G1CollectedHeap*)Universe::heap(); ++ G1CollectorPolicy* policy = heap->g1_policy(); ++ const size_t min_heap_size = policy->min_heap_byte_size(); ++ const size_t max_heap_size = heap->current_max_heap_size(); ++ bool is_shrink = _new_max_heap < max_heap_size; ++ bool is_valid = false; ++ ++ // step1. 
calculate maximum_used_percentage for shrink validity check ++ const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; ++ const double maximum_used_percentage = 1.0 - minimum_free_percentage; ++ ++ // step2 trigger GC as needed and resize ++ if (is_shrink) { ++ trigger_gc_shrink(_new_max_heap, maximum_used_percentage, max_heap_size, is_valid); ++ if (!is_valid) { ++ // We should not reach here because we have already checked the existence of ++ // the ACC and disabled this feature when the ACC is absent. ++ DMH_LOG("G1_ChangeMaxHeapOp fail for missing ACC"); ++ return; ++ } ++ } ++ ++ DMH_LOG("G1_ChangeMaxHeapOp: current capacity " SIZE_FORMAT "K, new max heap " SIZE_FORMAT "K", ++ heap->capacity() / K, _new_max_heap / K); ++ ++ // step3 check if can update new limit ++ if (heap->capacity() <= _new_max_heap) { ++ uint dynamic_max_heap_len = os::Linux::dmh_g1_get_region_limit(_new_max_heap, HeapRegion::GrainBytes, is_valid); ++ if (!is_valid) { ++ // We should not reach here because we have already checked the existence of ++ // the ACC and disabled this feature when the ACC is absent. 
++ DMH_LOG("G1_ChangeMaxHeapOp fail for missing ACC"); ++ return; ++ } ++ heap->set_current_max_heap_size(_new_max_heap); ++ heap->hrm()->set_dynamic_max_heap_length(dynamic_max_heap_len); ++ // G1 young/old share same max size ++ heap->update_gen_max_counter(_new_max_heap); ++ _resize_success = true; ++ DMH_LOG("G1_ChangeMaxHeapOp success"); ++ } else { ++ DMH_LOG("G1_ChangeMaxHeapOp fail"); ++ } ++} ++ ++void G1_ChangeMaxHeapOp::trigger_gc_shrink(size_t _new_max_heap, ++ double maximum_used_percentage, ++ size_t max_heap_size, ++ bool &is_valid) { ++ G1CollectedHeap* heap = (G1CollectedHeap*)Universe::heap(); ++ G1CollectorPolicy* policy = heap->g1_policy(); ++ bool triggered_full_gc = false; ++ bool can_shrink = os::Linux::dmh_g1_can_shrink((double)heap->used(), _new_max_heap, maximum_used_percentage, max_heap_size, is_valid); ++ if (!is_valid) { ++ return; ++ } ++ if (!can_shrink) { ++ // trigger Young GC ++ policy->set_gcs_are_young(true); ++ GCCauseSetter gccs(heap, _gc_cause); ++ bool minor_gc_succeeded = heap->do_collection_pause_at_safepoint(policy->max_pause_time_ms()); ++ if (minor_gc_succeeded) { ++ DMH_LOG("G1_ChangeMaxHeapOp heap after Young GC"); ++ if (TraceDynamicMaxHeap) { ++ heap->print_on(tty); ++ } ++ } ++ can_shrink = os::Linux::dmh_g1_can_shrink((double)heap->used(), _new_max_heap, maximum_used_percentage, max_heap_size, is_valid); ++ if (!is_valid) { ++ return; ++ } ++ if (!can_shrink) { ++ // trigger Full GC and adjust everything in resize_if_necessary_after_full_collection ++ heap->set_exp_dynamic_max_heap_size(_new_max_heap); ++ heap->do_full_collection(true); ++ DMH_LOG("G1_ChangeMaxHeapOp heap after Full GC"); ++ if (TraceDynamicMaxHeap) { ++ heap->print_on(tty); ++ } ++ heap->set_exp_dynamic_max_heap_size(0); ++ triggered_full_gc = true; ++ } ++ } ++ if (!triggered_full_gc) { ++ // there may be two situations when entering this branch: ++ // 1. first check passed, no GC triggered ++ // 2. 
first check failed, triggered Young GC, ++ // second check passed ++ // so the shrink has not been completed and it must be valid to shrink ++ g1_shrink_without_full_gc(_new_max_heap); ++ } ++} ++ ++void G1_ChangeMaxHeapOp::g1_shrink_without_full_gc(size_t _new_max_heap) { ++ G1CollectedHeap* heap = (G1CollectedHeap*)Universe::heap(); ++ size_t capacity_before_shrink = heap->capacity(); ++ // _new_max_heap is large enough, do nothing ++ if (_new_max_heap >= capacity_before_shrink) { ++ return; ++ } ++ // Capacity too large, compute shrinking size and shrink ++ size_t shrink_bytes = capacity_before_shrink - _new_max_heap; ++ heap->verify_region_sets_optional(); ++ heap->tear_down_region_sets(true /* free_list_only */); ++ heap->shrink_helper(shrink_bytes); ++ heap->rebuild_region_sets(true /* free_list_only */, true /* is_dynamic_max_heap_shrink */); ++ heap->_hrm.verify_optional(); ++ heap->verify_region_sets_optional(); ++ heap->verify_after_gc(); ++ ++ DMH_LOG("G1_ChangeMaxHeapOp: attempt heap shrinking for dynamic max heap %s " ++ "origin capacity " SIZE_FORMAT "K " ++ "new capacity " SIZE_FORMAT "K " ++ "shrink by " SIZE_FORMAT "K", ++ heap->capacity() <= _new_max_heap ? "success":"fail", ++ capacity_before_shrink / K, ++ heap->capacity() / K, ++ shrink_bytes / K); ++} +diff --git a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp +index 265eb37d3..1858ef79b 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp +@@ -27,6 +27,7 @@ + + #include "gc_implementation/g1/g1AllocationContext.hpp" + #include "gc_implementation/shared/vmGCOperations.hpp" ++#include "gc_implementation/shared/dynamicMaxHeap.hpp" + + // VM_operations for the G1 collector. 
+ // VM_GC_Operation: +@@ -122,4 +123,16 @@ public: + } + }; + ++// Change Dynamic Max Heap Size ++class G1_ChangeMaxHeapOp : public VM_ChangeMaxHeapOp { ++public: ++ G1_ChangeMaxHeapOp(size_t new_max_heap); ++ virtual void doit(); ++ void trigger_gc_shrink(size_t _new_max_heap, ++ double maximum_used_percentage, ++ size_t max_heap_size, ++ bool& is_valid); ++ void g1_shrink_without_full_gc(size_t _new_max_heap); ++}; ++ + #endif // SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP +diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp +index 96244cb49..f128f0a65 100644 +--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp ++++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp +@@ -250,6 +250,12 @@ class ParallelScavengeHeap : public CollectedHeap { + ParStrongRootsScope(); + ~ParStrongRootsScope(); + }; ++ ++ // Dynamic Max Heap ++ bool change_max_heap(size_t new_size) { ++ // Not implemented yet ++ return false; ++ }; + }; + + #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP +diff --git a/hotspot/src/share/vm/gc_implementation/shared/dynamicMaxHeap.cpp b/hotspot/src/share/vm/gc_implementation/shared/dynamicMaxHeap.cpp +new file mode 100644 +index 000000000..0681fc212 +--- /dev/null ++++ b/hotspot/src/share/vm/gc_implementation/shared/dynamicMaxHeap.cpp +@@ -0,0 +1,138 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++#include "precompiled.hpp" ++#include "dynamicMaxHeap.hpp" ++ ++size_t DynamicMaxHeapConfig::_initial_max_heap_size = 0; ++ ++VM_ChangeMaxHeapOp::VM_ChangeMaxHeapOp(size_t new_max_heap) : ++ VM_GC_Operation(0, GCCause::_change_max_heap, 0, true) { ++ _new_max_heap = new_max_heap; ++ _resize_success = false; ++} ++ ++bool VM_ChangeMaxHeapOp::skip_operation() const { ++ return false; ++} ++ ++/* ++ * validity check ++ * new current max heap must be: ++ * 1. >= min_heap_byte_size ++ * 2. <= max_heap_byte_size ++ * 3. 
not equal to current_max_heap_size ++ */ ++bool CollectedHeap::check_new_max_heap_validity(size_t new_size, outputStream* st) { ++ if (new_size > collector_policy()->max_heap_byte_size_limit()) { ++ st->print_cr("GC.change_max_heap " SIZE_FORMAT "K exceeds maximum limit " SIZE_FORMAT "K", ++ (new_size / K), ++ (collector_policy()->max_heap_byte_size_limit() / K)); ++ return false; ++ } ++ if (new_size < collector_policy()->min_heap_byte_size()) { ++ st->print_cr("GC.change_max_heap " SIZE_FORMAT "K below minimum limit " SIZE_FORMAT "K", ++ (new_size / K), ++ (collector_policy()->min_heap_byte_size() / K)); ++ return false; ++ } ++ // don't print log if it is init shrink triggered by DynamicMaxHeapSizeLimit ++ if (new_size == current_max_heap_size()) { ++ st->print_cr("GC.change_max_heap " SIZE_FORMAT "K same with current max heap size " SIZE_FORMAT "K", ++ (new_size / K), ++ (current_max_heap_size() / K)); ++ return false; ++ } ++ return true; ++} ++ ++void DynamicMaxHeapChecker::common_check() { ++ if (!Universe::is_dynamic_max_heap_enable()) { ++ return; ++ } ++#if !defined(LINUX) || !defined(AARCH64) ++ warning_and_disable("-XX:DynamicMaxHeapSizeLimit can only be assigned on Linux aarch64"); ++ return; ++#endif ++#ifdef AARCH64 ++ if (!VM_Version::is_hisi_enabled()) { ++ warning_and_disable("-XX:DynamicMaxHeapSizeLimit can only be assigned on KUNGPENG now"); ++ return; ++ } ++#endif ++ bool is_valid = false; ++ size_t dummy_param = 0; ++ os::Linux::dmh_g1_get_region_limit(dummy_param, dummy_param, is_valid, true); ++ if (!is_valid) { ++ warning_and_disable("-XX:DynamicMaxHeapSizeLimit can only be used with ACC installed"); ++ return; ++ } ++ os::Linux::dmh_g1_can_shrink(dummy_param, dummy_param, dummy_param, dummy_param, is_valid, true); ++ if (!is_valid) { ++ warning_and_disable("-XX:DynamicMaxHeapSizeLimit can only be used with ACC installed"); ++ return; ++ } ++ if (FLAG_IS_CMDLINE(OldSize) || FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) { ++ 
warning_and_disable("-XX:DynamicMaxHeapSizeLimit can not be used with -XX:OldSize/-XX:NewSize/-XX:MaxNewSize"); ++ return; ++ } ++ if (UseAdaptiveGCBoundary) { ++ warning_and_disable("-XX:DynamicMaxHeapSizeLimit can not be used with -XX:+UseAdaptiveGCBoundary"); ++ return; ++ } ++ if (!UseAdaptiveSizePolicy) { ++ warning_and_disable("-XX:DynamicMaxHeapSizeLimit should be used with -XX:+UseAdaptiveSizePolicy"); ++ return; ++ } ++ // only G1 GC implemented now ++ if (!UseG1GC) { ++ warning_and_disable("-XX:DynamicMaxHeapSizeLimit should be used with -XX:+UseG1GC now"); ++ return; ++ } ++ if (G1Uncommit) { ++ warning_and_disable("-XX:DynamicMaxHeapSizeLimit can not be used with -XX:+G1Uncommit"); ++ return; ++ } ++} ++ ++// DynamicMaxHeapSizeLimit should be used together with Xmx and larger than Xmx ++bool DynamicMaxHeapChecker::check_dynamic_max_heap_size_limit() { ++ if (!FLAG_IS_CMDLINE(DynamicMaxHeapSizeLimit)) { ++ return false; ++ } ++ if (!FLAG_IS_CMDLINE(MaxHeapSize)) { ++ warning_and_disable("-XX:DynamicMaxHeapSizeLimit should be used together with -Xmx/-XX:MaxHeapSize"); ++ return false; ++ } ++ if (DynamicMaxHeapSizeLimit <= MaxHeapSize) { ++ warning_and_disable("-XX:DynamicMaxHeapSizeLimit should be larger than MaxHeapSize"); ++ return false; ++ } ++ return true; ++} ++ ++void DynamicMaxHeapChecker::warning_and_disable(const char *reason) { ++ warning("DynamicMaxHeap feature is not available for reason: %s, automatically disabled", reason); ++ FLAG_SET_DEFAULT(DynamicMaxHeapSizeLimit, ScaleForWordSize(DynamicMaxHeapChecker::_default_dynamic_max_heap_size_limit * M)); ++ Universe::set_dynamic_max_heap_enable(false); ++} +diff --git a/hotspot/src/share/vm/gc_implementation/shared/dynamicMaxHeap.hpp b/hotspot/src/share/vm/gc_implementation/shared/dynamicMaxHeap.hpp +new file mode 100644 +index 000000000..b618f5519 +--- /dev/null ++++ b/hotspot/src/share/vm/gc_implementation/shared/dynamicMaxHeap.hpp +@@ -0,0 +1,71 @@ ++/* ++ * Copyright (c) Huawei 
Technologies Co., Ltd. 2025. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_DYNAMIC_MAX_HEAP_OPERATION_HPP ++#define SHARE_VM_GC_IMPLEMENTATION_SHARED_DYNAMIC_MAX_HEAP_OPERATION_HPP ++ ++#include "utilities/defaultStream.hpp" ++#include "gc_implementation/shared/vmGCOperations.hpp" ++ ++#define DMH_LOG(fmt, ...) 
\ ++ if (TraceDynamicMaxHeap) { \ ++ ResourceMark rm; \ ++ tty->print_cr(fmt, ##__VA_ARGS__); \ ++ } ++ ++class VM_ChangeMaxHeapOp : public VM_GC_Operation { ++public: ++ VM_ChangeMaxHeapOp(size_t new_max_heap); ++ VMOp_Type type() const { ++ return VMOp_DynamicMaxHeap; ++ } ++ bool resize_success() const { ++ return _resize_success; ++ } ++protected: ++ size_t _new_max_heap; ++ bool _resize_success; ++private: ++ bool skip_operation() const; ++}; ++ ++class DynamicMaxHeapChecker : AllStatic { ++public: ++ static void common_check(); ++ static bool check_dynamic_max_heap_size_limit(); ++ static void warning_and_disable(const char *reason); ++private: ++ static const int _default_dynamic_max_heap_size_limit = 96; ++}; ++ ++class DynamicMaxHeapConfig : AllStatic { ++public: ++ static size_t initial_max_heap_size() { return _initial_max_heap_size; } ++ static void set_initial_max_heap_size(size_t new_size) { ++ _initial_max_heap_size = new_size; ++ } ++private: ++ static size_t _initial_max_heap_size; ++}; ++#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_DYNAMIC_MAX_HEAP_OPERATION_HPP +diff --git a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp +index 6dd07a3a9..59242b5e3 100644 +--- a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp ++++ b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp +@@ -50,8 +50,15 @@ void GenerationCounters::initialize(const char* name, int ordinal, int spaces, + min_capacity, CHECK); + + cname = PerfDataManager::counter_name(_name_space, "maxCapacity"); +- PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, +- max_capacity, CHECK); ++ // Dynamic Max Heap ++ if (Universe::is_dynamic_max_heap_enable()) { ++ _max_size = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, ++ max_capacity, CHECK); ++ } else { ++ _max_size = NULL; ++ PerfDataManager::create_constant(SUN_GC, cname, 
PerfData::U_Bytes, ++ max_capacity, CHECK); ++ } + + cname = PerfDataManager::counter_name(_name_space, "capacity"); + _current_size = +@@ -81,3 +88,8 @@ void GenerationCounters::update_all() { + assert(_virtual_space != NULL, "otherwise, override this method"); + _current_size->set_value(_virtual_space->committed_size()); + } ++ ++void GenerationCounters::update_max_size(size_t size) { ++ guarantee(Universe::is_dynamic_max_heap_enable(), "must be"); ++ _max_size->set_value(size); ++} +diff --git a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp +index b716fe8bd..95649bfb9 100644 +--- a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp ++++ b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp +@@ -40,6 +40,8 @@ private: + size_t curr_capacity); + + protected: ++ // Dynamic Max Heap ++ PerfVariable* _max_size; // max size can be changed when Dynamic Max Heap is on + PerfVariable* _current_size; + VirtualSpace* _virtual_space; + +@@ -74,6 +76,9 @@ private: + + virtual void update_all(); + ++ // Dynamic Max Heap ++ void update_max_size(size_t size); ++ + const char* name_space() const { return _name_space; } + + }; +diff --git a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp +index 67110ec32..a2e67f7a4 100644 +--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp ++++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp +@@ -161,6 +161,7 @@ CollectedHeap::CollectedHeap() : _n_par_threads(0) + _barrier_set = NULL; + _is_gc_active = false; + _total_collections = _total_full_collections = 0; ++ _current_max_heap_size = MaxHeapSize; + _gc_cause = _gc_lastcause = GCCause::_no_gc; + NOT_PRODUCT(_promotion_failure_alot_count = 0;) + NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;) +diff --git a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp 
b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp +index 685d6cd51..d319b1436 100644 +--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp ++++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp +@@ -109,6 +109,7 @@ class CollectedHeap : public CHeapObj { + BarrierSet* _barrier_set; + bool _is_gc_active; + uint _n_par_threads; ++ size_t _current_max_heap_size; + + unsigned int _total_collections; // ... started + unsigned int _total_full_collections; // ... started +@@ -665,6 +666,15 @@ class CollectedHeap : public CHeapObj { + /////////////// Unit tests /////////////// + + NOT_PRODUCT(static void test_is_in();) ++ ++public: ++ // Dynamic Max Heap ++ virtual bool change_max_heap(size_t new_size) = 0; ++ bool check_new_max_heap_validity(size_t new_size, outputStream* st); ++ size_t current_max_heap_size() const { return _current_max_heap_size; } ++ void set_current_max_heap_size(size_t new_size) { ++ _current_max_heap_size = new_size; ++ } + }; + + // Class to set and reset the GC cause for a CollectedHeap. 
+diff --git a/hotspot/src/share/vm/gc_interface/gcCause.cpp b/hotspot/src/share/vm/gc_interface/gcCause.cpp +index 49851afa2..300c5ba7c 100644 +--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp ++++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp +@@ -67,6 +67,9 @@ const char* GCCause::to_string(GCCause::Cause cause) { + case _allocation_failure: + return "Allocation Failure"; + ++ case _change_max_heap: ++ return "Change Max Heap"; ++ + case _tenured_generation_full: + return "Tenured Generation Full"; + +diff --git a/hotspot/src/share/vm/gc_interface/gcCause.hpp b/hotspot/src/share/vm/gc_interface/gcCause.hpp +index 384d868f3..bb3dadbbd 100644 +--- a/hotspot/src/share/vm/gc_interface/gcCause.hpp ++++ b/hotspot/src/share/vm/gc_interface/gcCause.hpp +@@ -55,6 +55,7 @@ class GCCause : public AllStatic { + _no_gc, + _no_cause_specified, + _allocation_failure, ++ _change_max_heap, + + /* implementation specific */ + +diff --git a/hotspot/src/share/vm/memory/collectorPolicy.cpp b/hotspot/src/share/vm/memory/collectorPolicy.cpp +index 727408d52..1010ffb5b 100644 +--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp ++++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp +@@ -56,7 +56,8 @@ CollectorPolicy::CollectorPolicy() : + _max_heap_size_cmdline(false), + _size_policy(NULL), + _should_clear_all_soft_refs(false), +- _all_soft_refs_clear(false) ++ _all_soft_refs_clear(false), ++ _max_heap_byte_size_limit(DynamicMaxHeapSizeLimit) + {} + + #ifdef ASSERT +diff --git a/hotspot/src/share/vm/memory/collectorPolicy.hpp b/hotspot/src/share/vm/memory/collectorPolicy.hpp +index c924c2e1d..fec3bab8d 100644 +--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp ++++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp +@@ -72,6 +72,9 @@ class CollectorPolicy : public CHeapObj { + size_t _max_heap_byte_size; + size_t _min_heap_byte_size; + ++ // Dynamic Max Heap ++ size_t _max_heap_byte_size_limit; ++ + size_t _space_alignment; + size_t _heap_alignment; + +@@ -112,6 +115,9 @@ class 
CollectorPolicy : public CHeapObj { + size_t max_heap_byte_size() { return _max_heap_byte_size; } + size_t min_heap_byte_size() { return _min_heap_byte_size; } + ++ // Dynamic Max Heap ++ size_t max_heap_byte_size_limit() { return _max_heap_byte_size_limit; } ++ + enum Name { + CollectorPolicyKind, + TwoGenerationCollectorPolicyKind, +diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.hpp b/hotspot/src/share/vm/memory/genCollectedHeap.hpp +index 9e5405e28..c802ee919 100644 +--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp ++++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp +@@ -550,6 +550,11 @@ protected: + + public: + void stop(); ++ // Dynamic Max Heap ++ bool change_max_heap(size_t new_size) { ++ // Not implemented yet ++ return false; ++ } + }; + + #endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP +diff --git a/hotspot/src/share/vm/memory/referencePolicy.cpp b/hotspot/src/share/vm/memory/referencePolicy.cpp +index a667332a7..abf02b704 100644 +--- a/hotspot/src/share/vm/memory/referencePolicy.cpp ++++ b/hotspot/src/share/vm/memory/referencePolicy.cpp +@@ -63,6 +63,9 @@ LRUMaxHeapPolicy::LRUMaxHeapPolicy() { + // Capture state (of-the-VM) information needed to evaluate the policy + void LRUMaxHeapPolicy::setup() { + size_t max_heap = MaxHeapSize; ++ if (Universe::is_dynamic_max_heap_enable()) { ++ max_heap = Universe::heap()->current_max_heap_size(); ++ } + max_heap -= Universe::get_heap_used_at_last_gc(); + max_heap /= M; + +diff --git a/hotspot/src/share/vm/memory/universe.cpp b/hotspot/src/share/vm/memory/universe.cpp +index d76a10d14..8641d490f 100644 +--- a/hotspot/src/share/vm/memory/universe.cpp ++++ b/hotspot/src/share/vm/memory/universe.cpp +@@ -157,6 +157,7 @@ CollectedHeap* Universe::_collectedHeap = NULL; + NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true }; + NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true }; + address Universe::_narrow_ptrs_base; ++bool Universe::_enable_dynamic_max_heap = false; + + void 
Universe::basic_type_classes_do(void f(Klass*)) { + f(boolArrayKlassObj()); +diff --git a/hotspot/src/share/vm/memory/universe.hpp b/hotspot/src/share/vm/memory/universe.hpp +index 88ad002fa..3a31afd99 100644 +--- a/hotspot/src/share/vm/memory/universe.hpp ++++ b/hotspot/src/share/vm/memory/universe.hpp +@@ -251,6 +251,9 @@ class Universe: AllStatic { + + static void compute_verify_oop_data(); + ++ // Dynamic Max Heap ++ static bool _enable_dynamic_max_heap; ++ + public: + // Known classes in the VM + static Klass* boolArrayKlassObj() { return _boolArrayKlassObj; } +@@ -484,6 +487,10 @@ class Universe: AllStatic { + + // Compiler support + static int base_vtable_size() { return _base_vtable_size; } ++ ++ // Dynamic Max Heap ++ static bool is_dynamic_max_heap_enable() {return _enable_dynamic_max_heap; } ++ static void set_dynamic_max_heap_enable(bool a) { _enable_dynamic_max_heap = a; } + }; + + class DeferredObjAllocEvent : public CHeapObj { +diff --git a/hotspot/src/share/vm/prims/wbtestmethods/parserTests.cpp b/hotspot/src/share/vm/prims/wbtestmethods/parserTests.cpp +index da3493cf3..ff0852eb4 100644 +--- a/hotspot/src/share/vm/prims/wbtestmethods/parserTests.cpp ++++ b/hotspot/src/share/vm/prims/wbtestmethods/parserTests.cpp +@@ -70,38 +70,63 @@ static void fill_in_parser(DCmdParser* parser, oop argument) + const char* desc = WhiteBox::lookup_jstring("desc", argument); + const char* default_value = WhiteBox::lookup_jstring("defaultValue", argument); + bool mandatory = WhiteBox::lookup_bool("mandatory", argument); ++ bool isarg = WhiteBox::lookup_bool("argument", argument); + const char* type = lookup_diagnosticArgumentEnum("type", argument); + + if (strcmp(type, "STRING") == 0) { + DCmdArgument* argument = new DCmdArgument( + name, desc, + "STRING", mandatory, default_value); +- parser->add_dcmd_option(argument); ++ if (isarg) { ++ parser->add_dcmd_argument(argument); ++ } else { ++ parser->add_dcmd_option(argument); ++ } + } else if (strcmp(type, "NANOTIME") 
== 0) { + DCmdArgument* argument = new DCmdArgument( + name, desc, + "NANOTIME", mandatory, default_value); +- parser->add_dcmd_option(argument); ++ if (isarg) { ++ parser->add_dcmd_argument(argument); ++ } else { ++ parser->add_dcmd_option(argument); ++ } + } else if (strcmp(type, "JLONG") == 0) { + DCmdArgument* argument = new DCmdArgument( + name, desc, + "JLONG", mandatory, default_value); +- parser->add_dcmd_option(argument); ++ if (isarg) { ++ parser->add_dcmd_argument(argument); ++ } else { ++ parser->add_dcmd_option(argument); ++ } + } else if (strcmp(type, "BOOLEAN") == 0) { + DCmdArgument* argument = new DCmdArgument( + name, desc, + "BOOLEAN", mandatory, default_value); +- parser->add_dcmd_option(argument); ++ if (isarg) { ++ parser->add_dcmd_argument(argument); ++ } else { ++ parser->add_dcmd_option(argument); ++ } + } else if (strcmp(type, "MEMORYSIZE") == 0) { + DCmdArgument* argument = new DCmdArgument( + name, desc, + "MEMORY SIZE", mandatory, default_value); +- parser->add_dcmd_option(argument); ++ if (isarg) { ++ parser->add_dcmd_argument(argument); ++ } else { ++ parser->add_dcmd_option(argument); ++ } + } else if (strcmp(type, "STRINGARRAY") == 0) { + DCmdArgument* argument = new DCmdArgument( + name, desc, + "STRING SET", mandatory); +- parser->add_dcmd_option(argument); ++ if (isarg) { ++ parser->add_dcmd_argument(argument); ++ } else { ++ parser->add_dcmd_option(argument); ++ } + } + } + +@@ -111,11 +136,12 @@ static void fill_in_parser(DCmdParser* parser, oop argument) + * { name, value, name, value ... } + * This can then be checked from java. 
+ */ +-WB_ENTRY(jobjectArray, WB_ParseCommandLine(JNIEnv* env, jobject o, jstring j_cmdline, jobjectArray arguments)) ++WB_ENTRY(jobjectArray, WB_ParseCommandLine(JNIEnv* env, jobject o, jstring j_cmdline, jchar j_delim, jobjectArray arguments)) + ResourceMark rm; + DCmdParser parser; + + const char* c_cmdline = java_lang_String::as_utf8_string(JNIHandles::resolve(j_cmdline)); ++ const char c_delim = j_delim & 0xff; + objArrayOop argumentArray = objArrayOop(JNIHandles::resolve_non_null(arguments)); + objArrayHandle argumentArray_ah(THREAD, argumentArray); + +@@ -127,20 +153,29 @@ WB_ENTRY(jobjectArray, WB_ParseCommandLine(JNIEnv* env, jobject o, jstring j_cmd + } + + CmdLine cmdline(c_cmdline, strlen(c_cmdline), true); +- parser.parse(&cmdline,',',CHECK_NULL); ++ parser.parse(&cmdline, c_delim, CHECK_NULL); + + Klass* k = SystemDictionary::Object_klass(); + objArrayOop returnvalue_array = oopFactory::new_objArray(k, parser.num_arguments() * 2, CHECK_NULL); + objArrayHandle returnvalue_array_ah(THREAD, returnvalue_array); + + GrowableArray*parsedArgNames = parser.argument_name_array(); ++ GenDCmdArgument* arglist = parser.arguments_list(); + + for (int i = 0; i < parser.num_arguments(); i++) { + oop parsedName = java_lang_String::create_oop_from_str(parsedArgNames->at(i), CHECK_NULL); + returnvalue_array_ah->obj_at_put(i*2, parsedName); + GenDCmdArgument* arg = parser.lookup_dcmd_option(parsedArgNames->at(i), strlen(parsedArgNames->at(i))); ++ if (!arg) { ++ arg = arglist; ++ arglist = arglist->next(); ++ } + char buf[VALUE_MAXLEN]; +- arg->value_as_str(buf, sizeof(buf)); ++ if (arg) { ++ arg->value_as_str(buf, sizeof(buf)); ++ } else { ++ sprintf(buf, ""); ++ } + oop parsedValue = java_lang_String::create_oop_from_str(buf, CHECK_NULL); + returnvalue_array_ah->obj_at_put(i*2+1, parsedValue); + } +diff --git a/hotspot/src/share/vm/prims/wbtestmethods/parserTests.hpp b/hotspot/src/share/vm/prims/wbtestmethods/parserTests.hpp +index a6ff1bd98..e791225a4 100644 +--- 
a/hotspot/src/share/vm/prims/wbtestmethods/parserTests.hpp ++++ b/hotspot/src/share/vm/prims/wbtestmethods/parserTests.hpp +@@ -27,6 +27,6 @@ + #include "prims/jni.h" + #include "prims/whitebox.hpp" + +-WB_METHOD_DECLARE(jobjectArray) WB_ParseCommandLine(JNIEnv* env, jobject o, jstring args, jobjectArray arguments); ++WB_METHOD_DECLARE(jobjectArray) WB_ParseCommandLine(JNIEnv* env, jobject o, jstring args, jchar delim, jobjectArray arguments); + + #endif //SHARE_VM_PRIMS_WBTESTMETHODS_PARSERTESTS_H +diff --git a/hotspot/src/share/vm/prims/whitebox.cpp b/hotspot/src/share/vm/prims/whitebox.cpp +index 16af34e34..ccc1bd62c 100644 +--- a/hotspot/src/share/vm/prims/whitebox.cpp ++++ b/hotspot/src/share/vm/prims/whitebox.cpp +@@ -1307,7 +1307,7 @@ static JNINativeMethod methods[] = { + {CC"getLookupCacheMatches", CC"(Ljava/lang/ClassLoader;Ljava/lang/String;)[I", + (void*)&WB_GetLookupCacheMatches}, + {CC"parseCommandLine", +- CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;", ++ CC"(Ljava/lang/String;C[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;", + (void*) &WB_ParseCommandLine + }, + {CC"addToBootstrapClassLoaderSearch", CC"(Ljava/lang/String;)V", +diff --git a/hotspot/src/share/vm/runtime/arguments.cpp b/hotspot/src/share/vm/runtime/arguments.cpp +index a50aa1866..dfd846777 100644 +--- a/hotspot/src/share/vm/runtime/arguments.cpp ++++ b/hotspot/src/share/vm/runtime/arguments.cpp +@@ -27,6 +27,7 @@ + #include "classfile/javaAssertions.hpp" + #include "classfile/symbolTable.hpp" + #include "compiler/compilerOracle.hpp" ++#include "gc_implementation/shared/dynamicMaxHeap.hpp" + #include "memory/allocation.inline.hpp" + #include "memory/cardTableRS.hpp" + #include "memory/filemap.hpp" +@@ -1635,6 +1636,18 @@ void Arguments::set_use_compressed_oops() { + // the only value that can override MaxHeapSize if we are + // to use UseCompressedOops is InitialHeapSize. 
+ size_t max_heap_size = MAX2(MaxHeapSize, InitialHeapSize); ++ // DynamicMaxHeap ++ // 1. align DynamicMaxHeapSizeLimit ++ // 2. use DynamicMaxHeapSizeLimit to check whether compressedOops can be enabled ++ bool dynamic_max_heap_enable = DynamicMaxHeapChecker::check_dynamic_max_heap_size_limit(); ++ if (dynamic_max_heap_enable) { ++ Universe::set_dynamic_max_heap_enable(true); ++ DynamicMaxHeapConfig::set_initial_max_heap_size((size_t)MaxHeapSize); ++ size_t _heap_alignment = CollectorPolicy::compute_heap_alignment(); ++ uintx aligned_max_heap_size_limit = align_size_up(DynamicMaxHeapSizeLimit, _heap_alignment); ++ FLAG_SET_ERGO(uintx, DynamicMaxHeapSizeLimit, aligned_max_heap_size_limit); ++ max_heap_size = MAX2(max_heap_size, DynamicMaxHeapSizeLimit); ++ } + if (max_heap_size <= max_heap_for_compressed_oops()) { + #if !defined(COMPILER1) || defined(TIERED) + if (FLAG_IS_DEFAULT(UseCompressedOops)) { +@@ -1864,6 +1877,7 @@ static bool verify_serial_gc_flags() { + + void Arguments::set_gc_specific_flags() { + #if INCLUDE_ALL_GCS ++ DynamicMaxHeapChecker::common_check(); + // Set per-collector flags + if (UseParallelGC || UseParallelOldGC) { + set_parallel_gc_flags(); +@@ -4668,4 +4682,4 @@ jint Arguments::init_aggressive_cds_properties() { + } + return JNI_OK; + } +-#endif // INCLUDE_AGGRESSIVE_CDS +\ No newline at end of file ++#endif // INCLUDE_AGGRESSIVE_CDS +diff --git a/hotspot/src/share/vm/runtime/globals.hpp b/hotspot/src/share/vm/runtime/globals.hpp +index b3c2f5af6..25c10134f 100644 +--- a/hotspot/src/share/vm/runtime/globals.hpp ++++ b/hotspot/src/share/vm/runtime/globals.hpp +@@ -3301,6 +3301,12 @@ class CommandLineFlags { + product(uintx, MaxHeapSize, ScaleForWordSize(96*M), \ + "Maximum heap size (in bytes)") \ + \ ++ product(uintx, DynamicMaxHeapSizeLimit, ScaleForWordSize(96*M), \ ++ "The limit of Dynamic maximum heap size (in bytes)") \ ++ \ ++ product(bool, TraceDynamicMaxHeap, false, \ ++ "Trace Dynamic Max Heap resizing log and cause of failure") \ 
++ \ + product(uintx, OldSize, ScaleForWordSize(4*M), \ + "Initial tenured generation size (in bytes)") \ + \ +diff --git a/hotspot/src/share/vm/runtime/os.cpp b/hotspot/src/share/vm/runtime/os.cpp +index 36138e1f2..bea6b1e11 100644 +--- a/hotspot/src/share/vm/runtime/os.cpp ++++ b/hotspot/src/share/vm/runtime/os.cpp +@@ -370,6 +370,7 @@ void os::init_before_ergo() { + // global variables + extern char** argv_for_execvp; + JavaThread::os_linux_aarch64_options(active_processor_count(), argv_for_execvp); ++ os::Linux::load_ACC_library_before_ergo(); + #endif + initialize_initial_active_processor_count(); + // We need to initialize large page support here because ergonomics takes some +diff --git a/hotspot/src/share/vm/runtime/thread.cpp b/hotspot/src/share/vm/runtime/thread.cpp +index a5758734b..3f1b4444e 100644 +--- a/hotspot/src/share/vm/runtime/thread.cpp ++++ b/hotspot/src/share/vm/runtime/thread.cpp +@@ -86,6 +86,7 @@ + #include "utilities/events.hpp" + #include "utilities/preserveException.hpp" + #include "utilities/macros.hpp" ++#include "gc_implementation/shared/dynamicMaxHeap.hpp" + #ifdef TARGET_OS_FAMILY_linux + # include "os_linux.inline.hpp" + #endif +@@ -3764,6 +3765,16 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { + #ifdef ASSERT + _vm_complete = true; + #endif ++ ++ // Dynamic Max Heap: reset heap initial size to MaxHeapSize ++ if (Universe::is_dynamic_max_heap_enable()) { ++ bool success = Universe::heap()->change_max_heap(MaxHeapSize); ++ if (!success) { ++ jio_fprintf(defaultStream::error_stream(), ++ "VM failed to initialize heap to Xmx " SIZE_FORMAT "K", (MaxHeapSize / K)); ++ vm_exit(1); ++ } ++ } + return JNI_OK; + } + +diff --git a/hotspot/src/share/vm/runtime/vm_operations.hpp b/hotspot/src/share/vm/runtime/vm_operations.hpp +index f2071a9d6..4a4077a66 100644 +--- a/hotspot/src/share/vm/runtime/vm_operations.hpp ++++ b/hotspot/src/share/vm/runtime/vm_operations.hpp +@@ -95,6 +95,7 @@ + template(JFRCheckpoint) \ + 
template(Exit) \ + template(LinuxDllLoad) \ ++ template(DynamicMaxHeap) \ + template(RotateGCLog) \ + template(WhiteBoxOperation) \ + template(ClassLoaderStatsOperation) \ +diff --git a/hotspot/src/share/vm/services/diagnosticCommand.cpp b/hotspot/src/share/vm/services/diagnosticCommand.cpp +index f8f6ad546..ee3fc2ccd 100644 +--- a/hotspot/src/share/vm/services/diagnosticCommand.cpp ++++ b/hotspot/src/share/vm/services/diagnosticCommand.cpp +@@ -63,6 +63,7 @@ void DCmdRegistrant::register_dcmds(){ + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); ++ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + #if INCLUDE_SERVICES // Heap dumping/inspection supported + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(DCmd_Source_Internal | DCmd_Source_AttachAPI, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(DCmd_Source_Internal | DCmd_Source_AttachAPI, true, false)); +@@ -361,6 +362,52 @@ void FinalizerInfoDCmd::execute(DCmdSource source, TRAPS) { + } + } + ++ChangeMaxHeapDCmd::ChangeMaxHeapDCmd(outputStream* output, bool heap) : ++ DCmdWithParser(output, heap), ++ _new_max_heap_size("change_max_heap", "New max size of heap", "MEMORY SIZE", true) { ++ _dcmdparser.add_dcmd_argument(&_new_max_heap_size); ++} ++ ++int ChangeMaxHeapDCmd::num_arguments() { ++ ResourceMark rm; ++ ChangeMaxHeapDCmd* dcmd = new ChangeMaxHeapDCmd(NULL, false); ++ if (dcmd != NULL) { ++ DCmdMark mark(dcmd); ++ return dcmd->_dcmdparser.num_arguments(); ++ } else { ++ return 0; ++ } ++} ++ ++void ChangeMaxHeapDCmd::execute(DCmdSource source, TRAPS) { ++ if (!Universe::is_dynamic_max_heap_enable()) { ++ output()->print_cr("not supported because -XX:DynamicMaxHeapSizeLimit was not specified"); ++ return; ++ } ++ jlong input_max_heap_size = 
_new_max_heap_size.value()._size; ++ size_t heap_alignment = Universe::heap()->collector_policy()->heap_alignment(); ++ jlong new_max_heap_size = align_size_up((size_t)input_max_heap_size, heap_alignment); ++ output()->print_cr("align the given value " SIZE_FORMAT " up to "SIZE_FORMAT "K for heap alignment "SIZE_FORMAT "K", ++ input_max_heap_size, ++ (new_max_heap_size / K), ++ (heap_alignment / K)); ++ bool is_validate = Universe::heap()->check_new_max_heap_validity(new_max_heap_size, output()); ++ if (!is_validate) { ++ output()->print_cr("GC.change_max_heap fail"); ++ return; ++ } ++ output()->print_cr("GC.change_max_heap (" SIZE_FORMAT "K" "->" SIZE_FORMAT "K)(" SIZE_FORMAT "K)", ++ (Universe::heap()->current_max_heap_size() / K), ++ (new_max_heap_size / K), ++ (Universe::heap()->collector_policy()->max_heap_byte_size_limit() / K)); ++ bool success = Universe::heap()->change_max_heap(new_max_heap_size); ++ if (success) { ++ output()->print_cr("GC.change_max_heap success"); ++ } else { ++ output()->print_cr("GC.change_max_heap fail"); ++ } ++} ++ + #if INCLUDE_SERVICES // Heap dumping/inspection supported + HeapDumpDCmd::HeapDumpDCmd(outputStream* output, bool heap) : + DCmdWithParser(output, heap), +@@ -874,4 +921,4 @@ void CodeCacheDCmd::execute(DCmdSource source, TRAPS) { + void PerfMapDCmd::execute(DCmdSource source, TRAPS) { + CodeCache::write_perf_map(); + } +-#endif // LINUX +\ No newline at end of file ++#endif // LINUX +diff --git a/hotspot/src/share/vm/services/diagnosticCommand.hpp b/hotspot/src/share/vm/services/diagnosticCommand.hpp +index c89933f96..34c81e53c 100644 +--- a/hotspot/src/share/vm/services/diagnosticCommand.hpp ++++ b/hotspot/src/share/vm/services/diagnosticCommand.hpp +@@ -240,6 +240,27 @@ public: + virtual void execute(DCmdSource source, TRAPS); + }; + ++class ChangeMaxHeapDCmd : public DCmdWithParser { ++public: ++ ChangeMaxHeapDCmd(outputStream* output, bool heap); ++ static const char* name() { return "GC.change_max_heap"; } ++ 
static const char* description() { ++ return "Change dynamic max heap size during runtime."; ++ } ++ static const char* impact() { ++ return "Medium"; ++ } ++ static int num_arguments(); ++ static const JavaPermission permission() { ++ JavaPermission p = {"java.lang.management.ManagementPermission", ++ "monitor", NULL}; ++ return p; ++ } ++ virtual void execute(DCmdSource source, TRAPS); ++protected: ++ DCmdArgument _new_max_heap_size; ++}; ++ + #if INCLUDE_SERVICES // Heap dumping supported + // See also: dump_heap in attachListener.cpp + class HeapDumpDCmd : public DCmdWithParser { +@@ -286,7 +307,7 @@ public: + return p; + } + static int num_arguments() { +- return 0; ++ return 0; + } + virtual void execute(DCmdSource source, TRAPS); + }; +diff --git a/hotspot/src/share/vm/services/diagnosticFramework.cpp b/hotspot/src/share/vm/services/diagnosticFramework.cpp +index dcb67d36c..870e6405c 100644 +--- a/hotspot/src/share/vm/services/diagnosticFramework.cpp ++++ b/hotspot/src/share/vm/services/diagnosticFramework.cpp +@@ -60,16 +60,15 @@ CmdLine::CmdLine(const char* line, size_t len, bool no_command_name) { + + bool DCmdArgIter::next(TRAPS) { + if (_len == 0) return false; +- // skipping spaces ++ // skipping delimiters + while (_cursor < _len - 1 && _buffer[_cursor] == _delim) { + _cursor++; + } + // handling end of command line +- if (_cursor >= _len - 1) { +- _cursor = _len - 1; +- _key_addr = &_buffer[_len - 1]; ++ if (_cursor == _len - 1 && _buffer[_cursor] == _delim) { ++ _key_addr = &_buffer[_cursor]; + _key_len = 0; +- _value_addr = &_buffer[_len - 1]; ++ _value_addr = &_buffer[_cursor]; + _value_len = 0; + return false; + } +diff --git a/hotspot/test/serviceability/ParserTest.java b/hotspot/test/serviceability/ParserTest.java +index 8db151f4d..0cb454d8b 100644 +--- a/hotspot/test/serviceability/ParserTest.java ++++ b/hotspot/test/serviceability/ParserTest.java +@@ -47,6 +47,7 @@ public class ParserTest { + testBool(); + testQuotes(); + testMemorySize(); ++ 
testSingleLetterArg(); + } + + public static void main(String... args) throws Exception { +@@ -98,7 +99,7 @@ public class ParserTest { + false, "0"); + DiagnosticCommand[] args = {arg}; + +- wb.parseCommandLine(name + "=10", args); ++ wb.parseCommandLine(name + "=10", ',', args); + parse(name, "10", name + "=10", args); + parse(name, "-5", name + "=-5", args); + +@@ -148,6 +149,15 @@ public class ParserTest { + parse(name, "Recording 1", "\"" + name + "\"" + "=\"Recording 1\",arg=value", args); + } + ++ public void testSingleLetterArg() throws Exception { ++ DiagnosticCommand[] args = new DiagnosticCommand[] { ++ new DiagnosticCommand("flag", "desc", DiagnosticArgumentType.STRING, true, false, null), ++ new DiagnosticCommand("value", "desc", DiagnosticArgumentType.STRING, true, false, null) ++ }; ++ parse("flag", "flag", "flag v", ' ', args); ++ parse("value", "v", "flag v", ' ', args); ++ } ++ + public void testMemorySize() throws Exception { + String name = "name"; + String defaultValue = "1024"; +@@ -175,9 +185,13 @@ public class ParserTest { + + public void parse(String searchName, String expectedValue, + String cmdLine, DiagnosticCommand[] argumentTypes) throws Exception { ++ parse(searchName, expectedValue, cmdLine, ',', argumentTypes); ++ } ++ public void parse(String searchName, String expectedValue, ++ String cmdLine, char delim, DiagnosticCommand[] argumentTypes) throws Exception { + //parseCommandLine will return an object array that looks like + //{, ... 
} +- Object[] res = wb.parseCommandLine(cmdLine, argumentTypes); ++ Object[] res = wb.parseCommandLine(cmdLine, delim, argumentTypes); + for (int i = 0; i < res.length-1; i+=2) { + String parsedName = (String) res[i]; + if (searchName.equals(parsedName)) { +@@ -195,8 +209,11 @@ public class ParserTest { + } + + private void shouldFail(String argument, DiagnosticCommand[] argumentTypes) throws Exception { ++ shouldFail(argument, ',', argumentTypes); ++ } ++ private void shouldFail(String argument, char delim, DiagnosticCommand[] argumentTypes) throws Exception { + try { +- wb.parseCommandLine(argument, argumentTypes); ++ wb.parseCommandLine(argument, delim, argumentTypes); + throw new Exception("Parser accepted argument: " + argument); + } catch (IllegalArgumentException e) { + //expected +diff --git a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java +index 6e617516b..13721fe0f 100644 +--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java ++++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java +@@ -112,7 +112,7 @@ public class WhiteBox { + public native int g1ActiveMemoryNodeCount(); + public native int[] g1MemoryNodeIds(); + public native MemoryUsage g1AuxiliaryMemoryUsage(); +- public native Object[] parseCommandLine(String commandline, DiagnosticCommand[] args); ++ public native Object[] parseCommandLine(String commandline, char delim, DiagnosticCommand[] args); + + // NMT + public native long NMTMalloc(long size); +diff --git a/hotspot/test/testlibrary/whitebox/sun/hotspot/parser/DiagnosticCommand.java b/hotspot/test/testlibrary/whitebox/sun/hotspot/parser/DiagnosticCommand.java +index ad4ebcc73..11a0c2b4e 100644 +--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/parser/DiagnosticCommand.java ++++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/parser/DiagnosticCommand.java +@@ -34,14 +34,21 @@ public class DiagnosticCommand { + private DiagnosticArgumentType type; 
+ private boolean mandatory; + private String defaultValue; ++ private boolean argument; + + public DiagnosticCommand(String name, String desc, DiagnosticArgumentType type, + boolean mandatory, String defaultValue) { ++ this(name, desc, type, false, mandatory, defaultValue); ++ } ++ ++ public DiagnosticCommand(String name, String desc, DiagnosticArgumentType type, ++ boolean argument, boolean mandatory, String defaultValue) { + this.name = name; + this.desc = desc; + this.type = type; + this.mandatory = mandatory; + this.defaultValue = defaultValue; ++ this.argument = argument; + } + + public String getName() { +@@ -60,6 +67,10 @@ public class DiagnosticCommand { + return mandatory; + } + ++ public boolean isArgument() { ++ return argument; ++ } ++ + public String getDefaultValue() { + return defaultValue; + } +diff --git a/test/lib/sun/hotspot/WhiteBox.java b/test/lib/sun/hotspot/WhiteBox.java +index a6d773bc8..3bd8cc8cd 100644 +--- a/test/lib/sun/hotspot/WhiteBox.java ++++ b/test/lib/sun/hotspot/WhiteBox.java +@@ -140,7 +140,7 @@ public class WhiteBox { + public native long g1NumFreeRegions(); + public native int g1RegionSize(); + public native MemoryUsage g1AuxiliaryMemoryUsage(); +- public native Object[] parseCommandLine(String commandline, DiagnosticCommand[] args); ++ public native Object[] parseCommandLine(String commandline, char delim, DiagnosticCommand[] args); + public native int g1ActiveMemoryNodeCount(); + public native int[] g1MemoryNodeIds(); + +-- +2.47.1.windows.1 + diff --git a/Add-JitProfileCache-feature.patch b/Add-JitProfileCache-feature.patch new file mode 100644 index 0000000000000000000000000000000000000000..0233e28a6aad20a1c0dee3a470144d6baef011ad --- /dev/null +++ b/Add-JitProfileCache-feature.patch @@ -0,0 +1,5833 @@ +From 6693fe275f771ec189501ec548d22152cbd82db7 Mon Sep 17 00:00:00 2001 +Date: Fri, 6 Jun 2025 11:58:43 +0800 +Subject: [PATCH 4/4] Add JitProfileCache feature + +--- + hotspot/make/aix/makefiles/mapfile-vers-debug | 3 + + 
.../make/aix/makefiles/mapfile-vers-product | 3 + + hotspot/make/bsd/makefiles/mapfile-vers-debug | 3 + + .../make/bsd/makefiles/mapfile-vers-product | 3 + + .../make/linux/makefiles/mapfile-vers-debug | 3 + + .../make/linux/makefiles/mapfile-vers-product | 3 + + hotspot/make/solaris/makefiles/mapfile-vers | 3 + + .../vm/templateInterpreter_aarch64.cpp | 17 + + hotspot/src/os/linux/vm/os_linux.cpp | 15 + + hotspot/src/os/linux/vm/os_linux.hpp | 25 + + hotspot/src/share/vm/ci/ciEnv.cpp | 57 + + hotspot/src/share/vm/ci/ciEnv.hpp | 5 + + hotspot/src/share/vm/ci/ciMethod.cpp | 9 +- + .../share/vm/classfile/classFileParser.cpp | 15 + + .../share/vm/classfile/classLoaderData.cpp | 8 + + .../share/vm/classfile/systemDictionary.cpp | 57 +- + .../share/vm/classfile/systemDictionary.hpp | 3 + + hotspot/src/share/vm/classfile/vmSymbols.hpp | 5 + + hotspot/src/share/vm/code/nmethod.cpp | 5 + + .../src/share/vm/compiler/compileBroker.cpp | 5 + + .../src/share/vm/compiler/compileBroker.hpp | 3 + + .../vm/jprofilecache/jitProfileCache.cpp | 1247 +++++++++++++++++ + .../vm/jprofilecache/jitProfileCache.hpp | 568 ++++++++ + .../vm/jprofilecache/jitProfileCacheDcmds.cpp | 128 ++ + .../vm/jprofilecache/jitProfileCacheDcmds.hpp | 52 + + .../vm/jprofilecache/jitProfileCacheLog.cpp | 26 + + .../vm/jprofilecache/jitProfileCacheLog.hpp | 89 ++ + .../jitProfileCacheLogParser.cpp | 399 ++++++ + .../jitProfileCacheLogParser.hpp | 88 ++ + .../jprofilecache/jitProfileCacheThread.cpp | 79 ++ + .../jprofilecache/jitProfileCacheThread.hpp | 55 + + .../vm/jprofilecache/jitProfileRecord.cpp | 597 ++++++++ + .../vm/jprofilecache/jitProfileRecord.hpp | 282 ++++ + hotspot/src/share/vm/libadt/dict.cpp | 23 + + hotspot/src/share/vm/libadt/dict.hpp | 1 + + hotspot/src/share/vm/oops/constantPool.cpp | 126 +- + hotspot/src/share/vm/oops/constantPool.hpp | 30 + + hotspot/src/share/vm/oops/instanceKlass.cpp | 19 + + hotspot/src/share/vm/oops/instanceKlass.hpp | 32 + + hotspot/src/share/vm/oops/method.cpp | 8 
+ + hotspot/src/share/vm/oops/method.hpp | 21 + + hotspot/src/share/vm/oops/methodData.hpp | 3 + + hotspot/src/share/vm/opto/callGenerator.cpp | 3 +- + hotspot/src/share/vm/opto/compile.cpp | 8 + + hotspot/src/share/vm/opto/graphKit.cpp | 13 +- + hotspot/src/share/vm/opto/lcm.cpp | 3 + + hotspot/src/share/vm/prims/jvm.cpp | 66 + + hotspot/src/share/vm/prims/jvm.h | 9 + + hotspot/src/share/vm/runtime/globals.hpp | 46 +- + hotspot/src/share/vm/runtime/init.cpp | 19 + + hotspot/src/share/vm/runtime/mutexLocker.cpp | 6 + + hotspot/src/share/vm/runtime/mutexLocker.hpp | 3 + + hotspot/src/share/vm/runtime/safepoint.cpp | 10 + + hotspot/src/share/vm/runtime/thread.cpp | 15 + + hotspot/src/share/vm/runtime/thread.hpp | 12 + + .../share/vm/services/diagnosticCommand.cpp | 2 + + hotspot/src/share/vm/utilities/hashtable.cpp | 5 + + hotspot/src/share/vm/utilities/ostream.cpp | 27 + + hotspot/src/share/vm/utilities/ostream.hpp | 14 +- + .../share/vm/utilities/symbolRegexMatcher.cpp | 103 ++ + .../share/vm/utilities/symbolRegexMatcher.hpp | 69 + + jdk/make/CreateJars.gmk | 1 + + jdk/make/data/classlist/classlist.linux | 1 + + jdk/make/lib/CoreLibraries.gmk | 1 + + jdk/make/mapfiles/libjava/mapfile-linux | 2 + + jdk/make/mapfiles/libjava/mapfile-vers | 2 + + jdk/make/mapfiles/libjava/reorder-sparc | 1 + + jdk/make/mapfiles/libjava/reorder-sparcv9 | 1 + + jdk/make/mapfiles/libjava/reorder-x86 | 1 + + .../huawei/jprofilecache/JProfileCache.java | 71 + + jdk/src/share/javavm/export/jvm.h | 9 + + .../com/huawei/jprofilecache/JProfileCache.c | 40 + + 72 files changed, 4674 insertions(+), 12 deletions(-) + create mode 100644 hotspot/src/share/vm/jprofilecache/jitProfileCache.cpp + create mode 100644 hotspot/src/share/vm/jprofilecache/jitProfileCache.hpp + create mode 100644 hotspot/src/share/vm/jprofilecache/jitProfileCacheDcmds.cpp + create mode 100644 hotspot/src/share/vm/jprofilecache/jitProfileCacheDcmds.hpp + create mode 100644 
hotspot/src/share/vm/jprofilecache/jitProfileCacheLog.cpp + create mode 100644 hotspot/src/share/vm/jprofilecache/jitProfileCacheLog.hpp + create mode 100644 hotspot/src/share/vm/jprofilecache/jitProfileCacheLogParser.cpp + create mode 100644 hotspot/src/share/vm/jprofilecache/jitProfileCacheLogParser.hpp + create mode 100644 hotspot/src/share/vm/jprofilecache/jitProfileCacheThread.cpp + create mode 100644 hotspot/src/share/vm/jprofilecache/jitProfileCacheThread.hpp + create mode 100644 hotspot/src/share/vm/jprofilecache/jitProfileRecord.cpp + create mode 100644 hotspot/src/share/vm/jprofilecache/jitProfileRecord.hpp + create mode 100644 hotspot/src/share/vm/utilities/symbolRegexMatcher.cpp + create mode 100644 hotspot/src/share/vm/utilities/symbolRegexMatcher.hpp + create mode 100644 jdk/src/share/classes/com/huawei/jprofilecache/JProfileCache.java + create mode 100644 jdk/src/share/native/com/huawei/jprofilecache/JProfileCache.c + +diff --git a/hotspot/make/aix/makefiles/mapfile-vers-debug b/hotspot/make/aix/makefiles/mapfile-vers-debug +index 127794ce0..523431e27 100644 +--- a/hotspot/make/aix/makefiles/mapfile-vers-debug ++++ b/hotspot/make/aix/makefiles/mapfile-vers-debug +@@ -163,6 +163,9 @@ SUNWprivate_1.1 { + JVM_GetStackAccessControlContext; + JVM_GetStackTraceDepth; + JVM_GetStackTraceElement; ++ JVM_TriggerPrecompilation; ++ JVM_CheckJProfileCacheCompilationIsComplete; ++ JVM_NotifyJVMDeoptProfileCacheMethods; + JVM_GetSystemPackage; + JVM_GetSystemPackages; + JVM_GetTemporaryDirectory; +diff --git a/hotspot/make/aix/makefiles/mapfile-vers-product b/hotspot/make/aix/makefiles/mapfile-vers-product +index 2bbfb32e9..89893bc6d 100644 +--- a/hotspot/make/aix/makefiles/mapfile-vers-product ++++ b/hotspot/make/aix/makefiles/mapfile-vers-product +@@ -161,6 +161,9 @@ SUNWprivate_1.1 { + JVM_GetStackAccessControlContext; + JVM_GetStackTraceDepth; + JVM_GetStackTraceElement; ++ JVM_TriggerPrecompilation; ++ JVM_CheckJProfileCacheCompilationIsComplete; ++ 
JVM_NotifyJVMDeoptProfileCacheMethods; + JVM_GetSystemPackage; + JVM_GetSystemPackages; + JVM_GetTemporaryDirectory; +diff --git a/hotspot/make/bsd/makefiles/mapfile-vers-debug b/hotspot/make/bsd/makefiles/mapfile-vers-debug +index 86af8a1be..ef6f79819 100644 +--- a/hotspot/make/bsd/makefiles/mapfile-vers-debug ++++ b/hotspot/make/bsd/makefiles/mapfile-vers-debug +@@ -161,6 +161,9 @@ + _JVM_GetStackAccessControlContext + _JVM_GetStackTraceDepth + _JVM_GetStackTraceElement ++ _JVM_TriggerPrecompilation ++ _JVM_CheckJProfileCacheCompilationIsComplete ++ _JVM_NotifyJVMDeoptProfileCacheMethods + _JVM_GetSystemPackage + _JVM_GetSystemPackages + _JVM_GetTemporaryDirectory +diff --git a/hotspot/make/bsd/makefiles/mapfile-vers-product b/hotspot/make/bsd/makefiles/mapfile-vers-product +index 69cd3f962..24aff0131 100644 +--- a/hotspot/make/bsd/makefiles/mapfile-vers-product ++++ b/hotspot/make/bsd/makefiles/mapfile-vers-product +@@ -161,6 +161,9 @@ + _JVM_GetStackAccessControlContext + _JVM_GetStackTraceDepth + _JVM_GetStackTraceElement ++ _JVM_TriggerPrecompilation ++ _JVM_CheckJProfileCacheCompilationIsComplete ++ _JVM_NotifyJVMDeoptProfileCacheMethods + _JVM_GetSystemPackage + _JVM_GetSystemPackages + _JVM_GetTemporaryDirectory +diff --git a/hotspot/make/linux/makefiles/mapfile-vers-debug b/hotspot/make/linux/makefiles/mapfile-vers-debug +index b5e0d809a..58f1ed485 100644 +--- a/hotspot/make/linux/makefiles/mapfile-vers-debug ++++ b/hotspot/make/linux/makefiles/mapfile-vers-debug +@@ -164,6 +164,9 @@ SUNWprivate_1.1 { + JVM_GetStackAccessControlContext; + JVM_GetStackTraceDepth; + JVM_GetStackTraceElement; ++ JVM_TriggerPrecompilation; ++ JVM_CheckJProfileCacheCompilationIsComplete; ++ JVM_NotifyJVMDeoptProfileCacheMethods; + JVM_GetSystemPackage; + JVM_GetSystemPackages; + JVM_GetTemporaryDirectory; +diff --git a/hotspot/make/linux/makefiles/mapfile-vers-product b/hotspot/make/linux/makefiles/mapfile-vers-product +index 554db7bdf..713bc9cf0 100644 +--- 
a/hotspot/make/linux/makefiles/mapfile-vers-product ++++ b/hotspot/make/linux/makefiles/mapfile-vers-product +@@ -164,6 +164,9 @@ SUNWprivate_1.1 { + JVM_GetStackAccessControlContext; + JVM_GetStackTraceDepth; + JVM_GetStackTraceElement; ++ JVM_TriggerPrecompilation; ++ JVM_CheckJProfileCacheCompilationIsComplete; ++ JVM_NotifyJVMDeoptProfileCacheMethods; + JVM_GetSystemPackage; + JVM_GetSystemPackages; + JVM_GetTemporaryDirectory; +diff --git a/hotspot/make/solaris/makefiles/mapfile-vers b/hotspot/make/solaris/makefiles/mapfile-vers +index 41045dd43..1fe2706d1 100644 +--- a/hotspot/make/solaris/makefiles/mapfile-vers ++++ b/hotspot/make/solaris/makefiles/mapfile-vers +@@ -164,6 +164,9 @@ SUNWprivate_1.1 { + JVM_GetStackAccessControlContext; + JVM_GetStackTraceDepth; + JVM_GetStackTraceElement; ++ JVM_TriggerPrecompilation; ++ JVM_CheckJProfileCacheCompilationIsComplete; ++ JVM_NotifyJVMDeoptProfileCacheMethods; + JVM_GetSystemPackage; + JVM_GetSystemPackages; + JVM_GetTemporaryDirectory; +diff --git a/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp +index 6329ff4e5..f4f832081 100644 +--- a/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp ++++ b/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp +@@ -359,6 +359,23 @@ void InterpreterGenerator::generate_counter_incr( + __ addw(r1, r1, 1); + __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset())); + } ++#ifdef _LP64 ++ if (JProfilingCacheRecording) { ++ Label skip_record; ++ JitProfileCache* jitprofilecache = JitProfileCache::instance(); ++ assert(jitprofilecache != NULL, "jitprofilecache should not be NULL"); ++ __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset())); ++ __ cmp(r1, 1); ++ __ br(Assembler::HI, skip_record); ++ unsigned long offset; ++ __ adrp(rscratch2, ExternalAddress(jitprofilecache->recorder()->current_init_order_addr()), offset); ++ __ 
ldrw(r1, Address(rscratch2, offset)); ++ __ strw(r1, Address(rmethod, Method::first_invoke_init_order_offset())); ++ // restore method_counters_offset to rscratch2 ++ __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset())); ++ __ bind(skip_record); ++ } ++#endif + // Update standard invocation counters + __ ldrw(r1, invocation_counter); + __ ldrw(r0, backedge_counter); +diff --git a/hotspot/src/os/linux/vm/os_linux.cpp b/hotspot/src/os/linux/vm/os_linux.cpp +index a6525944a..0daaa2925 100644 +--- a/hotspot/src/os/linux/vm/os_linux.cpp ++++ b/hotspot/src/os/linux/vm/os_linux.cpp +@@ -5548,6 +5548,9 @@ os::Linux::heap_vector_get_next_t os::Linux::_heap_vector_get_next; + os::Linux::heap_vector_free_t os::Linux::_heap_vector_free; + os::Linux::dmh_g1_can_shrink_t os::Linux::_dmh_g1_can_shrink; + os::Linux::dmh_g1_get_region_limit_t os::Linux::_dmh_g1_get_region_limit; ++os::Linux::get_class_state_t os::Linux::_get_class_state; ++os::Linux::handle_skipped_t os::Linux::_handle_skipped; ++os::Linux::handle_ignore_class_t os::Linux::_handle_ignore_class; + + void os::Linux::load_ACC_library_before_ergo() { + _dmh_g1_can_shrink = CAST_TO_FN_PTR(dmh_g1_can_shrink_t, dlsym(RTLD_DEFAULT, "DynamicMaxHeap_G1CanShrink")); +@@ -5579,6 +5582,9 @@ void os::Linux::load_ACC_library() { + _heap_vector_add = CAST_TO_FN_PTR(heap_vector_add_t, dlsym(RTLD_DEFAULT, "HeapVector_Add")); + _heap_vector_get_next = CAST_TO_FN_PTR(heap_vector_get_next_t, dlsym(RTLD_DEFAULT, "HeapVector_GetNext")); + _heap_vector_free= CAST_TO_FN_PTR(heap_vector_free_t, dlsym(RTLD_DEFAULT, "HeapVector_Free")); ++ _get_class_state= CAST_TO_FN_PTR(get_class_state_t, dlsym(RTLD_DEFAULT, "Get_Class_State")); ++ _handle_skipped= CAST_TO_FN_PTR(handle_skipped_t, dlsym(RTLD_DEFAULT, "Handle_Skipped")); ++ _handle_ignore_class= CAST_TO_FN_PTR(handle_ignore_class_t, dlsym(RTLD_DEFAULT, "Handle_Ignore_Class")); + + char path[JVM_MAXPATHLEN]; + char ebuf[1024]; +@@ -5608,6 +5614,15 @@ void 
os::Linux::load_ACC_library() { + if(_heap_vector_free == NULL) { + _heap_vector_free = CAST_TO_FN_PTR(heap_vector_free_t, dlsym(handle, "HeapVector_Free")); + } ++ if(_get_class_state == NULL) { ++ _get_class_state = CAST_TO_FN_PTR(get_class_state_t, dlsym(handle, "Get_Class_State")); ++ } ++ if(_handle_skipped == NULL) { ++ _handle_skipped = CAST_TO_FN_PTR(handle_skipped_t, dlsym(handle, "Handle_Skipped")); ++ } ++ if(_handle_ignore_class == NULL) { ++ _handle_ignore_class = CAST_TO_FN_PTR(handle_ignore_class_t, dlsym(handle, "Handle_Ignore_Class")); ++ } + } + } + +diff --git a/hotspot/src/os/linux/vm/os_linux.hpp b/hotspot/src/os/linux/vm/os_linux.hpp +index 7a73268d7..72f4026f7 100644 +--- a/hotspot/src/os/linux/vm/os_linux.hpp ++++ b/hotspot/src/os/linux/vm/os_linux.hpp +@@ -305,6 +305,9 @@ private: + typedef void* (*heap_vector_add_t)(void* val, void* heap_vector, bool &_inserted); + typedef void* (*heap_vector_get_next_t)(void* heap_vector, void* heap_vector_node, int &_cnt, void** &_items); + typedef void (*heap_vector_free_t)(void* heap_vector); ++ typedef int (*get_class_state_t)(); ++ typedef void (*handle_skipped_t)(char* class_name, char* class_loader_name, char* class_path, bool is_log_detail); ++ typedef void (*handle_ignore_class_t)(char* class_name, int index, bool is_log_detail); + typedef bool (*dmh_g1_can_shrink_t)(double used_after_gc_d, size_t _new_max_heap, double maximum_used_percentage, size_t max_heap_size); + typedef uint (*dmh_g1_get_region_limit_t)(size_t _new_max_heap, size_t region_size); + static heap_dict_add_t _heap_dict_add; +@@ -313,6 +316,9 @@ private: + static heap_vector_add_t _heap_vector_add; + static heap_vector_get_next_t _heap_vector_get_next; + static heap_vector_free_t _heap_vector_free; ++ static get_class_state_t _get_class_state; ++ static handle_skipped_t _handle_skipped; ++ static handle_ignore_class_t _handle_ignore_class; + static sched_getcpu_func_t _sched_getcpu; + static numa_node_to_cpus_func_t 
_numa_node_to_cpus; + static numa_max_node_func_t _numa_max_node; +@@ -616,6 +622,25 @@ public: + } + return result; + } ++ ++ static int get_class_state() { ++ if(_get_class_state != NULL) { ++ return _get_class_state(); ++ } ++ return 0; ++ } ++ ++ static void handle_skipped(char* class_name, char* class_loader_name, char* class_path, bool is_log_detail) { ++ if(_handle_skipped != NULL) { ++ _handle_skipped(class_name, class_loader_name, class_path, is_log_detail); ++ } ++ } ++ ++ static void handle_ignore_class(char* class_name, int index, bool is_log_detail) { ++ if(_handle_ignore_class != NULL) { ++ _handle_ignore_class(class_name, index, is_log_detail); ++ } ++ } + }; + + class PlatformEvent : public CHeapObj { +diff --git a/hotspot/src/share/vm/ci/ciEnv.cpp b/hotspot/src/share/vm/ci/ciEnv.cpp +index a27ca3254..ed9345e46 100644 +--- a/hotspot/src/share/vm/ci/ciEnv.cpp ++++ b/hotspot/src/share/vm/ci/ciEnv.cpp +@@ -709,6 +709,63 @@ ciField* ciEnv::get_field_by_index(ciInstanceKlass* accessor, + GUARDED_VM_ENTRY(return get_field_by_index_impl(accessor, index);) + } + ++// ------------------------------------------------------------------ ++// ciEnv::is_field_resolved ++// ++// is whether this field has been resolved. ++bool ciEnv::is_field_resolved(ciInstanceKlass* accessor_klass, ++ int constant_pool_index) { ++ GUARDED_VM_ENTRY( ++ ciConstantPoolCache* field_cache = accessor_klass->field_cache(); ++ if (field_cache != NULL) { ++ ciField* field = (ciField*)field_cache->get(constant_pool_index); ++ if (field != NULL) { ++ return true; ++ } ++ } ++ CompilerThread *current_thread = CompilerThread::current(); ++ assert(accessor_klass->get_instanceKlass()->is_linked(), "must be linked before using its constant-pool"); ++ constantPoolHandle constant_pool(current_thread, accessor_klass->get_instanceKlass()->constants()); ++ ++ // Get the field's name, signature, and type. 
++ Symbol* name = constant_pool->name_ref_at(constant_pool_index); ++ if (name == NULL) { ++ return false; ++ } ++ int name_index = constant_pool->name_and_type_ref_index_at(constant_pool_index); ++ int signature_index = constant_pool->signature_ref_index_at(name_index); ++ Symbol* signature = constant_pool->symbol_at(signature_index); ++ if (signature == NULL) { ++ return false; ++ } ++ return true; ++ ) ++} ++ ++// ------------------------------------------------------------------ ++// ++// Check if all fields needed by this method in ConstantPool are resolved ++bool ciEnv::are_method_fields_all_resolved(ciMethod* method) { ++ ciInstanceKlass* holder_klass = method->holder(); ++ ciBytecodeStream bytecode_stream(method); ++ int start_bci = 0; ++ int end_bci = method->code_size(); ++ bytecode_stream.reset_to_bci(start_bci); ++ Bytecodes::Code current_opcode; ++ while ((current_opcode = bytecode_stream.next()) != ciBytecodeStream::EOBC() && ++ bytecode_stream.cur_bci() < end_bci) { ++ if (current_opcode == Bytecodes::_getfield || ++ current_opcode == Bytecodes::_getstatic || ++ current_opcode == Bytecodes::_putfield || ++ current_opcode == Bytecodes::_putstatic) { ++ if (!is_field_resolved(holder_klass, bytecode_stream.get_index_u2_cpcache())) { ++ return false; ++ } ++ } ++ } ++ return true; ++} ++ + // ------------------------------------------------------------------ + // ciEnv::lookup_method + // +diff --git a/hotspot/src/share/vm/ci/ciEnv.hpp b/hotspot/src/share/vm/ci/ciEnv.hpp +index ced8d8952..0f45df6f8 100644 +--- a/hotspot/src/share/vm/ci/ciEnv.hpp ++++ b/hotspot/src/share/vm/ci/ciEnv.hpp +@@ -127,6 +127,8 @@ private: + ciInstanceKlass* accessor); + ciField* get_field_by_index(ciInstanceKlass* loading_klass, + int field_index); ++ bool is_field_resolved(ciInstanceKlass* accessor_klass, ++ int constant_pool_index); + ciMethod* get_method_by_index(constantPoolHandle cpool, + int method_index, Bytecodes::Code bc, + ciInstanceKlass* loading_klass); +@@ -318,6 
+320,9 @@ public: + // Return state of appropriate compilability + int compilable() { return _compilable; } + ++ // Check if all fields needed by this method in ConstantPool are resolved ++ bool are_method_fields_all_resolved(ciMethod* method); ++ + const char* retry_message() const { + switch (_compilable) { + case ciEnv::MethodCompilable_not_at_tier: +diff --git a/hotspot/src/share/vm/ci/ciMethod.cpp b/hotspot/src/share/vm/ci/ciMethod.cpp +index 50fafc4f8..acbb3ec4a 100644 +--- a/hotspot/src/share/vm/ci/ciMethod.cpp ++++ b/hotspot/src/share/vm/ci/ciMethod.cpp +@@ -37,6 +37,7 @@ + #include "compiler/abstractCompiler.hpp" + #include "compiler/compilerOracle.hpp" + #include "compiler/methodLiveness.hpp" ++#include "compiler/compileBroker.hpp" + #include "interpreter/interpreter.hpp" + #include "interpreter/linkResolver.hpp" + #include "interpreter/oopMapCache.hpp" +@@ -978,6 +979,10 @@ bool ciMethod::ensure_method_data(methodHandle h_m) { + if (is_native() || is_abstract() || h_m()->is_accessor()) { + return true; + } ++ if (JProfilingCacheCompileAdvance && CURRENT_ENV->task()->is_jprofilecache_compilation()) { ++ _method_data = CURRENT_ENV->get_empty_methodData(); ++ return false; ++ } + if (h_m()->method_data() == NULL) { + Method::build_interpreter_method_data(h_m, THREAD); + if (HAS_PENDING_EXCEPTION) { +@@ -1018,7 +1023,9 @@ ciMethodData* ciMethod::method_data() { + Thread* my_thread = JavaThread::current(); + methodHandle h_m(my_thread, get_Method()); + +- if (h_m()->method_data() != NULL) { ++ if (JProfilingCacheCompileAdvance && CURRENT_ENV->task()->is_jprofilecache_compilation()) { ++ _method_data = CURRENT_ENV->get_empty_methodData(); ++ } else if (h_m()->method_data() != NULL) { + _method_data = CURRENT_ENV->get_method_data(h_m()->method_data()); + _method_data->load_data(); + } else { +diff --git a/hotspot/src/share/vm/classfile/classFileParser.cpp b/hotspot/src/share/vm/classfile/classFileParser.cpp +index 8f1b2d47a..87ea58536 100644 +--- 
a/hotspot/src/share/vm/classfile/classFileParser.cpp ++++ b/hotspot/src/share/vm/classfile/classFileParser.cpp +@@ -4255,6 +4255,14 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + this_klass->set_has_default_methods(has_default_methods); + this_klass->set_declares_default_methods(declares_default_methods); + ++ if (JProfilingCacheCompileAdvance || JProfilingCacheRecording) { ++ if (_stream->source() == NULL) { ++ this_klass->set_source_file_path(NULL); ++ } else { ++ this_klass->set_source_file_path(SymbolTable::new_symbol(_stream->source(), THREAD)); ++ } ++ } ++ + if (!host_klass.is_null()) { + assert (this_klass->is_anonymous(), "should be the same"); + this_klass->set_host_klass(host_klass()); +@@ -4451,6 +4459,13 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + } + #endif // INCLUDE_CDS + ++ if (JProfilingCacheCompileAdvance || JProfilingCacheRecording) { ++ unsigned int crc32 = ClassLoader::crc32(0, (char*)(_stream->buffer()), _stream->length()); ++ unsigned int class_bytes_size = _stream->length(); ++ this_klass->set_crc32(crc32); ++ this_klass->set_bytes_size(class_bytes_size); ++ } ++ + // Clear class if no error has occurred so destructor doesn't deallocate it + _klass = NULL; + return this_klass; +diff --git a/hotspot/src/share/vm/classfile/classLoaderData.cpp b/hotspot/src/share/vm/classfile/classLoaderData.cpp +index ba86a0ebc..0d0a8e92c 100644 +--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp ++++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp +@@ -758,6 +758,14 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure, boo + ClassLoaderData* prev = NULL; + bool seen_dead_loader = false; + ++ // Unload ProfileCacheClassChain ++ if (JProfilingCacheCompileAdvance) { ++ JitProfileCache* jpc = JitProfileCache::instance(); ++ assert(jpc != NULL, "JitProfileCache object is null"); ++ ProfileCacheClassChain* chain = jpc->preloader()->chain(); ++ 
chain->unload_class(is_alive_closure); ++ } ++ + // Save previous _unloading pointer for CMS which may add to unloading list before + // purging and we don't want to rewalk the previously unloaded class loader data. + _saved_unloading = _unloading; +diff --git a/hotspot/src/share/vm/classfile/systemDictionary.cpp b/hotspot/src/share/vm/classfile/systemDictionary.cpp +index 50ed5e32a..6df65e97e 100644 +--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp ++++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp +@@ -296,6 +296,29 @@ Klass* SystemDictionary::resolve_array_class_or_null(Symbol* class_name, + return k; + } + ++class SuperClassRecursionTracker : public StackObj { ++public: ++ SuperClassRecursionTracker() { ++ initialize(Thread::current()); ++ } ++ ++ SuperClassRecursionTracker(Thread* thread) { ++ initialize(thread); ++ } ++ ++ ~SuperClassRecursionTracker() { ++ assert(JProfilingCacheCompileAdvance, "wrong usage"); ++ _thread->super_class_depth_dec(); ++ } ++protected: ++ void initialize(Thread* thread) { ++ assert(JProfilingCacheCompileAdvance, "wrong usage"); ++ _thread = thread; ++ _thread->super_class_depth_add(); ++ } ++private: ++ Thread* _thread; ++}; + + // Must be called for any super-class or super-interface resolution + // during class definition to allow class circularity checking +@@ -397,11 +420,21 @@ Klass* SystemDictionary::resolve_super_or_fail(Symbol* child_name, + // java.lang.Object should have been found above + assert(class_name != NULL, "null super class for resolving"); + // Resolve the super class or interface, check results on return +- Klass* superk = SystemDictionary::resolve_or_null(class_name, +- class_loader, +- protection_domain, +- THREAD); +- ++ Klass* superk = NULL; ++ if (JProfilingCacheCompileAdvance) { ++ SuperClassRecursionTracker superClassRecursionTracker; ++ superk = ++ SystemDictionary::resolve_or_null(class_name, ++ class_loader, ++ protection_domain, ++ THREAD); ++ } else { ++ superk = ++ 
SystemDictionary::resolve_or_null(class_name, ++ class_loader, ++ protection_domain, ++ THREAD); ++ } + KlassHandle superk_h(THREAD, superk); + + // Clean up of placeholders moved so that each classloadAction registrar self-cleans up +@@ -909,6 +942,14 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, + } + #endif + ++ if (JProfilingCacheCompileAdvance) { ++ if (!class_has_been_loaded) { ++ JitProfileCache* jprofilecache = JitProfileCache::instance(); ++ assert(jprofilecache != NULL, "sanity check"); ++ jprofilecache->preloader()->resolve_loaded_klass(k()); ++ } ++ } ++ + // return if the protection domain in NULL + if (protection_domain() == NULL) return k(); + +@@ -1269,6 +1310,12 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name, + } + } ); + ++ if (JProfilingCacheCompileAdvance) { ++ JitProfileCache* jprofilecache = JitProfileCache::instance(); ++ assert(jprofilecache != NULL, "sanity check"); ++ jprofilecache->preloader()->resolve_loaded_klass(k()); ++ } ++ + return k(); + } + +diff --git a/hotspot/src/share/vm/classfile/systemDictionary.hpp b/hotspot/src/share/vm/classfile/systemDictionary.hpp +index d43075890..ffe77dcc4 100644 +--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp ++++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp +@@ -27,6 +27,7 @@ + + #include "classfile/classFileStream.hpp" + #include "classfile/classLoader.hpp" ++#include "jprofilecache/jitProfileCache.hpp" + #include "oops/objArrayOop.hpp" + #include "oops/symbol.hpp" + #include "runtime/java.hpp" +@@ -204,6 +205,8 @@ class SymbolPropertyTable; + class SystemDictionary : AllStatic { + friend class VMStructs; + friend class SystemDictionaryHandles; ++ friend class JitProfileCache; ++ friend class JitProfileCacheInfo; + + public: + enum WKID { +diff --git a/hotspot/src/share/vm/classfile/vmSymbols.hpp b/hotspot/src/share/vm/classfile/vmSymbols.hpp +index 20ab434ed..dc5b70d84 100644 +--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp ++++ 
b/hotspot/src/share/vm/classfile/vmSymbols.hpp +@@ -362,6 +362,7 @@ + template(privilegedContext_name, "privilegedContext") \ + template(contextClassLoader_name, "contextClassLoader") \ + template(inheritedAccessControlContext_name, "inheritedAccessControlContext") \ ++ template(jprofilecache_dummy_name, "dummy") \ + template(isPrivileged_name, "isPrivileged") \ + template(isAuthorized_name, "isAuthorized") \ + template(getClassContext_name, "getClassContext") \ +@@ -413,6 +414,10 @@ + template(init_lock_name, "init_lock") \ + template(signers_name, "signers_name") \ + template(loader_data_name, "loader_data") \ ++ template(com_huawei_jprofilecache_JProfileCache, "com/huawei/jprofilecache/JProfileCache") \ ++ template(jprofilecache_trigger_precompilation_name, "triggerPrecompilation") \ ++ template(jprofilecache_check_if_compilation_is_complete_name, "checkIfCompilationIsComplete") \ ++ template(jprofilecache_notify_jvm_deopt_profilecache_methods_name, "notifyJVMDeoptProfileCacheMethods") \ + template(vmdependencies_name, "vmdependencies") \ + template(input_stream_void_signature, "(Ljava/io/InputStream;)V") \ + template(getFileURL_name, "getFileURL") \ +diff --git a/hotspot/src/share/vm/code/nmethod.cpp b/hotspot/src/share/vm/code/nmethod.cpp +index ba5116575..b0cb61b18 100644 +--- a/hotspot/src/share/vm/code/nmethod.cpp ++++ b/hotspot/src/share/vm/code/nmethod.cpp +@@ -34,6 +34,7 @@ + #include "compiler/compilerOracle.hpp" + #include "compiler/disassembler.hpp" + #include "interpreter/bytecode.hpp" ++#include "jprofilecache/jitProfileCache.hpp" + #include "oops/methodData.hpp" + #include "prims/jvmtiRedefineClassesTrace.hpp" + #include "prims/jvmtiImpl.hpp" +@@ -654,6 +655,10 @@ nmethod* nmethod::new_nmethod(methodHandle method, + DEBUG_ONLY(nm->verify();) + nm->log_new_nmethod(); + } ++ if (JProfilingCacheRecording && nm != NULL && comp_level >= CompilationProfileCacheRecordMinLevel) { ++ int bci = nm->is_osr_method() ? 
nm->osr_entry_bci() : InvocationEntryBci; ++ JitProfileCache::instance()->recorder()->add_method(nm->method(), bci); ++ } + return nm; + } + +diff --git a/hotspot/src/share/vm/compiler/compileBroker.cpp b/hotspot/src/share/vm/compiler/compileBroker.cpp +index 294aca0c7..86840d6c8 100644 +--- a/hotspot/src/share/vm/compiler/compileBroker.cpp ++++ b/hotspot/src/share/vm/compiler/compileBroker.cpp +@@ -327,6 +327,8 @@ void CompileTask::initialize(int compile_id, + _comment = comment; + _failure_reason = NULL; + ++ _is_jprofilecache_compilation = false; ++ + if (LogCompilation) { + _time_queued = os::elapsed_counter(); + if (hot_method.not_null()) { +@@ -1596,6 +1598,9 @@ CompileTask* CompileBroker::create_compile_task(CompileQueue* queue, + new_task->initialize(compile_id, method, osr_bci, comp_level, + hot_method, hot_count, comment, + blocking); ++ if (strcmp(comment, "JitProfileCache") == 0) { ++ new_task->mark_jprofilecache_compilation(); ++ } + queue->add(new_task); + return new_task; + } +diff --git a/hotspot/src/share/vm/compiler/compileBroker.hpp b/hotspot/src/share/vm/compiler/compileBroker.hpp +index a5bd806f6..7d490d34f 100644 +--- a/hotspot/src/share/vm/compiler/compileBroker.hpp ++++ b/hotspot/src/share/vm/compiler/compileBroker.hpp +@@ -65,6 +65,7 @@ class CompileTask : public CHeapObj { + int _hot_count; // information about its invocation counter + const char* _comment; // more info about the task + const char* _failure_reason; ++ bool _is_jprofilecache_compilation; + + public: + CompileTask() { +@@ -84,6 +85,8 @@ class CompileTask : public CHeapObj { + bool is_complete() const { return _is_complete; } + bool is_blocking() const { return _is_blocking; } + bool is_success() const { return _is_success; } ++ bool is_jprofilecache_compilation() const { return _is_jprofilecache_compilation; } ++ void mark_jprofilecache_compilation() { _is_jprofilecache_compilation = true; } + + nmethodLocker* code_handle() const { return _code_handle; } + void 
set_code_handle(nmethodLocker* l) { _code_handle = l; } +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileCache.cpp b/hotspot/src/share/vm/jprofilecache/jitProfileCache.cpp +new file mode 100644 +index 000000000..d4a472d1d +--- /dev/null ++++ b/hotspot/src/share/vm/jprofilecache/jitProfileCache.cpp +@@ -0,0 +1,1247 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++ ++#include "classfile/classLoaderData.hpp" ++#include "classfile/classLoaderData.inline.hpp" ++#include "classfile/symbolTable.hpp" ++#include "classfile/systemDictionary.hpp" ++#include "compiler/compileBroker.hpp" ++#include "jprofilecache/jitProfileCache.hpp" ++#include "jprofilecache/jitProfileRecord.hpp" ++#include "jprofilecache/jitProfileCacheThread.hpp" ++#include "jprofilecache/jitProfileCacheLogParser.hpp" ++#include "oops/method.hpp" ++#include "oops/typeArrayKlass.hpp" ++#include "runtime/arguments.hpp" ++#include "runtime/compilationPolicy.hpp" ++#include "runtime/fieldType.hpp" ++#include "runtime/handles.inline.hpp" ++#include "runtime/javaCalls.hpp" ++#include "runtime/mutexLocker.hpp" ++#include "runtime/os.hpp" ++#include "runtime/thread.hpp" ++#include "utilities/hashtable.inline.hpp" ++#include "utilities/stack.hpp" ++#include "utilities/stack.inline.hpp" ++#include "runtime/atomic.hpp" ++#include "jprofilecache/jitProfileCacheLog.hpp" // must be last one to use customized jprofilecache log ++#include "libadt/dict.hpp" ++ ++#define JITPROFILECACHE_VERSION 0x1 ++ ++JitProfileCache* JitProfileCache::_jit_profile_cache_instance = NULL; ++ ++JitProfileCache::JitProfileCache() ++ : _jit_profile_cache_state(NOT_INIT), ++ _jit_profile_cache_info(NULL), ++ _jit_profile_cache_recorder(NULL), ++ _excluding_matcher(NULL), ++ _jit_profile_cache_version(JITPROFILECACHE_VERSION), ++ _dummy_method(NULL), ++ profilecacheComplete(false) { ++} ++ ++JitProfileCache::~JitProfileCache() { ++ delete _jit_profile_cache_recorder; ++ delete _jit_profile_cache_info; ++} ++ ++JitProfileCache* JitProfileCache::create_instance() { ++ _jit_profile_cache_instance = new JitProfileCache(); ++ return _jit_profile_cache_instance; ++} ++ ++JitProfileCache::JitProfileCacheState JitProfileCache::init_for_recording() { ++ if (!(JProfilingCacheRecording && !JProfilingCacheCompileAdvance)) { ++ 
jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: JitProfileCache option verify failure"); ++ _jit_profile_cache_state = JitProfileCache::IS_ERR; ++ return _jit_profile_cache_state; ++ } ++ _jit_profile_cache_recorder = new JitProfileRecorder(); ++ _jit_profile_cache_recorder->set_holder(this); ++ _jit_profile_cache_recorder->init(); ++ ++ // wait JProfilingCacheRecordTime flush jit recorder ++ if (JProfilingCacheRecordTime > 0) { ++ JitProfileCacheThread::launch_with_delay(JProfilingCacheRecordTime); ++ } ++ ++ // check state ++ if (_jit_profile_cache_recorder->is_valid()) { ++ _jit_profile_cache_state = JitProfileCache::IS_OK; ++ } else { ++ _jit_profile_cache_state = JitProfileCache::IS_ERR; ++ } ++ return _jit_profile_cache_state; ++} ++ ++JitProfileCache::JitProfileCacheState JitProfileCache::init_for_profilecache() { ++ if (!(!JProfilingCacheRecording && JProfilingCacheCompileAdvance)) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : JitProfile option verify fail"); ++ _jit_profile_cache_state = JitProfileCache::IS_ERR; ++ return _jit_profile_cache_state; ++ } ++ if (CompilationProfileCacheExclude != NULL) { ++ _excluding_matcher = new (ResourceObj::C_HEAP, mtClass) SymbolRegexMatcher(CompilationProfileCacheExclude); ++ } ++ if (CompilationProfileCacheExplicitDeopt && JProfilingCacheDeoptTime > 0) { ++ jprofilecache_log_warning(profilecache)("[JitProfileCache] WARNING : JProfilingCacheDeoptTime is unused when CompilationProfileCacheExplicitDeopt is enable"); ++ } ++ _jit_profile_cache_info = new JitProfileCacheInfo(); ++ _jit_profile_cache_info->set_holder(this); ++ _jit_profile_cache_info->init(); ++ if (_jit_profile_cache_info->is_valid()) { ++ _jit_profile_cache_state = JitProfileCache::IS_OK; ++ } else { ++ _jit_profile_cache_state = JitProfileCache::IS_ERR; ++ } ++ return _jit_profile_cache_state; ++} ++ ++void JitProfileCache::init() { ++ ++#if defined(__aarch64__) ++ if (!VM_Version::is_hisi_enabled()) { ++ if 
(JProfilingCacheCompileAdvance || JProfilingCacheRecording) { ++ tty->print_cr("JProfileCache is only supported on Kunpeng architecture. "); ++ vm_exit(-1); ++ } ++ return; ++ } ++#else ++ // x86 return ++ if (JProfilingCacheCompileAdvance || JProfilingCacheRecording) { ++ tty->print_cr("JProfileCache is only supported on Kunpeng architecture. "); ++ vm_exit(-1); ++ } ++ return; ++#endif ++ ++ // set log level ++ set_log_level(); ++ ++ if (JProfilingCacheCompileAdvance) { ++ init_for_profilecache(); ++ } else if(JProfilingCacheRecording) { ++ init_for_recording(); ++ } ++ if ((JProfilingCacheRecording || JProfilingCacheCompileAdvance) && !JitProfileCache::is_valid()) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: JProfileCache init error."); ++ vm_exit(-1); ++ } ++} ++ ++void JitProfileCache::set_log_level() { ++ if (ProfilingCacheLogLevel == NULL) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] Error: ProfilingCacheLogLevel is invalid must in trace, debug, info, warning, error, off"); ++ _jit_profile_cache_state = JitProfileCache::IS_ERR; ++ vm_exit(-1); ++ } else if (strcmp(ProfilingCacheLogLevel, "trace") == 0) { ++ LogLevel::LogLevelNum = LogLevel::Trace; ++ } else if (strcmp(ProfilingCacheLogLevel, "debug") == 0) { ++ LogLevel::LogLevelNum = LogLevel::Debug; ++ } else if (strcmp(ProfilingCacheLogLevel, "info") == 0) { ++ LogLevel::LogLevelNum = LogLevel::Info; ++ } else if (strcmp(ProfilingCacheLogLevel, "warning") == 0) { ++ LogLevel::LogLevelNum = LogLevel::Warning; ++ } else if (strcmp(ProfilingCacheLogLevel, "error") == 0) { ++ LogLevel::LogLevelNum = LogLevel::Error; ++ } else if (strcmp(ProfilingCacheLogLevel, "off") == 0) { ++ LogLevel::LogLevelNum = LogLevel::Off; ++ } else { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] Error: ProfilingCacheLogLevel is invalid must in trace, debug, info, warning, error, off"); ++ _jit_profile_cache_state = JitProfileCache::IS_ERR; ++ vm_exit(-1); ++ } ++} ++ 
++JitProfileCache::JitProfileCacheState JitProfileCache::flush_recorder() { ++ if(_jit_profile_cache_state == IS_ERR) { ++ return _jit_profile_cache_state; ++ } ++ _jit_profile_cache_recorder->flush_record(); ++ if (_jit_profile_cache_recorder->is_valid()) { ++ _jit_profile_cache_state = IS_OK; ++ } else { ++ _jit_profile_cache_state = IS_ERR; ++ } ++ return _jit_profile_cache_state; ++} ++ ++bool JitProfileCache::commit_compilation(methodHandle m, int bci, TRAPS) { ++ int comp_level = CompLevel_full_optimization; ++ if (CompilationPolicy::can_be_compiled(m, comp_level)) { ++ CompileBroker::compile_method(m, bci, comp_level, ++ methodHandle(), 0, ++ "JitProfileCache", THREAD); ++ return true; ++ } ++ return false; ++} ++ ++Symbol* JitProfileCache::get_class_loader_name(ClassLoaderData* cld) { ++ Handle class_loader(Thread::current(), cld->class_loader()); ++ Symbol* loader_name = NULL; ++ if (class_loader() != NULL) { ++ loader_name = JitProfileCacheInfo::remove_meaningless_suffix(class_loader()->klass()->name()); ++ } else { ++ loader_name = SymbolTable::new_symbol("NULL", Thread::current()); ++ } ++ return loader_name; ++} ++ ++#define HEADER_SIZE 36 ++#define MAGIC_NUMBER 0xBABA ++ ++#define JVM_DEFINE_CLASS_PATH "_JVM_DefineClass_" ++ ++JProfileCacheClassDictionary::JProfileCacheClassDictionary(int size) ++ : Hashtable(size, sizeof(ProfileCacheClassEntry)) { ++} ++ ++JProfileCacheClassDictionary::~JProfileCacheClassDictionary() { } ++ ++ProfileCacheClassEntry* JProfileCacheClassDictionary::new_entry(Symbol* symbol) { ++ unsigned int hash = symbol->identity_hash(); ++ ProfileCacheClassEntry* entry = (ProfileCacheClassEntry*)Hashtable:: ++ new_entry(hash, symbol); ++ entry->init(); ++ return entry; ++} ++ ++ProfileCacheClassEntry* JProfileCacheClassDictionary::find_entry(InstanceKlass* k) { ++ Symbol* name = k->name(); ++ Symbol* path = k->source_file_path(); ++ if (path == NULL) { ++ path = SymbolTable::new_symbol(JVM_DEFINE_CLASS_PATH, Thread::current()); ++ } 
++ Symbol* loader_name = JitProfileCache::get_class_loader_name(k->class_loader_data()); ++ int hash = name->identity_hash(); ++ return find_entry(hash, name, loader_name, path); ++} ++ ++ProfileCacheClassEntry* JProfileCacheClassDictionary::find_entry(unsigned int hash_value, ++ Symbol* name, ++ Symbol* loader_name, ++ Symbol* path) { ++ int index = hash_to_index(hash_value); ++ for (ProfileCacheClassEntry* p = bucket(index); p != NULL; p = p->next()) { ++ if (p->literal()->fast_compare(name) == 0 && ++ p->class_loader_name()->fast_compare(loader_name) == 0 && ++ p->class_path()->fast_compare(path) == 0) { ++ return p; ++ } ++ } ++ return NULL; ++} ++ ++ProfileCacheClassEntry* JProfileCacheClassDictionary::find_head_entry(unsigned int hash_value, ++ Symbol* name) { ++ int index = hash_to_index(hash_value); ++ for (ProfileCacheClassEntry* p = bucket(index); p != NULL; p = p->next()) { ++ if (p->literal()->fast_compare(name) == 0) { ++ return p; ++ } ++ } ++ return NULL; ++} ++ ++ProfileCacheClassHolder* ProfileCacheClassEntry::find_class_holder(unsigned int size, ++ unsigned int crc32) { ++ for (ProfileCacheClassHolder* p = this->head_holder(); p != NULL; p = p->next()) { ++ if (p->crc32() == crc32 && p->size() == size) { ++ return p; ++ } ++ } ++ return NULL; ++} ++ ++ProfileCacheMethodHold* ProfileCacheMethodHold::clone_and_add() { ++ ProfileCacheMethodHold* clone = new ProfileCacheMethodHold(*this); ++ clone->set_next(_next); ++ _next = clone; ++ return clone; ++} ++ ++ProfileCacheClassEntry* JProfileCacheClassDictionary::find_or_create_class_entry(unsigned int hash_value, ++ Symbol* name, ++ Symbol* loader_name, ++ Symbol* path, ++ int index) { ++ ProfileCacheClassEntry* p = find_entry(hash_value, name, loader_name, path); ++ if (p == NULL) { ++ p = new_entry(name); ++ p->set_chain_offset(index); ++ p->set_class_loader_name(loader_name); ++ p->set_class_path(path); ++ add_entry(hash_to_index(hash_value), p); ++ } ++ return p; ++} ++ ++#define 
METHOD_LIST_INITIAL_CAPACITY 16 ++ ++ProfileCacheMethodHold::ProfileCacheMethodHold(Symbol* name, Symbol* signature) ++ : _method_name(name), ++ _method_signature(signature), ++ _method_size(0), ++ _method_hash(0), ++ _interpreter_invocation_count(0), ++ _interpreter_exception_count(0), ++ _invocation_count(0), ++ _backage_count(0), ++ _mounted_offset(-1), ++ _owns_method_list(true), ++ _is_method_deopted(false), ++ _next(NULL), ++ _resolved_method(NULL), ++ _method_list(new (ResourceObj::C_HEAP, mtClass) ++ GrowableArray(METHOD_LIST_INITIAL_CAPACITY, true, mtClass)) { ++} ++ ++ProfileCacheMethodHold::ProfileCacheMethodHold(ProfileCacheMethodHold& rhs) ++ : _method_name(rhs._method_name), ++ _method_signature(rhs._method_signature), ++ _method_size(rhs._method_size), ++ _method_hash(rhs._method_hash), ++ _interpreter_invocation_count(rhs._interpreter_invocation_count), ++ _interpreter_exception_count(rhs._interpreter_exception_count), ++ _invocation_count(rhs._invocation_count), ++ _backage_count(rhs._backage_count), ++ _mounted_offset(rhs._mounted_offset), ++ _owns_method_list(false), ++ _is_method_deopted(false), ++ _next(NULL), ++ _resolved_method(NULL), ++ _method_list(rhs._method_list) { ++} ++ ++ProfileCacheMethodHold::~ProfileCacheMethodHold() { ++ if (_owns_method_list) { ++ delete _method_list; ++ } ++} ++ ++bool ProfileCacheMethodHold::is_method_match(Method* method) { ++ if (method_name()->fast_compare(method->name()) == 0 ++ && method_signature()->fast_compare(method->signature()) == 0) { ++ return true; ++ } else { ++ return false; ++ } ++} ++ ++bool ProfileCacheMethodHold::is_alive(BoolObjectClosure* is_alive_closure) const { ++ if (_resolved_method == NULL || _resolved_method->constMethod() == NULL || _resolved_method->constants() == NULL || _resolved_method->constants()->pool_holder() == NULL) { ++ return false; ++ } ++ ClassLoaderData* data = _resolved_method->method_holder()->class_loader_data(); ++ if (data == NULL || 
!data->is_alive(is_alive_closure)) { ++ return false; ++ } ++ return true; ++} ++ ++#define CLASS_METHOD_LIST_INITIAL_CAPACITY 16 ++ ++ProfileCacheClassHolder::ProfileCacheClassHolder(Symbol* name, Symbol* loader_name, ++ Symbol* path, unsigned int size, ++ unsigned int hash, unsigned int crc32) ++ : _class_size(size), ++ _class_hash(hash), ++ _class_crc32(crc32), ++ _class_name(name), ++ _class_loader_name(loader_name), ++ _class_path(path), ++ _class_method_list(new (ResourceObj::C_HEAP, mtInternal) ++ GrowableArray(CLASS_METHOD_LIST_INITIAL_CAPACITY, true, mtClass)), ++ _class_resolved(false), ++ _next(NULL) { ++} ++ ++ ++ProfileCacheClassHolder::~ProfileCacheClassHolder() { ++ delete _class_method_list; ++} ++ ++bool ProfileCacheClassChain::ProfileCacheClassChainEntry::is_all_initialized() { ++ int len = resolved_klasses()->length(); ++ // if resolved klass is empty return false ++ if (len == 0) { ++ return false; ++ } ++ for (int i = 0; i < len; i++) { ++ InstanceKlass* k = resolved_klasses()->at(i); ++ if (k != NULL && k->is_not_initialized() && !k->is_in_error_state() ) { ++ return false; ++ } ++ } ++ return true; ++} ++ ++bool ProfileCacheClassChain::ProfileCacheClassChainEntry::contains_redefined_class() { ++ int len = resolved_klasses()->length(); ++ for (int i = 0; i < len; i++) { ++ InstanceKlass* k = resolved_klasses()->at(i); ++ if (k != NULL && k->has_been_redefined()) { ++ ResourceMark rm; ++ jprofilecache_log_warning(profilecache)("[JitProfileCache] WARNING: ignore redefined class after API" ++ " triggerPrecompilation : %s:%s@%s.", class_name()->as_C_string(), ++ class_loader_name()->as_C_string(), class_path()->as_C_string()); ++ return true; ++ } ++ } ++ return false; ++} ++ ++InstanceKlass* ProfileCacheClassChain::ProfileCacheClassChainEntry::get_first_uninitialized_klass() { ++ int len = resolved_klasses()->length(); ++ for (int i = 0; i < len; i++) { ++ InstanceKlass* k = resolved_klasses()->at(i); ++ if (k != NULL && k->is_not_initialized()) 
{ ++ return k; ++ } ++ } ++ return NULL; ++} ++ ++ProfileCacheMethodHold* MethodHolderIterator::next() { ++ ProfileCacheMethodHold* next_holder = _current_method_hold->next(); ++ if (next_holder != NULL) { ++ _current_method_hold = next_holder; ++ return _current_method_hold; ++ } ++ while (_holder_index > 0) { ++ _holder_index--; ++ ProfileCacheClassChain::ProfileCacheClassChainEntry* entry = _profile_cache_class_chain->at(_holder_index); ++ if (entry->method_holder() != NULL) { ++ _current_method_hold = entry->method_holder(); ++ return _current_method_hold; ++ } ++ } ++ _current_method_hold = NULL; ++ return _current_method_hold; ++} ++ ++ProfileCacheClassChain::ProfileCacheClassChain(unsigned int size) ++ : _class_chain_inited_index(-1), ++ _loaded_class_index(-1), ++ _length(size), ++ _state(NOT_INITED), ++ _entries(new ProfileCacheClassChainEntry[size]), ++ _holder(NULL), ++ _init_timestamp(), ++ _last_timestamp(), ++ _deopt_index(-1), ++ _deopt_cur_holder(NULL), ++ _has_unmarked_compiling_flag(false) { ++ _init_timestamp.update(); ++ _last_timestamp.update(); ++ try_transition_to_state(INITED); ++} ++ ++ProfileCacheClassChain::~ProfileCacheClassChain() { ++ delete[] _entries; ++} ++ ++const char* ProfileCacheClassChain::get_state(ClassChainState state) { ++ switch (state) { ++ case NOT_INITED: ++ return "not init"; ++ case INITED: ++ return "inited"; ++ case PRE_PROFILECACHE: ++ return "notify precompile"; ++ case PROFILECACHE_COMPILING: ++ return "precompiling"; ++ case PROFILECACHE_DONE: ++ return "precompile done"; ++ case PROFILECACHE_PRE_DEOPTIMIZE: ++ return "trigger deoptimize"; ++ case PROFILECACHE_DEOPTIMIZING: ++ return "deoptmizing"; ++ case PROFILECACHE_DEOPTIMIZED: ++ return "deoptimize done"; ++ case PROFILECACHE_ERROR_STATE: ++ return "profilecache error state"; ++ } ++ assert(false, "invalid state"); ++ return NULL; ++} ++ ++bool ProfileCacheClassChain::try_transition_to_state(ClassChainState new_state) { ++ ClassChainState old_state = 
current_state(); ++ if (old_state == new_state) { ++ jprofilecache_log_warning(profilecache)("JProfileCache [WARNING]: profilecache state has already been %s Doesn't need transferred to %s", ++ get_state(old_state), get_state(new_state)); ++ return true; ++ } ++ bool can_transfer = false; ++ switch (new_state) { ++ case PROFILECACHE_ERROR_STATE: ++ if (old_state != PROFILECACHE_DEOPTIMIZED) { ++ can_transfer = true; ++ } ++ break; ++ default: ++ if (new_state == old_state + 1) { ++ can_transfer = true; ++ } ++ break; ++ } ++ if (can_transfer) { ++ if (Atomic::cmpxchg((jint)new_state, (jint*)&_state, (jint)old_state) == old_state) { ++ return true; ++ } else { ++ jprofilecache_log_warning(profilecache)("JProfileCache [WARNING]: failed to transfer profilecache state from %s to %s, conflict with other operation", ++ get_state(old_state), get_state(new_state)); ++ return false; ++ } ++ } else { ++ jprofilecache_log_warning(profilecache)("JProfileCache [WARNING]: can not transfer profilecache state from %s to %s", ++ get_state(old_state), get_state(new_state)); ++ return false; ++ } ++} ++ ++void ProfileCacheClassChain::mark_loaded_class(InstanceKlass* k) { ++ Symbol* class_name = k->name(); ++ unsigned int crc32 = k->crc32(); ++ unsigned int size = k->bytes_size(); ++ ++ if (!can_record_class()) { ++ return; ++ } ++ ++ ProfileCacheClassEntry* class_entry = holder()->jit_profile_cache_dict()->find_entry(k); ++ if (class_entry == NULL) { ++ return; ++ } ++ int chain_index = class_entry->chain_offset(); ++ ProfileCacheClassHolder* holder = class_entry->find_class_holder(size, crc32); ++ if (holder != NULL) { ++ if (holder->resolved()) { ++ handle_duplicate_class(k, chain_index); ++ return; ++ } else { ++ resolve_class_methods(k, holder, chain_index); ++ } ++ } else { ++ ResourceMark rm; ++ jprofilecache_log_debug(profilecache)("[JitProfileCache] DEBUG : class %s is not in proFile", ++ k->name()->as_C_string()); ++ } ++ ++ update_class_chain(k, chain_index); ++} ++ ++void 
ProfileCacheClassChain::handle_duplicate_class(InstanceKlass *k, int chain_index) { ++ Thread *const t = Thread::current(); ++ if (!t->is_super_class_resolution_active()) { ++ assert(k->is_not_initialized(), "Invalid klass state"); ++ assert(t->is_Java_thread(), "Thread type mismatch"); ++ ResourceMark rm; ++ jprofilecache_log_warning(profilecache)("[JitProfileCache] WARNING : Duplicate load class %s at index %d", ++ k->name()->as_C_string(), chain_index); ++ } ++} ++ ++void ProfileCacheClassChain::resolve_class_methods(InstanceKlass* k, ProfileCacheClassHolder* holder, int chain_index) { ++ MutexLockerEx mu(ProfileCacheClassChain_lock); ++ int methods = k->methods()->length(); ++ for (int index = 0; index < methods; index++) { ++ Method* m = k->methods()->at(index); ++ resolve_method_info(m, holder); ++ } ++ { ++ ResourceMark rm; ++ jprofilecache_log_debug(profilecache)("[JitProfileCache] DEBUG : class %s at index %d method_list has bean recorded", ++ k->name()->as_C_string(), chain_index); ++ } ++ holder->set_resolved(); ++} ++ ++void ProfileCacheClassChain::update_class_chain (InstanceKlass* k, int chain_index) { ++ MutexLockerEx mu(ProfileCacheClassChain_lock); ++ assert(chain_index >= 0 && chain_index <= length(), "index out of bound"); ++ assert(loaded_index() >= class_chain_inited_index(), "loaded index must larger than inited index"); ++ ProfileCacheClassChainEntry* chain_entry = &_entries[chain_index]; ++ ++ // check class state is skip or init return ++ if (chain_entry->is_skipped()) { ++ ResourceMark rm; ++ char* class_name = k->name()->as_C_string(); ++ int index = chain_index; ++ bool print_log_detail = false; ++ if (LogLevel::Warning >= LogLevel::LogLevelNum) { ++ print_log_detail = true; ++ } ++ os::Linux::handle_ignore_class(class_name, index, print_log_detail); ++ return; ++ } else if (chain_entry->is_inited()) { ++ return; ++ } ++ // set class reserved ++ chain_entry->resolved_klasses()->append(k); ++ Thread* thread = Thread::current(); ++ 
chain_entry->method_keep_holders()->append(JNIHandles::make_global(Handle(thread, k->klass_holder()))); ++ ++ int status = os::Linux::get_class_state(); ++ chain_entry->set_class_state(status); ++ ++ if (chain_index == loaded_index() + 1) { ++ update_loaded_index(chain_index); ++ } ++} ++ ++void ProfileCacheClassChain::add_method_at_index(ProfileCacheMethodHold* mh, int index) { ++ assert(index >= 0 && index < length(), "out of bound"); ++ ProfileCacheClassChainEntry* entry = &_entries[index]; ++ entry->add_method_holder(mh); ++} ++ ++void ProfileCacheClassChain::update_loaded_index(int index) { ++ assert(index >= 0 && index < length(), "out of bound"); ++ while (index < length() && !_entries[index].is_not_loaded()) { ++ index++; ++ } ++ set_loaded_index(index - 1); ++} ++ ++void ProfileCacheClassChain::compile_methodholders_queue(Stack& compile_queue) { ++ while (!compile_queue.is_empty()) { ++ ProfileCacheMethodHold* pmh = compile_queue.pop(); ++ compile_method(pmh); ++ Thread* THREAD = Thread::current(); ++ if (HAS_PENDING_EXCEPTION) { ++ ResourceMark rm; ++ jprofilecache_log_warning(profilecache)("[JitProfileCache] WARNING: Exceptions happened in compiling %s", ++ pmh->method_name()->as_C_string()); ++ CLEAR_PENDING_EXCEPTION; ++ continue; ++ } ++ } ++} ++ ++void ProfileCacheClassChain::precompilation() { ++ Thread* THREAD = Thread::current(); ++ if (!try_transition_to_state(PROFILECACHE_COMPILING)) { ++ jprofilecache_log_warning(profilecache)("JProfileCache [WARNING]: The compilation cannot be started in the current state"); ++ return; ++ } ++ ++ bool cancel_precompilation = false; ++ for ( int index = 0; index < length(); index++ ) { ++ if (cancel_precompilation) { ++ break; ++ } ++ InstanceKlass* klass = NULL; ++ Stack compile_queue; ++ { ++ MutexLockerEx mu(ProfileCacheClassChain_lock); ++ ProfileCacheClassChainEntry *entry = &_entries[index]; ++ switch(entry->class_state()) { ++ case ProfileCacheClassChainEntry::_not_loaded: ++ // if class not load before 
skip ++ entry->set_skipped(); ++ { ++ ResourceMark rm; ++ char* class_name = entry->class_name()->as_C_string(); ++ char* class_loader_name = entry->class_loader_name()->as_C_string(); ++ char* class_path = entry->class_path()->as_C_string(); ++ bool print_log_detail = false; ++ if (LogLevel::Warning >= LogLevel::LogLevelNum) { ++ print_log_detail = true; ++ } ++ os::Linux::handle_skipped(class_name, class_loader_name, class_path, print_log_detail); ++ } ++ case ProfileCacheClassChainEntry::_load_skipped: ++ break; ++ case ProfileCacheClassChainEntry::_class_loaded: ++ klass = entry->get_first_uninitialized_klass(); ++ entry->set_inited(); ++ case ProfileCacheClassChainEntry::_class_inited: ++ if (!entry->contains_redefined_class()){ ++ ProfileCacheMethodHold* mh = entry->method_holder(); ++ while (mh != NULL) { ++ compile_queue.push(mh); ++ mh = mh->next(); ++ } ++ } ++ break; ++ default: ++ { ++ ResourceMark rm; ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: class %s has an invalid state %d", ++ entry->class_name()->as_C_string(), ++ entry->class_state()); ++ return; ++ } ++ } ++ } ++ if (klass != NULL) { ++ assert(THREAD->is_Java_thread(), "sanity check"); ++ klass->initialize(THREAD); ++ if (HAS_PENDING_EXCEPTION) { ++ Symbol *loader = JitProfileCache::get_class_loader_name(klass->class_loader_data()); ++ ResourceMark rm; ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: Exceptions happened in initializing %s being loaded by %s", ++ klass->name()->as_C_string(), loader->as_C_string()); ++ return; ++ } ++ } ++ { ++ MutexLockerEx mu(ProfileCacheClassChain_lock); ++ refresh_indexes(); ++ if (index > class_chain_inited_index()) { ++ cancel_precompilation = true; ++ } ++ } ++ ++ // add method to compile queue and precompile ++ compile_methodholders_queue(compile_queue); ++ } ++} ++ ++bool ProfileCacheClassChain::compile_method(ProfileCacheMethodHold* mh) { ++ Thread* t = Thread::current(); ++ methodHandle m(t, 
mh->resolved_method()); ++ if (m() == NULL || m->compiled_by_jprofilecache() || m->has_compiled_code()) { ++ return false; ++ } ++ InstanceKlass* klass = m->constants()->pool_holder(); ++ ++ // if klass not initialize return ++ if (!klass->is_initialized()) { ++ return false; ++ } ++ ++ m->set_compiled_by_jprofilecache(true); ++ int bci = InvocationEntryBci; ++ ++ // commit compile ++ bool ret = JitProfileCache::commit_compilation(m, bci, t); ++ if (ret) { ++ ResourceMark rm; ++ jprofilecache_log_info(profilecache)("[JitProfileCache] method %s successfully compiled", ++ m->name_and_sig_as_C_string()); ++ } ++ return ret; ++} ++ ++void ProfileCacheClassChain::refresh_indexes() { ++ assert_lock_strong(ProfileCacheClassChain_lock); ++ int loaded = loaded_index(); ++ int inited = class_chain_inited_index(); ++ for (int i = inited + 1; i < length(); i++) { ++ ProfileCacheClassChainEntry* e = &_entries[i]; ++ int len = e->resolved_klasses()->length(); ++ if (e->is_not_loaded()) { ++ assert(len == 0, "wrong state"); ++ } ++ if (e->is_loaded()) { ++ assert(len > 0, "class init chain entry state error"); ++ if (e->is_all_initialized()) { ++ e->set_inited(); ++ } ++ } ++ if (e->is_loaded() && i == loaded + 1) { ++ loaded = i; ++ } else if (e->is_inited() && i == inited + 1) { ++ loaded = i; ++ inited = i; ++ } else if (e->is_skipped()) { ++ if (i == loaded + 1) { ++ loaded = i; ++ } ++ if (i == inited + 1) { ++ inited = i; ++ } ++ } else { ++ break; ++ } ++ } ++ assert(loaded >= inited, "loaded index must not less than inited index"); ++ set_loaded_index(loaded); ++ set_inited_index(inited); ++} ++ ++bool ProfileCacheClassChain::should_deoptimize_methods() { ++ assert(JProfilingCacheCompileAdvance, "Sanity check"); ++ assert(SafepointSynchronize::is_at_safepoint(), "must be in safepoint"); ++ ClassChainState state = current_state(); ++ if (state == PROFILECACHE_DEOPTIMIZED || state == PROFILECACHE_ERROR_STATE) { ++ return false; ++ } ++ if 
(!CompilationProfileCacheExplicitDeopt && JProfilingCacheDeoptTime > 0) { ++ if (_init_timestamp.seconds() < JProfilingCacheDeoptTime) { ++ return false; ++ } else if (state ==PROFILECACHE_DONE) { ++ try_transition_to_state(PROFILECACHE_PRE_DEOPTIMIZE); ++ } else { ++ } ++ } ++ ++ if (current_state() != PROFILECACHE_DEOPTIMIZING ++ && current_state() != PROFILECACHE_PRE_DEOPTIMIZE) { ++ return false; ++ } ++ ++ Method* dummy_method = JitProfileCache::instance()->dummy_method(); ++ if (dummy_method == NULL || dummy_method->code() == NULL) { ++ return false; ++ } ++ ++ if (_last_timestamp.seconds() < CompilationProfileCacheDeoptMinInterval) { ++ return false; ++ } ++ VM_Operation* op = VMThread::vm_operation(); ++ if (op != NULL && !op->allow_nested_vm_operations()) { ++ return false; ++ } ++ if (_length <= 1) { ++ return false; ++ } ++ return true; ++} ++ ++void ProfileCacheClassChain::deopt_prologue() { ++ if (current_state() == PROFILECACHE_PRE_DEOPTIMIZE) { ++ if (try_transition_to_state(PROFILECACHE_DEOPTIMIZING)) { ++ jprofilecache_log_info(profilecache)("JProfileCache [INFO]: start deoptimize profilecache methods"); ++ _deopt_index = length() - 1; ++ while (_deopt_index > 0 && _deopt_cur_holder == NULL) { ++ ProfileCacheClassChain::ProfileCacheClassChainEntry* entry = this->at(_deopt_index); ++ _deopt_cur_holder = entry->method_holder(); ++ _deopt_index--; ++ } ++ } else { ++ ShouldNotReachHere(); ++ } ++ } else { ++ guarantee(current_state() == PROFILECACHE_DEOPTIMIZING, "invalid profilecache state"); ++ } ++} ++ ++void ProfileCacheClassChain::deopt_epilogue() { ++ try_transition_to_state(PROFILECACHE_DEOPTIMIZED); ++ jprofilecache_log_info(profilecache)("JProfileCache [INFO]: all profilecache methods have been deoptimized"); ++ // free all keep alive method ++ for (int i = 0; i < length(); i++) { ++ ProfileCacheClassChainEntry *entry = this->at(i); ++ GrowableArray *array = entry->resolved_klasses(); ++ if (!entry->method_keep_holders()->is_empty()) { ++ int 
len = entry->method_keep_holders()->length(); ++ for (int i = 0; i < len; i++) { ++ JNIHandles::destroy_global(entry->method_keep_holders()->at(i)); ++ } ++ } ++ } ++} ++ ++void ProfileCacheClassChain::invoke_deoptimize_vmop() { ++ VM_Deoptimize op; ++ VMThread::execute(&op); ++} ++ ++void ProfileCacheClassChain::deoptimize_methods() { ++ assert(SafepointSynchronize::is_at_safepoint(), "profilecache deoptimize methods must be in safepoint"); ++ deopt_prologue(); ++ ++ Method* dummy_method = JitProfileCache::instance()->dummy_method(); ++ assert( dummy_method != NULL && dummy_method->code() != NULL, "profilecache the dummy method must be compiled"); ++ int dummy_compile_id = dummy_method->code()->compile_id(); ++ ++ MethodHolderIterator iter(this, _deopt_cur_holder, _deopt_index); ++ int num = 0; ++ while (*iter != NULL) { ++ ProfileCacheMethodHold* pmh = *iter; ++ if (pmh->resolved_method() == NULL) { ++ iter.next(); ++ continue; ++ } ++ methodHandle m(pmh->resolved_method()); ++ ++ if(m() == NULL || !m->compiled_by_jprofilecache()) { ++ iter.next(); ++ continue; ++ } ++#ifndef PRODUCT ++ m->set_deopted_by_jprofilecache(true); ++#endif ++ pmh->set_is_method_deopted(true); ++ if (m->code() != NULL && m->code()->compile_id() > dummy_compile_id) { ++ ResourceMark rm; ++ jprofilecache_log_warning(profilecache)("[JitProfileCache] WARNING : skip deoptimize %s because it is compiled after precompile", ++ m->name_and_sig_as_C_string()); ++ iter.next(); ++ continue; ++ } ++ int result = 0; ++ if (m->code() != NULL) { ++ m->code()->mark_for_deoptimization(); ++ result++; ++ } ++ result += CodeCache::mark_for_deoptimization(m()); ++ if (result > 0) { ++ ResourceMark rm; ++ jprofilecache_log_warning(profilecache)("[JitProfileCache] WARNING : deoptimize precompile method %s", ++ m->name_and_sig_as_C_string()); ++ num++; ++ } ++ iter.next(); ++ if (num == (int)CompilationProfileCacheDeoptNumOfMethodsPerIter) { ++ break; ++ } ++ } ++ if (num > 0) { ++ invoke_deoptimize_vmop(); ++ 
} ++ ++ _last_timestamp.update(); ++ _deopt_index = iter.index(); ++ _deopt_cur_holder = *iter; ++ ++ if (*iter == NULL) { ++ deopt_epilogue(); ++ } ++} ++ ++void ProfileCacheClassChain::unload_class(BoolObjectClosure *is_alive) { ++ assert(SafepointSynchronize::is_at_safepoint(), "must be in safepoint"); ++ if (deopt_has_done()) { ++ return; ++ } ++ for (int i = 0; i < length(); i++) { ++ ProfileCacheClassChainEntry* entry = this->at(i); ++ GrowableArray* array = entry->resolved_klasses(); ++ // Check whether the keep alive method should be unloading. ++ GrowableArray* keep_array = entry->method_keep_holders(); ++ for (int i = 0; i < array->length(); i++) { ++ InstanceKlass* k = array->at(i); ++ if (k == NULL) { ++ continue; ++ } ++ ++ // if class not load continue ++ if (entry->is_not_loaded() || entry->is_skipped()) { ++ continue; ++ } ++ ++ ClassLoaderData* data = k->class_loader_data(); ++ // if data is NULL or not alive should be remove ++ if (data == NULL || !data->is_alive(is_alive)) { ++ // remove class from chain ++ array->remove_at(i); ++ JNIHandles::destroy_global(keep_array->at(i)); ++ keep_array->remove_at(i); ++ } ++ } ++ for (ProfileCacheMethodHold* holder = entry->method_holder(); holder != NULL; ++ holder = holder->next()) { ++ // if method not compile or deopted continue ++ if (holder->is_method_deopted() || holder->resolved_method() == NULL) { ++ continue; ++ } ++ if (!holder->is_alive(is_alive)) { ++ // process the method in the class. 
++ holder->set_resolved_method(NULL); ++ } ++ } ++ } ++} ++ ++ProfileCacheMethodHold* ProfileCacheClassChain::resolve_method_info(Method* method, ProfileCacheClassHolder* holder) { ++ ProfileCacheMethodHold* mh = NULL; ++ // find method ++ for (int i = 0; i < holder->method_list()->length(); i++) { ++ ProfileCacheMethodHold* current_mh = holder->method_list()->at(i); ++ if (current_mh->is_method_match(method)) { ++ mh = current_mh; ++ break; ++ } ++ } ++ if (mh == NULL) { ++ return mh; ++ } else if (mh->resolved_method() == NULL) { ++ mh->set_resolved_method(method); ++ return mh; ++ } else { ++ ProfileCacheMethodHold* new_holder = mh->clone_and_add(); ++ new_holder->set_resolved_method(method); ++ return new_holder; ++ } ++} ++ ++#define PRELOAD_CLASS_HS_SIZE 10240 ++ ++JitProfileCacheInfo::JitProfileCacheInfo() ++ : _jit_profile_cache_dict(NULL), ++ _profile_cache_chain(NULL), ++ _method_loaded_count(0), ++ _state(NOT_INIT), ++ _holder(NULL), ++ _jvm_booted_is_done(false) { ++} ++ ++JitProfileCacheInfo::~JitProfileCacheInfo() { ++ delete _jit_profile_cache_dict; ++ delete _profile_cache_chain; ++} ++ ++Symbol* JitProfileCacheInfo::remove_meaningless_suffix(Symbol* s) { ++ ResourceMark rm; ++ Thread* t = Thread::current(); ++ Symbol* result = s; ++ char* s_char = s->as_C_string(); ++ int len = (int)::strlen(s_char); ++ int i = 0; ++ for (i = 0; i < len - 1; i++) { ++ if (s_char[i] == '$' && s_char[i+1] == '$') { ++ break; ++ } ++ } ++ if (i < len - 1) { ++ i = i == 0 ? i = 1: i; ++ result = SymbolTable::new_symbol(s_char, i, t); ++ s_char = result->as_C_string(); ++ } ++ len = (int)::strlen(s_char); ++ i = len - 1; ++ for (; i >= 0; i--) { ++ if (s_char[i] >= '0' && s_char[i] <= '9') { ++ continue; ++ } else if (s_char[i] == '$') { ++ continue; ++ } else { ++ break; ++ } ++ } ++ if (i != len - 1){ ++ i = i == -1 ? 
0 : i; ++ result = SymbolTable::new_symbol(s_char, i + 1, t); ++ } ++ return result; ++} ++ ++void JitProfileCacheInfo::jvm_booted_is_done() { ++ _jvm_booted_is_done = true; ++ ProfileCacheClassChain* chain = this->chain(); ++ assert(chain != NULL, "ProfileCacheClassChain is NULL"); ++} ++ ++void ProfileCacheClassChain::preload_class_in_constantpool() { ++ int index = 0; ++ int klass_index = 0; ++ while (true) { ++ InstanceKlass* current_k = NULL; ++ { ++ MutexLockerEx mu(ProfileCacheClassChain_lock); ++ if (index == length()) { ++ break; ++ } ++ ProfileCacheClassChain::ProfileCacheClassChainEntry* e = this->at(index); ++ GrowableArray* array = e->resolved_klasses(); ++ assert(array != NULL, "should not be NULL"); ++ if (e->is_skipped() || e->is_not_loaded() || klass_index >= array->length()) { ++ index++; ++ klass_index = 0; ++ continue; ++ } ++ current_k = array->at(klass_index); ++ } ++ ++ if (current_k != NULL) { ++ current_k->constants()->preload_jprofilecache_classes(Thread::current()); ++ } ++ klass_index++; ++ } ++} ++ ++void JitProfileCacheInfo::notify_precompilation() { ++ ProfileCacheClassChain *chain = this->chain(); ++ assert(chain != NULL, "ProfileCacheClassChain is NULL"); ++ chain->try_transition_to_state(ProfileCacheClassChain::PRE_PROFILECACHE); ++ ++ // preload class ++ jprofilecache_log_info(profilecache)("JProfileCache [INFO]: start preload class from constant pool"); ++ chain->preload_class_in_constantpool(); ++ ++ // precompile cache method ++ jprofilecache_log_info(profilecache)("JProfileCache [INFO]: start profilecache compilation"); ++ chain->precompilation(); ++ Thread *THREAD = Thread::current(); ++ if (HAS_PENDING_EXCEPTION) { ++ return; ++ } ++ ++ JitProfileCache *jpc = this->holder(); ++ Method *dm = jpc->dummy_method(); ++ if (dm != NULL) { ++ guarantee(dm->code() == NULL, "dummy method has been compiled unexceptedly!"); ++ methodHandle mh(THREAD, dm); ++ JitProfileCache::commit_compilation(mh, InvocationEntryBci, THREAD); ++ } ++ if 
(!chain->try_transition_to_state(ProfileCacheClassChain::PROFILECACHE_DONE)) { ++ jprofilecache_log_error(profilecache)("JProfileCache [ERROR]: can not change state to PROFILECACHE_DONE"); ++ } else { ++ jprofilecache_log_info(profilecache)("JProfileCache [INFO]: profilecache compilation is done"); ++ } ++} ++ ++bool JitProfileCacheInfo::should_preload_class(Symbol* s) { ++ SymbolRegexMatcher* matcher = holder()->excluding_matcher(); ++ if (matcher != NULL && matcher->matches(s)) { ++ return false; ++ } ++ int hash = s->identity_hash(); ++ ProfileCacheClassEntry* e = jit_profile_cache_dict()->find_head_entry(hash, s); ++ if (e == NULL) { ++ return false; ++ } ++ if (!CompilationProfileCacheResolveClassEagerly) { ++ int offset = e->chain_offset(); ++ ProfileCacheClassChain::ProfileCacheClassChainEntry* entry = chain()->at(offset); ++ return entry->is_not_loaded(); ++ } else { ++ return true; ++ } ++} ++ ++bool JitProfileCacheInfo::resolve_loaded_klass(InstanceKlass* k) { ++ if (k == NULL) { return false; } ++ if (k->is_jprofilecache_recorded()) { ++ return false; ++ } ++ { ++ MutexLockerEx mu(ProfileCacheClassChain_lock); ++ if (!chain()->can_record_class()) { ++ return false; ++ } ++ } ++ k->set_jprofilecache_recorded(true); ++ chain()->mark_loaded_class(k); ++ return true; ++} ++ ++class RandomFileStreamGuard : StackObj { ++public: ++ RandomFileStreamGuard(randomAccessFileStream* fs) ++ : _file_stream(fs) { ++ } ++ ++ ~RandomFileStreamGuard() { delete _file_stream; } ++ ++ randomAccessFileStream* operator ->() const { return _file_stream; } ++ randomAccessFileStream* operator ()() const { return _file_stream; } ++ ++private: ++ randomAccessFileStream* _file_stream; ++}; ++ ++#define MAX_DEOPT_NUMBER 500 ++ ++void JitProfileCacheInfo::init() { ++ ++ // param check ++ check_param(); ++ if (_state == IS_ERR) { ++ return; ++ } ++ ++ _jit_profile_cache_dict = new JProfileCacheClassDictionary(PRELOAD_CLASS_HS_SIZE); ++ // initialization parameters ++ 
_method_loaded_count = 0; ++ _state = IS_OK; ++ ++ if (ProfilingCacheFile == NULL) { ++ _state = IS_ERR; ++ return; ++ } ++ ++ RandomFileStreamGuard fsg(new (ResourceObj::C_HEAP, mtInternal) randomAccessFileStream( ++ ProfilingCacheFile, "rb+")); ++ JitProfileCacheLogParser parser(fsg(), this); ++ if (!fsg->is_open()) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : JitProfile doesn't exist"); ++ _state = IS_ERR; ++ return; ++ } ++ parser.set_file_size(fsg->fileSize()); ++ ++ // parse header ++ if (!parser.parse_header()) { ++ _state = IS_ERR; ++ return; ++ } ++ ++ // parse class ++ if (!parser.parse_class()) { ++ _state = IS_ERR; ++ return; ++ } ++ ++ // parse method ++ while (parser.has_next_method_record()) { ++ ProfileCacheMethodHold* holder = parser.parse_method(); ++ if (holder != NULL) { ++ // count method parse successfully ++ ++_method_loaded_count; ++ } ++ parser.increment_parsed_number_count(); ++ } ++ jprofilecache_log_info(JitProfileCache)("JProfileCache [INFO]: parsed method number %d successful loaded %" PRIu64, parser.parsed_methods(), _method_loaded_count); ++} ++ ++void JitProfileCacheInfo::check_param() { ++ if (JProfilingCacheRecording) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: you can not set both JProfilingCacheCompileAdvance and JProfilingCacheRecording"); ++ _state = IS_ERR; ++ return; ++ } ++ // check class data sharing ++ if (UseSharedSpaces) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: when enable JProfilingCacheCompileAdvance, UseSharedSpaces must be disable"); ++ _state = IS_ERR; ++ return; ++ } ++ ++ // check CompilationProfileCacheDeoptNumOfMethodsPerIter ++ if (CompilationProfileCacheDeoptNumOfMethodsPerIter == 0 || CompilationProfileCacheDeoptNumOfMethodsPerIter > MAX_DEOPT_NUMBER) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR:CompilationProfileCacheDeoptNumOfMethodsPerIter is invalid must be large than 0 and less than or equal to 500."); 
++ _state = IS_ERR; ++ return; ++ } ++ ++ if (Arguments::mode() == Arguments::_int) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: when enable JProfilingCacheCompileAdvance, should not set -Xint"); ++ _state = IS_ERR; ++ return; ++ } ++} ++ ++ ++ +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileCache.hpp b/hotspot/src/share/vm/jprofilecache/jitProfileCache.hpp +new file mode 100644 +index 000000000..eb250da03 +--- /dev/null ++++ b/hotspot/src/share/vm/jprofilecache/jitProfileCache.hpp +@@ -0,0 +1,568 @@ ++/* ++* Copyright (c) 2025, Huawei and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ */ ++ ++#ifndef SHARED_VM_JPROFILECACHE_JITPROFILECACHE_HPP ++#define SHARED_VM_JPROFILECACHE_JITPROFILECACHE_HPP ++ ++#include "code/codeBlob.hpp" ++#include "libadt/dict.hpp" ++#include "memory/allocation.hpp" ++#include "utilities/hashtable.hpp" ++#include "utilities/linkedlist.hpp" ++#include "utilities/ostream.hpp" ++#include "utilities/globalDefinitions.hpp" ++#include "utilities/growableArray.hpp" ++#include "utilities/symbolRegexMatcher.hpp" ++#include "runtime/timer.hpp" ++#include "runtime/atomic.hpp" ++#include "runtime/jniHandles.hpp" ++#include "runtime/mutexLocker.hpp" ++#include "oops/klass.hpp" ++#include "oops/method.hpp" ++#include "oops/methodData.hpp" ++#include "oops/methodCounters.hpp" ++ ++class JitProfileRecorder; ++class JitProfileCacheInfo; ++ ++ ++#define INVALID_FIRST_INVOKE_INIT_ORDER -1 ++ ++class JitProfileCache : public CHeapObj { ++public: ++ enum JitProfileCacheState { ++ NOT_INIT = 0, ++ IS_OK = 1, ++ IS_ERR = 2 ++ }; ++ ++ unsigned int version() { return _jit_profile_cache_version; } ++ bool is_valid() { return _jit_profile_cache_state == 
JitProfileCache::IS_OK; } ++ ++ void set_dummy_method(Method* m) { _dummy_method = m; } ++ Method* dummy_method() { return _dummy_method; } ++ ++ // init in JVM start ++ void init(); ++ ++ void set_log_level(); ++ ++ static JitProfileCache* create_instance(); ++ static JitProfileCache* instance() { return _jit_profile_cache_instance; } ++ JitProfileRecorder* recorder() { return _jit_profile_cache_recorder; } ++ JitProfileCacheInfo* preloader() { return _jit_profile_cache_info; } ++ // init JProfilingCacheRecording ++ JitProfileCacheState init_for_recording(); ++ // init JProfilingCacheCompileAdvance ++ JitProfileCacheState init_for_profilecache(); ++ ++ SymbolRegexMatcher* excluding_matcher() { return _excluding_matcher; } ++ ++ JitProfileCacheState flush_recorder(); ++ ++ static bool commit_compilation(methodHandle m, int bci, TRAPS); ++ ++ static Symbol* get_class_loader_name(ClassLoaderData* cld); ++ ++ ++ bool profilecacheComplete; ++ ++protected: ++ JitProfileCache(); ++ virtual ~JitProfileCache(); ++ ++private: ++ unsigned int _jit_profile_cache_version; ++ static JitProfileCache* _jit_profile_cache_instance; ++ ++ JitProfileCacheState _jit_profile_cache_state; ++ ++ Method* _dummy_method; ++ JitProfileRecorder* _jit_profile_cache_recorder; ++ JitProfileCacheInfo* _jit_profile_cache_info; ++ SymbolRegexMatcher* _excluding_matcher; ++}; ++ ++// forward class ++class JitProfileRecorder; ++class ProfileCacheClassHolder; ++ ++class BytecodeProfileRecord : public CHeapObj { ++public: ++ BytecodeProfileRecord() { } ++ ~BytecodeProfileRecord() { } ++}; ++ ++class ProfileCacheMethodHold : public CHeapObj { ++ friend class ProfileCacheClassHolder; ++public: ++ ProfileCacheMethodHold(Symbol* name, Symbol* signature); ++ ProfileCacheMethodHold(ProfileCacheMethodHold& rhs); ++ virtual ~ProfileCacheMethodHold(); ++ ++ Symbol* method_name() const { return _method_name; } ++ Symbol* method_signature() const { return _method_signature; } ++ ++ unsigned int invocation_count() 
const { return _invocation_count;} ++ ++ void set_interpreter_invocation_count(unsigned int value) { _interpreter_invocation_count = value; } ++ void set_interpreter_exception_count(unsigned int value) { _interpreter_exception_count = value; } ++ void set_invocation_count(unsigned int value) { _invocation_count = value; } ++ void set_backage_count(unsigned int value) { _backage_count = value; } ++ ++ void set_method_hash(unsigned int value) { _method_hash = value; } ++ void set_method_size(unsigned int value) { _method_size = value; } ++ void set_method_bci(int value) { _method_bci = value; } ++ void set_mounted_offset(int value) { _mounted_offset = value; } ++ ++ bool is_method_deopted() const { return _is_method_deopted; } ++ void set_is_method_deopted(bool value) { _is_method_deopted = value; } ++ ++ bool is_method_match(Method* method); ++ ++ ProfileCacheMethodHold* next() const { return _next; } ++ void set_next(ProfileCacheMethodHold* h) { _next = h; } ++ ++ Method* resolved_method() const { return _resolved_method; } ++ void set_resolved_method(Method* m) { _resolved_method = m; } ++ ++ GrowableArray* method_list() const { return _method_list; } ++ void set_method_list(GrowableArray* value) { _method_list = value; } ++ ++ ProfileCacheMethodHold* clone_and_add(); ++ ++ bool is_alive(BoolObjectClosure* is_alive_closure) const; ++ ++private: ++ Symbol* _method_name; ++ Symbol* _method_signature; ++ ++ unsigned int _method_size; ++ unsigned int _method_hash; ++ int _method_bci; ++ ++ unsigned int _interpreter_invocation_count; ++ unsigned int _interpreter_exception_count; ++ unsigned int _invocation_count; ++ unsigned int _backage_count; ++ ++ int _mounted_offset; ++ ++ bool _owns_method_list; ++ ++ bool _is_method_deopted; ++ ++ // A single LinkedList stores entries with the same initialization order ++ ProfileCacheMethodHold* _next; ++ // The resolved method within the holder's list ++ Method* _resolved_method; ++ // An array of profile information, shared 
among entries with the same ++ GrowableArray* _method_list; ++}; ++ ++class ProfileCacheClassHolder : public CHeapObj { ++public: ++ ProfileCacheClassHolder(Symbol* name, Symbol* loader_name, ++ Symbol* path, unsigned int size, ++ unsigned int hash, unsigned int crc32); ++ virtual ~ProfileCacheClassHolder(); ++ ++ void add_method(ProfileCacheMethodHold* mh) { ++ assert(_class_method_list != NULL, "not initialize"); ++ _class_method_list->append(mh); ++ } ++ ++ unsigned int size() const { return _class_size; } ++ unsigned int hash() const { return _class_hash; } ++ unsigned int crc32() const { return _class_crc32; } ++ unsigned int methods_count() const { return _class_method_list->length(); } ++ Symbol* class_name() const { return _class_name; } ++ Symbol* class_loader_name() const { return _class_loader_name; } ++ Symbol* path() const { return _class_path; } ++ ProfileCacheClassHolder* next() const { return _next; } ++ bool resolved() const { return _class_resolved; } ++ ++ void set_resolved() { _class_resolved = true; } ++ void set_next(ProfileCacheClassHolder* h) { _next = h; } ++ ++ GrowableArray* method_list() const { return _class_method_list; } ++ ++private: ++ Symbol* _class_name; ++ Symbol* _class_loader_name; ++ Symbol* _class_path; ++ ++ unsigned int _class_size; ++ unsigned int _class_hash; ++ unsigned int _class_crc32; ++ unsigned int _class_init_chain_index; ++ ++ bool _class_resolved; ++ ++ GrowableArray* _class_method_list; ++ ++ ProfileCacheClassHolder* _next; ++}; ++ ++class ProfileCacheClassEntry : public HashtableEntry { ++ friend class JitProfileCacheInfo; ++public: ++ ProfileCacheClassEntry(ProfileCacheClassHolder* holder) ++ : _head_holder(holder), ++ _chain_offset(-1), ++ _class_loader_name(NULL), ++ _class_path(NULL) { ++ } ++ ++ ProfileCacheClassEntry() ++ : _head_holder(NULL), ++ _chain_offset(-1), ++ _class_loader_name(NULL), ++ _class_path(NULL) { ++ } ++ ++ virtual ~ProfileCacheClassEntry() { } ++ ++ void init() { ++ _head_holder = 
NULL; ++ _chain_offset = -1; ++ _class_loader_name = NULL; ++ _class_path = NULL; ++ } ++ ++ ProfileCacheClassHolder* head_holder() { return _head_holder; } ++ void set_head_holder(ProfileCacheClassHolder* h) { _head_holder = h; } ++ ++ int chain_offset() { return _chain_offset; } ++ void set_chain_offset(int offset) { _chain_offset = offset; } ++ ++ Symbol* class_loader_name() { return _class_loader_name; } ++ void set_class_loader_name(Symbol* s) { _class_loader_name = s; } ++ Symbol* class_path() { return _class_path; } ++ void set_class_path(Symbol* s) { _class_path = s; } ++ ++ ++ ProfileCacheClassEntry* next() { ++ return (ProfileCacheClassEntry*)HashtableEntry::next(); ++ } ++ ++ void add_class_holder(ProfileCacheClassHolder* h) { ++ h->set_next(_head_holder); ++ _head_holder = h; ++ } ++ ++ ProfileCacheClassHolder* find_class_holder(unsigned int size, unsigned int crc32); ++ ++private: ++ int _chain_offset; ++ Symbol* _class_loader_name; ++ Symbol* _class_path; ++ ProfileCacheClassHolder* _head_holder; ++ ++}; ++ ++class JProfileCacheClassDictionary : public Hashtable { ++public: ++ JProfileCacheClassDictionary(int size); ++ virtual ~JProfileCacheClassDictionary(); ++ ++ ProfileCacheClassEntry* find_entry(unsigned int hash_value, Symbol* name, ++ Symbol* loader_name, Symbol* path); ++ ++ ProfileCacheClassEntry* find_head_entry(unsigned int hash_value, Symbol* name); ++ ++ ProfileCacheClassEntry* find_entry(InstanceKlass* k); ++ ++ ProfileCacheClassEntry* bucket(int i) { ++ return (ProfileCacheClassEntry*)Hashtable::bucket(i); ++ } ++ ++ ProfileCacheClassEntry* find_or_create_class_entry(unsigned int hash_value, Symbol* symbol, ++ Symbol* loader_name, Symbol* path, ++ int order); ++ ++private: ++ ++ ProfileCacheClassEntry* new_entry(Symbol* symbol); ++}; ++ ++class ProfileCacheClassChain; ++ ++class MethodHolderIterator { ++public: ++ MethodHolderIterator() ++ : _profile_cache_class_chain(NULL), ++ _current_method_hold(NULL), ++ _holder_index(-1) { ++ } ++ 
++ MethodHolderIterator(ProfileCacheClassChain* chain, ProfileCacheMethodHold* holder, int index) ++ : _profile_cache_class_chain(chain), ++ _current_method_hold(holder), ++ _holder_index(index) { ++ } ++ ++ ~MethodHolderIterator() { } ++ ++ ProfileCacheMethodHold* operator*() { return _current_method_hold; } ++ ++ int index() { return _holder_index; } ++ ++ bool initialized() { return _profile_cache_class_chain != NULL; } ++ ++ ProfileCacheMethodHold* next(); ++ ++private: ++ int _holder_index; // current holder's position in ProfileCacheClassChain ++ ProfileCacheClassChain* _profile_cache_class_chain; ++ ProfileCacheMethodHold* _current_method_hold; ++ ++}; ++ ++class ProfileCacheClassChain : public CHeapObj { ++public: ++ class ProfileCacheClassChainEntry : public CHeapObj { ++ public: ++ enum ClassState { ++ _not_loaded = 0, ++ _load_skipped, ++ _class_loaded, ++ _class_inited ++ }; ++ ++ ProfileCacheClassChainEntry() ++ : _class_name(NULL), ++ _class_loader_name(NULL), ++ _class_path(NULL), ++ _class_state(_not_loaded), ++ _method_holder(NULL), ++ _resolved_klasses(new (ResourceObj::C_HEAP, mtClass) ++ GrowableArray(1, true, mtClass)), ++ _method_keep_holders(new (ResourceObj::C_HEAP, mtClass) ++ GrowableArray(1, true)) { } ++ ++ ProfileCacheClassChainEntry(Symbol* class_name, Symbol* loader_name, Symbol* path) ++ : _class_name(class_name), ++ _class_loader_name(loader_name), ++ _class_path(path), ++ _class_state(_not_loaded), ++ _method_holder(NULL), ++ _resolved_klasses(new (ResourceObj::C_HEAP, mtClass) ++ GrowableArray(1, true, mtClass)), ++ _method_keep_holders(new (ResourceObj::C_HEAP, mtClass) ++ GrowableArray(1, true)) { } ++ ++ virtual ~ProfileCacheClassChainEntry() { ++ if(!_method_keep_holders->is_empty()) { ++ int len = _method_keep_holders->length(); ++ for (int i = 0; i < len; i++) { ++ JNIHandles::destroy_global(_method_keep_holders->at(i)); ++ } ++ } ++ } ++ ++ Symbol* class_name() const { return _class_name; } ++ Symbol* class_loader_name() 
const { return _class_loader_name; } ++ Symbol* class_path() const { return _class_path; } ++ void set_class_name(Symbol* name) { _class_name = name; } ++ void set_class_loader_name(Symbol* name) { _class_loader_name = name; } ++ void set_class_path(Symbol* path) { _class_path = path; } ++ ++ GrowableArray* resolved_klasses() ++ { return _resolved_klasses; } ++ ++ GrowableArray* method_keep_holders() ++ { return _method_keep_holders; } ++ ++ // entry state ++ bool is_not_loaded() const { return _class_state == _not_loaded; } ++ bool is_skipped() const { return _class_state == _load_skipped; } ++ bool is_loaded() const { return _class_state == _class_loaded; } ++ bool is_inited() const { return _class_state == _class_inited; } ++ void set_not_loaded() { _class_state = _not_loaded; } ++ void set_skipped() { _class_state = _load_skipped; } ++ void set_loaded() { _class_state = _class_loaded; } ++ void set_inited() { _class_state = _class_inited; } ++ ++ void set_class_state(int state) { _class_state = state;} ++ ++ int class_state() { return _class_state; } ++ ++ void add_method_holder(ProfileCacheMethodHold* h) { ++ h->set_next(_method_holder); ++ _method_holder = h; ++ } ++ ++ bool is_all_initialized(); ++ ++ bool contains_redefined_class(); ++ ++ InstanceKlass* get_first_uninitialized_klass(); ++ ++ ProfileCacheMethodHold* method_holder() { return _method_holder; } ++ ++ private: ++ int _class_state; ++ ++ Symbol* _class_name; ++ Symbol* _class_loader_name; ++ Symbol* _class_path; ++ ++ ProfileCacheMethodHold* _method_holder; ++ GrowableArray* _resolved_klasses; ++ GrowableArray* _method_keep_holders; ++ }; ++ ++ ProfileCacheClassChain(unsigned int size); ++ virtual ~ProfileCacheClassChain(); ++ ++ enum ClassChainState { ++ NOT_INITED = 0, ++ INITED = 1, ++ PRE_PROFILECACHE = 2, ++ PROFILECACHE_COMPILING = 3, ++ PROFILECACHE_DONE = 4, ++ PROFILECACHE_PRE_DEOPTIMIZE = 5, ++ PROFILECACHE_DEOPTIMIZING = 6, ++ PROFILECACHE_DEOPTIMIZED = 7, ++ PROFILECACHE_ERROR_STATE = 
8 ++ }; ++ const char* get_state(ClassChainState state); ++ bool try_transition_to_state(ClassChainState new_state); ++ ClassChainState current_state() { return _state; } ++ ++ int class_chain_inited_index() const { return _class_chain_inited_index; } ++ int loaded_index() const { return _loaded_class_index; } ++ int length() const { return _length; } ++ ++ void set_loaded_index(int index) { _loaded_class_index = index; } ++ void set_length(int length) { _length = length; } ++ void set_inited_index(int index) { _class_chain_inited_index = index; } ++ ++ JitProfileCacheInfo* holder() { return _holder; } ++ void set_holder(JitProfileCacheInfo* preloader) { _holder = preloader; } ++ ++ bool notify_deopt_signal() { ++ return try_transition_to_state(PROFILECACHE_PRE_DEOPTIMIZE); ++ } ++ ++ bool can_record_class() { ++ return _state == INITED || _state == PRE_PROFILECACHE || _state == PROFILECACHE_COMPILING; ++ } ++ ++ bool deopt_has_done() { ++ return _state == PROFILECACHE_DEOPTIMIZED; ++ } ++ ++ void mark_loaded_class(InstanceKlass* klass); ++ ++ ProfileCacheClassChainEntry* at(int index) { return &_entries[index]; } ++ ++ void refresh_indexes(); ++ ++ void precompilation(); ++ ++ ++ // count method ++ void add_method_at_index(ProfileCacheMethodHold* mh, int index); ++ ++ bool compile_method(ProfileCacheMethodHold* mh); ++ ++ void unload_class(BoolObjectClosure* is_alive); ++ ++ void deopt_prologue(); ++ void deopt_epilogue(); ++ ++ bool should_deoptimize_methods(); ++ ++ // deoptimize number methods per invocation ++ void deoptimize_methods(); ++ ++ void invoke_deoptimize_vmop(); ++ ++ void preload_class_in_constantpool(); ++ ++private: ++ int _class_chain_inited_index; ++ int _loaded_class_index; ++ int _length; ++ ++ volatile ClassChainState _state; ++ ++ ProfileCacheClassChainEntry* _entries; ++ ++ JitProfileCacheInfo* _holder; ++ ++ TimeStamp _init_timestamp; ++ TimeStamp _last_timestamp; ++ ++ int _deopt_index; ++ ProfileCacheMethodHold* _deopt_cur_holder; ++ ++ 
bool _has_unmarked_compiling_flag; ++ ++ void handle_duplicate_class(InstanceKlass* k, int chain_index); ++ ++ void resolve_class_methods(InstanceKlass* k, ProfileCacheClassHolder* holder, int chain_index); ++ ++ void update_class_chain(InstanceKlass* ky, int chain_index); ++ ++ void compile_methodholders_queue(Stack& compile_queue); ++ ++ void update_loaded_index(int index); ++ ++ ProfileCacheMethodHold* resolve_method_info(Method* method, ++ ProfileCacheClassHolder* holder); ++}; ++ ++class JitProfileCacheInfo : public CHeapObj { ++public: ++ enum JitProfileCacheInfoState { ++ NOT_INIT = 0, ++ IS_OK = 1, ++ IS_ERR = 2 ++ }; ++ ++ JitProfileCacheInfo(); ++ virtual ~JitProfileCacheInfo(); ++ ++ bool is_valid() { return _state == IS_OK; } ++ void init(); ++ void check_param(); ++ ++ bool should_preload_class(Symbol* s); ++ ++ JProfileCacheClassDictionary* jit_profile_cache_dict() { return _jit_profile_cache_dict; } ++ uint64_t loaded_count() { return _method_loaded_count; } ++ ++ ProfileCacheClassChain* chain() { return _profile_cache_chain; } ++ void set_chain(ProfileCacheClassChain* chain) { _profile_cache_chain = chain; } ++ ++ JitProfileCache* holder() { return _holder; } ++ void set_holder(JitProfileCache* h) { _holder = h; } ++ ++ bool resolve_loaded_klass(InstanceKlass* klass); ++ ++ void jvm_booted_is_done(); ++ ++ void notify_precompilation(); ++ ++ static Symbol* remove_meaningless_suffix(Symbol* s); ++ ++private: ++ JitProfileCacheInfoState _state; ++ JitProfileCache* _holder; ++ JProfileCacheClassDictionary* _jit_profile_cache_dict; ++ ProfileCacheClassChain* _profile_cache_chain; ++ uint64_t _method_loaded_count; ++ bool _jvm_booted_is_done; ++}; ++ ++#endif //SHARED_VM_JPROFILECACHE_JITPROFILECACHE_HPP +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileCacheDcmds.cpp b/hotspot/src/share/vm/jprofilecache/jitProfileCacheDcmds.cpp +new file mode 100644 +index 000000000..d643d6f97 +--- /dev/null ++++ 
b/hotspot/src/share/vm/jprofilecache/jitProfileCacheDcmds.cpp +@@ -0,0 +1,130 @@ ++/* ++* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++* ++*/ ++ ++#include "precompiled.hpp" ++#include "classfile/javaClasses.hpp" ++#include "classfile/vmSymbols.hpp" ++#include "jprofilecache/jitProfileCache.hpp" ++#include "jprofilecache/jitProfileCacheThread.hpp" ++#include "jprofilecache/jitProfileCacheDcmds.hpp" ++#include "memory/resourceArea.hpp" ++#include "oops/oop.inline.hpp" ++#include "oops/symbol.hpp" ++#include "runtime/handles.inline.hpp" ++#include "services/diagnosticArgument.hpp" ++#include "services/diagnosticFramework.hpp" ++#include "utilities/globalDefinitions.hpp" ++#include "runtime/javaCalls.hpp" ++#include "runtime/init.hpp" ++ ++ ++JitProfileCacheDCmds::JitProfileCacheDCmds(outputStream* output, bool heap_allocated) : DCmdWithParser(output, heap_allocated), ++ _notify_precompile("-notify", "Notify JVM can start precompile", "BOOLEAN", false, "false"), ++ _check_compile_finished("-check", "Check if the last precompilation submitted by JProfileCache is complete", "BOOLEAN", false, "false"), ++ _deoptimize_compilation("-deopt", "Notify JVM to de-optimize precompile methods", "BOOLEAN", false, "false"), ++ _help("-help", "Print this help information", "BOOLEAN", false, "false") ++{ ++ _dcmdparser.add_dcmd_option(&_notify_precompile); ++ _dcmdparser.add_dcmd_option(&_check_compile_finished); ++ _dcmdparser.add_dcmd_option(&_deoptimize_compilation); ++ _dcmdparser.add_dcmd_option(&_help); ++} ++ ++int JitProfileCacheDCmds::num_arguments() { ++ ResourceMark rm; ++ JitProfileCacheDCmds* dcmd = new JitProfileCacheDCmds(NULL, false); ++ if (dcmd != NULL) { ++ DCmdMark mark(dcmd); ++ return dcmd->_dcmdparser.num_arguments(); ++ } else { ++ return 0; ++ } ++} ++ ++void JitProfileCacheDCmds::execute(DCmdSource source, TRAPS) { ++ assert(is_init_completed(), "JVM is not fully initialized. 
Please try it later."); ++ ++ Klass* profilecache_class = SystemDictionary::resolve_or_fail(vmSymbols::com_huawei_jprofilecache_JProfileCache(), true, CHECK); ++ instanceKlassHandle profilecacheClass (THREAD, profilecache_class); ++ if (profilecacheClass->should_be_initialized()) { ++ profilecacheClass->initialize(THREAD); ++ } ++ ++ if (checkAndHandlePendingExceptions(output(), THREAD)) { ++ return; ++ } ++ ++ if (_notify_precompile.value()) { ++ execute_trigger_precompilation(profilecacheClass, output(), THREAD); ++ } else if (_check_compile_finished.value()) { ++ execute_checkCompilation_finished(profilecacheClass, output(), THREAD); ++ } else if (_deoptimize_compilation.value()) { ++ execute_notifyDeopt_profileCache(profilecacheClass, output(), THREAD); ++ } else { ++ print_help_info(); ++ } ++} ++ ++void JitProfileCacheDCmds::execute_trigger_precompilation(instanceKlassHandle profilecacheClass, outputStream* output, Thread* THREAD) { ++ if (!JProfilingCacheCompileAdvance) { ++ output->print_cr("JProfilingCacheCompileAdvance is off, triggerPrecompilation is invalid"); ++ return; ++ } ++ ++ JavaValue result(T_VOID); ++ JavaCalls::call_static(&result, profilecacheClass, vmSymbols::jprofilecache_trigger_precompilation_name(), vmSymbols::void_method_signature(), THREAD); ++ if (checkAndHandlePendingExceptions(output, THREAD)) { ++ return; ++ } ++} ++ ++void JitProfileCacheDCmds::execute_checkCompilation_finished(instanceKlassHandle profilecacheClass, outputStream* output, Thread* THREAD) { ++ if (!JProfilingCacheCompileAdvance) { ++ output->print_cr("JProfilingCacheCompileAdvance is off, checkIfCompilationIsComplete is invalid"); ++ return; ++ } ++ ++ JavaValue result(T_BOOLEAN); ++ JavaCalls::call_static(&result, profilecacheClass, vmSymbols::jprofilecache_check_if_compilation_is_complete_name(), vmSymbols::void_boolean_signature(), THREAD); ++ if (checkAndHandlePendingExceptions(output, THREAD)) { ++ return; ++ } ++ ++ if (result.get_jboolean()) { ++ 
output->print_cr("Last compilation task has compile finished."); ++ } else { ++ output->print_cr("Last compilation task not compile finish."); ++ } ++} ++ ++void JitProfileCacheDCmds::execute_notifyDeopt_profileCache(instanceKlassHandle profilecacheClass, outputStream* output, Thread* THREAD) { ++ if (!(JProfilingCacheCompileAdvance && CompilationProfileCacheExplicitDeopt)) { ++ output->print_cr("JProfilingCacheCompileAdvance or CompilationProfileCacheExplicitDeopt is off, notifyJVMDeoptProfileCacheMethods is invalid"); ++ return; ++ } ++ ++ JavaValue result(T_VOID); ++ JavaCalls::call_static(&result, profilecacheClass, vmSymbols::jprofilecache_notify_jvm_deopt_profilecache_methods_name(), vmSymbols::void_method_signature(), THREAD); ++ if (checkAndHandlePendingExceptions(output, THREAD)) { ++ return; ++ } ++} ++ ++bool JitProfileCacheDCmds::checkAndHandlePendingExceptions(outputStream* out, Thread* THREAD) { ++ if (HAS_PENDING_EXCEPTION) { ++ java_lang_Throwable::print(PENDING_EXCEPTION, out); ++ CLEAR_PENDING_EXCEPTION; ++ return true; ++ } ++ return false; ++} ++void JitProfileCacheDCmds::print_help_info() { ++ output()->print_cr("The following commands are available:\n" ++ "-notify: %s\n" ++ "-check: %s\n" ++ "-deopt: %s\n" ++ "-help: %s\n", ++ _notify_precompile.description(), _check_compile_finished.description(), _deoptimize_compilation.description(), _help.description()); ++} +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileCacheDcmds.hpp b/hotspot/src/share/vm/jprofilecache/jitProfileCacheDcmds.hpp +new file mode 100644 +index 000000000..14d1d7012 +--- /dev/null ++++ b/hotspot/src/share/vm/jprofilecache/jitProfileCacheDcmds.hpp +@@ -0,0 +1,52 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++#ifndef SHARE_JPROFILECACHE_JITPROFILECACHEDCMDS_HPP ++#define SHARE_JPROFILECACHE_JITPROFILECACHEDCMDS_HPP ++ ++#include "services/diagnosticCommand.hpp" ++ ++class JitProfileCacheDCmds : public DCmdWithParser { ++public: ++ JitProfileCacheDCmds(outputStream* output, bool heap_allocated); ++ static const char* name() { ++ return "JProfilecache"; ++ } ++ static const char* description() { ++ return "JProfilecache command. 
"; ++ } ++ static int num_arguments(); ++ virtual void execute(DCmdSource source, TRAPS); ++protected: ++ DCmdArgument _notify_precompile; ++ DCmdArgument _check_compile_finished; ++ DCmdArgument _deoptimize_compilation; ++ DCmdArgument _help; ++ void print_help_info(); ++ void execute_trigger_precompilation(instanceKlassHandle profilecacheClass, outputStream* output, Thread* THREAD); ++ void execute_checkCompilation_finished(instanceKlassHandle profilecacheClass, outputStream* output, Thread* THREAD); ++ void execute_notifyDeopt_profileCache(instanceKlassHandle profilecacheClass, outputStream* output, Thread* THREAD); ++ bool checkAndHandlePendingExceptions(outputStream* out, Thread* THREAD); ++}; ++ ++#endif // SHARE_JPROFILECACHE_JITPROFILECACHEDCMDS_HPP +\ No newline at end of file +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileCacheLog.cpp b/hotspot/src/share/vm/jprofilecache/jitProfileCacheLog.cpp +new file mode 100644 +index 000000000..e86d63e74 +--- /dev/null ++++ b/hotspot/src/share/vm/jprofilecache/jitProfileCacheLog.cpp +@@ -0,0 +1,26 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++#include "jprofilecache/jitProfileCacheLog.hpp" ++ ++int LogLevel::LogLevelNum = LogLevel::Info; +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileCacheLog.hpp b/hotspot/src/share/vm/jprofilecache/jitProfileCacheLog.hpp +new file mode 100644 +index 000000000..174e996b6 +--- /dev/null ++++ b/hotspot/src/share/vm/jprofilecache/jitProfileCacheLog.hpp +@@ -0,0 +1,89 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef SHARE_VM_JPROFILECACHE_JITPROFILECACHELOG_HPP ++#define SHARE_VM_JPROFILECACHE_JITPROFILECACHELOG_HPP ++ ++/* ++ * We use this file to be compatible with log framework in JDK11 ++ * To avoid conflict with jfr log, it's only used by jitProfileCache.cpp ++ */ ++ ++#include "runtime/os.hpp" ++#include "utilities/debug.hpp" ++ ++#ifdef jprofilecache_log_error ++#undef jprofilecache_log_error ++#endif ++#ifdef jprofilecache_log_warning ++#undef jprofilecache_log_warning ++#endif ++#ifdef jprofilecache_log_info ++#undef jprofilecache_log_info ++#endif ++#ifdef jprofilecache_log_debug ++#undef jprofilecache_log_debug ++#endif ++#ifdef jprofilecache_log_trace ++#undef jprofilecache_log_trace ++#endif ++#ifdef jprofilecache_log_enabled ++#undef jprofilecache_log_enabled ++#endif ++ ++#define jprofilecache_log_error(...) (!log_is_enabled(Error, __VA_ARGS__)) ? (void)0 : tty->print_cr ++#define jprofilecache_log_warning(...) (!log_is_enabled(Warning, __VA_ARGS__)) ? (void)0 : tty->print_cr ++#define jprofilecache_log_info(...) (!log_is_enabled(Info, __VA_ARGS__)) ? (void)0 : tty->print_cr ++#define jprofilecache_log_debug(...) (!log_is_enabled(Debug, __VA_ARGS__)) ? (void)0 : tty->print_cr ++#define jprofilecache_log_trace(...) (!log_is_enabled(Trace, __VA_ARGS__)) ? (void)0 : tty->print_cr ++ ++ ++#define log_is_enabled(level, ...) 
(JitProfileCacheLog::is_level(LogLevel::level)) ++ ++class LogLevel : public AllStatic { ++public: ++ enum type { ++ Trace, ++ Debug, ++ Info, ++ Warning, ++ Error, ++ Off ++ }; ++ static int LogLevelNum; ++}; ++typedef LogLevel::type LogLevelType; ++ ++class JitProfileCacheLog { ++public: ++ static bool is_level(LogLevelType level) { ++ if (level >= LogLevel::LogLevelNum) { ++ return true; ++ } ++ ++ return false; ++ } ++}; ++ ++#endif +\ No newline at end of file +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileCacheLogParser.cpp b/hotspot/src/share/vm/jprofilecache/jitProfileCacheLogParser.cpp +new file mode 100644 +index 000000000..72e6801c6 +--- /dev/null ++++ b/hotspot/src/share/vm/jprofilecache/jitProfileCacheLogParser.cpp +@@ -0,0 +1,399 @@ ++/* ++* Copyright (c) 2025, Huawei and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "runtime/arguments.hpp" ++#include "runtime/fieldType.hpp" ++#include "runtime/javaCalls.hpp" ++#include "runtime/thread.hpp" ++#include "runtime/atomic.hpp" ++#include "classfile/classLoaderData.inline.hpp" ++#include "classfile/symbolTable.hpp" ++#include "classfile/systemDictionary.hpp" ++#include "compiler/compileBroker.hpp" ++#include "jprofilecache/jitProfileCache.hpp" ++#include "jprofilecache/jitProfileRecord.hpp" ++#include "jitProfileCacheLogParser.hpp" ++#include "jprofilecache/jitProfileCacheLog.hpp" ++#include "libadt/dict.hpp" ++ ++// offset ++#define PROFILECACHE_VERSION_OFFSET 0 ++#define PROFILECACHE_MAGIC_NUMBER_OFFSET 4 ++#define FILE_SIZE_OFFSET 8 ++#define PROFILECACHE_CRC32_OFFSET 12 ++#define APPID_OFFSET 16 ++#define MAX_SYMBOL_LENGTH_OFFSET 20 ++#define RECORD_COUNT_OFFSET 24 ++#define PROFILECACHE_TIME_OFFSET 28 ++ ++#define HEADER_SIZE 36 ++ ++// width section ++#define RECORE_VERSION_WIDTH (PROFILECACHE_MAGIC_NUMBER_OFFSET - PROFILECACHE_VERSION_OFFSET) ++#define RECORE_MAGIC_WIDTH (FILE_SIZE_OFFSET - PROFILECACHE_MAGIC_NUMBER_OFFSET) ++#define FILE_SIZE_WIDTH (PROFILECACHE_CRC32_OFFSET - FILE_SIZE_OFFSET) ++#define RECORE_CRC32_WIDTH (APPID_OFFSET - PROFILECACHE_CRC32_OFFSET) ++#define RECORE_APPID_WIDTH (MAX_SYMBOL_LENGTH_OFFSET - APPID_OFFSET) ++#define RECORE_MAX_SYMBOL_LENGTH_WIDTH (RECORD_COUNT_OFFSET - MAX_SYMBOL_LENGTH_OFFSET) ++#define RECORD_COUNTS_WIDTH (PROFILECACHE_TIME_OFFSET - RECORD_COUNT_OFFSET) ++#define RECORE_TIME_WIDTH (HEADER_SIZE - PROFILECACHE_TIME_OFFSET) ++ ++// value ++#define MAGIC_NUMBER 0xBABA ++#define RECORE_FILE_DEFAULT_NUMBER 0 ++#define RECORE_CRC32_DEFAULT_NUMBER 0 ++ ++#define ARENA_SIZE 128 ++#define READ_U1_INTERVAL 1 ++#define READ_U4_INTERVAL 4 ++#define READ_U8_INTERVAL 8 ++ ++#define JVM_DEFINE_CLASS_PATH "_JVM_DefineClass_" ++ ++JitProfileCacheLogParser::JitProfileCacheLogParser(randomAccessFileStream* fs, JitProfileCacheInfo* holder) ++ : _is_valid(false), ++ 
_has_parsed_header(false), ++ _position(0), ++ _parsed_method_count(0), ++ _total_recorder_method(0), ++ _file_size(0), ++ _file_stream(fs), ++ _max_symbol_length(0), ++ _parse_str_buf(NULL), ++ _holder(holder), ++ _arena(new (mtInternal) Arena(mtInternal, ARENA_SIZE)) { ++} ++ ++JitProfileCacheLogParser::~JitProfileCacheLogParser() { ++ delete _arena; ++} ++ ++char parse_int_buf[8]; ++u1 JitProfileCacheLogParser::read_u1() { ++ _file_stream->read(parse_int_buf, 1, 1); ++ _position += READ_U1_INTERVAL; ++ return *(u1*)parse_int_buf; ++} ++ ++u4 JitProfileCacheLogParser::read_u4() { ++ _file_stream->read(parse_int_buf, READ_U4_INTERVAL, 1); ++ _position += READ_U4_INTERVAL; ++ return *(u4*)parse_int_buf; ++} ++ ++u8 JitProfileCacheLogParser::read_u8() { ++ _file_stream->read(parse_int_buf, READ_U8_INTERVAL, 1); ++ _position += READ_U8_INTERVAL; ++ return *(u8*)parse_int_buf; ++} ++ ++const char* JitProfileCacheLogParser::read_string() { ++ int current_read_pos = 0; ++ do { ++ _file_stream->read(_parse_str_buf + current_read_pos, 1, 1); ++ current_read_pos++; ++ } while (*(_parse_str_buf + current_read_pos - 1) != '\0' ++ && current_read_pos <= _max_symbol_length + 1); ++ ++ _position += current_read_pos; ++ int actual_string_length = current_read_pos - 1; ++ if (actual_string_length == 0) { ++ jprofilecache_log_warning(profilecache)("[JitProfileCache] WARNING : Parsed empty symbol at position %d\n", _position); ++ return ""; ++ } else if (actual_string_length > max_symbol_length()) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : The parsed symbol length exceeds %d\n", max_symbol_length()); ++ return NULL; ++ } else { ++ char* parsed_string = NEW_RESOURCE_ARRAY(char, actual_string_length + 1); ++ memcpy(parsed_string, _parse_str_buf, actual_string_length + 1); ++ return parsed_string; ++ } ++} ++ ++#define MAX_COUNT_VALUE (1024 * 1024 * 128) ++ ++bool JitProfileCacheLogParser::logparse_illegal_check(const char* s, bool ret_value, int 
end_position) { ++ if (_position > end_position) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : read out of bound, " ++ "file format error"); ++ return ret_value; ++ } ++ if (s == NULL) { ++ _position = end_position; ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : illegal string in log file"); ++ return ret_value; ++ } ++ return true; ++} ++ ++bool JitProfileCacheLogParser::logparse_illegal_count_check(int cnt, bool ret_value, int end_position) { ++ if (_position > end_position) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : read out of bound, " ++ "file format error"); ++ return ret_value; ++ } ++ if ((u4)cnt > MAX_COUNT_VALUE) { ++ _position = end_position; ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : illegal count (" ++ UINT32_FORMAT ") too big", cnt); ++ return ret_value; ++ } ++ return true; ++} ++ ++bool JitProfileCacheLogParser::should_ignore_this_class(Symbol* symbol) { ++ // deal with spring auto-generated ++ ResourceMark rm; ++ char* name = symbol->as_C_string(); ++ const char* CGLIB_SIG = "CGLIB$$"; ++ const char* ACCESSER_SUFFIX = "ConstructorAccess"; ++ if (::strstr(name, CGLIB_SIG) != NULL || ++ ::strstr(name, ACCESSER_SUFFIX) != NULL) { ++ return true; ++ } ++ JitProfileCache* jprofilecache = info_holder()->holder(); ++ SymbolRegexMatcher* matcher = jprofilecache->excluding_matcher(); ++ if (matcher == NULL) { ++ return false; ++ } ++ return matcher->matches(symbol); ++} ++ ++#define SYMBOL_TERMINATOR_SPACE 2 ++ ++bool JitProfileCacheLogParser::parse_header() { ++ int begin_position = _position; ++ int end_position = begin_position + HEADER_SIZE; ++ u4 parse_version = read_u4(); ++ u4 parse_magic_number = read_u4(); ++ u4 parse_file_size = read_u4(); ++ int parse_crc32_recorded = (int)read_u4(); ++ u4 appid = read_u4(); ++ unsigned int version = JitProfileCache::instance()->version(); ++ ++ if (parse_version != version) { ++ _is_valid = false; ++ 
jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : Version mismatch, expect %d but %d", version, parse_version); ++ return false; ++ } ++ if (parse_magic_number != MAGIC_NUMBER ++ || (long)parse_file_size != this->file_size()) { ++ _is_valid = false; ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : illegal header"); ++ return false; ++ } ++ // valid appid ++ if (CompilationProfileCacheAppID != 0 && CompilationProfileCacheAppID != appid) { ++ _is_valid = false; ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : illegal CompilationProfileCacheAppID"); ++ return false; ++ } ++ // valid crc32 ++ int crc32_actual = JitProfileRecorder::compute_crc32(_file_stream); ++ if (parse_crc32_recorded != crc32_actual) { ++ _is_valid = false; ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : JitProfile crc32 check failure"); ++ return false; ++ } ++ ++ u4 parse_max_symbol_length = read_u4(); ++ logparse_illegal_count_check(parse_max_symbol_length, false, end_position); ++ _parse_str_buf = (char*)_arena->Amalloc(parse_max_symbol_length + SYMBOL_TERMINATOR_SPACE); ++ _max_symbol_length = (int)parse_max_symbol_length; ++ ++ u4 parse_record_count = read_u4(); ++ logparse_illegal_count_check(parse_record_count, false, end_position); ++ _total_recorder_method = parse_record_count; ++ u4 utc_time = read_u8(); ++ _is_valid = true; ++ return true; ++} ++ ++Symbol* JitProfileCacheLogParser::create_symbol(const char* char_name) { ++ return SymbolTable::new_symbol(char_name, strlen(char_name), Thread::current()); ++} ++ ++bool JitProfileCacheLogParser::parse_class() { ++ ResourceMark rm; ++ int begin_position = _position; ++ u4 section_size = read_u4(); ++ int end_position = begin_position + (int)section_size; ++ u4 parse_cnt = read_u4(); ++ logparse_illegal_count_check(parse_cnt, false, end_position); ++ ++ ProfileCacheClassChain* chain = new ProfileCacheClassChain(parse_cnt); ++ info_holder()->set_chain(chain); ++ 
chain->set_holder(this->info_holder()); ++ ++ for (int i = 0; i < (int)parse_cnt; i++) { ++ const char* parse_name_char = read_string(); ++ logparse_illegal_check(parse_name_char, false, end_position); ++ const char* parse_loader_char = read_string(); ++ logparse_illegal_check(parse_loader_char, false, end_position); ++ const char* parse_path_char = read_string(); ++ logparse_illegal_check(parse_path_char, false, end_position); ++ Symbol* name = create_symbol(parse_name_char); ++ Symbol* loader_name = create_symbol(parse_loader_char); ++ Symbol* path = create_symbol(parse_path_char); ++ loader_name = JitProfileCacheInfo::remove_meaningless_suffix(loader_name); ++ chain->at(i)->set_class_name(name); ++ chain->at(i)->set_class_loader_name(loader_name); ++ chain->at(i)->set_class_path(path); ++ ++ check_class(i, name, loader_name, path, chain); ++ ++ } // end of for loop ++ ++ // check section size ++ if (_position - begin_position != (int)section_size) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : JitProfile class parse fail"); ++ return false; ++ } ++ return true; ++} ++ ++void JitProfileCacheLogParser::check_class(int i, Symbol* name, Symbol* loader_name, Symbol* path, ProfileCacheClassChain* chain) { ++ // add to preload class dictionary ++ unsigned int hash_value = name->identity_hash(); ++ ProfileCacheClassEntry* e = info_holder()->jit_profile_cache_dict()-> ++ find_or_create_class_entry(hash_value, name, loader_name, path, i); ++ // e->chain_offset() < i : means same class symbol already existed in the chain ++ // should_ignore_this_class(name): means this class is in skipped list(build-in or user-defined) ++ // so set entry state is skipped, will be ignored in JitProfileCache ++ if (e->chain_offset() < i || should_ignore_this_class(name)) { ++ chain->at(i)->set_skipped(); ++ } else { ++ Symbol* name_no_suffix = JitProfileCacheInfo::remove_meaningless_suffix(name); ++ if (name_no_suffix->fast_compare(name) != 0) { ++ unsigned int 
hash_no_suffix = name_no_suffix->identity_hash(); ++ ProfileCacheClassEntry* e_no_suffix = info_holder()->jit_profile_cache_dict()-> ++ find_or_create_class_entry(hash_no_suffix, name_no_suffix, loader_name, path, i); ++ if (e_no_suffix->chain_offset() < i) { ++ chain->at(i)->set_skipped(); ++ } ++ } ++ } ++} ++ ++bool JitProfileCacheLogParser::valid() { ++ if(!_has_parsed_header) { ++ parse_header(); ++ } ++ return _is_valid; ++} ++ ++bool JitProfileCacheLogParser::has_next_method_record() { ++ return _parsed_method_count < _total_recorder_method && _position < _file_size; ++} ++ ++ProfileCacheMethodHold* JitProfileCacheLogParser::parse_method() { ++ ResourceMark rm; ++ _file_stream->seek(_position, SEEK_SET); ++ int begin_position = _position; ++ u4 section_size = read_u4(); ++ int end_position = begin_position + section_size; ++ ++ u4 comp_order = read_u4(); ++ u1 compilation_type = read_u1(); ++ if (compilation_type != 0 && compilation_type != 1) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : illegal compilation type in JitProfile"); ++ _position = end_position; ++ return NULL; ++ } ++ // parse method info ++ const char* parse_method_name_char = read_string(); ++ logparse_illegal_check(parse_method_name_char, NULL, end_position); ++ Symbol* method_name = create_symbol(parse_method_name_char); ++ const char* parse_method_sig_char = read_string(); ++ logparse_illegal_check(parse_method_sig_char, NULL, end_position); ++ Symbol* method_sig = create_symbol(parse_method_sig_char); ++ u4 parse_first_invoke_init_order = read_u4(); ++ ++ if ((int)parse_first_invoke_init_order == INVALID_FIRST_INVOKE_INIT_ORDER) { ++ parse_first_invoke_init_order = this->info_holder()->chain()->length() - 1; ++ } ++ u4 parse_method_size = read_u4(); ++ u4 parse_method_hash = read_u4(); ++ int32_t parse_bci = (int32_t)read_u4(); ++ if (parse_bci != InvocationEntryBci) { ++ logparse_illegal_count_check(parse_bci, NULL, end_position); ++ } ++ ++ // parse class info ++ 
const char* parse_class_name_char = read_string(); ++ logparse_illegal_check(parse_class_name_char, NULL, end_position); ++ Symbol* class_name = create_symbol(parse_class_name_char); ++ // ignore ++ if (should_ignore_this_class(class_name)) { ++ _position = end_position; ++ return NULL; ++ } ++ const char* parse_class_loader_char = read_string(); ++ logparse_illegal_check(parse_class_loader_char, NULL, end_position); ++ Symbol* class_loader = create_symbol(parse_class_loader_char); ++ class_loader = JitProfileCacheInfo::remove_meaningless_suffix(class_loader); ++ const char* path_char = read_string(); ++ logparse_illegal_check(path_char, NULL, end_position); ++ Symbol* path = create_symbol(path_char); ++ ++ JProfileCacheClassDictionary* dict = this->info_holder()->jit_profile_cache_dict(); ++ unsigned int dict_hash = class_name->identity_hash(); ++ ProfileCacheClassEntry* entry = dict->find_head_entry(dict_hash, class_name); ++ if (entry == NULL) { ++ jprofilecache_log_warning(profilecache)("[JitProfileCache] WARNING : class %s is missed in method parse", parse_class_name_char); ++ _position = end_position; ++ return NULL; ++ } ++ u4 parse_class_size = read_u4(); ++ u4 parse_class_crc32 = read_u4(); ++ u4 parse_class_hash = read_u4(); ++ ++ // method counters info ++ u4 parse_intp_invocation_count = read_u4(); ++ u4 parse_intp_throwout_count = read_u4(); ++ u4 parse_invocation_count = read_u4(); ++ u4 parse_backedge_count = read_u4(); ++ ++ int class_chain_offset = entry->chain_offset(); ++ ProfileCacheClassHolder* holder = entry->find_class_holder(parse_class_size, parse_class_crc32); ++ if (holder == NULL) { ++ holder = new ProfileCacheClassHolder(class_name, class_loader, path, parse_class_size, parse_class_hash, parse_class_crc32); ++ entry->add_class_holder(holder); ++ } ++ ProfileCacheMethodHold* mh = new ProfileCacheMethodHold(method_name, method_sig); ++ mh->set_interpreter_invocation_count(parse_intp_invocation_count); ++ 
mh->set_interpreter_exception_count(parse_intp_throwout_count); ++ mh->set_invocation_count(parse_invocation_count); ++ mh->set_backage_count(parse_backedge_count); ++ mh->set_method_bci((int)parse_bci); ++ ++ mh->set_method_hash(parse_method_hash); ++ mh->set_method_size(parse_method_size); ++ ++ int method_chain_offset = class_chain_offset; ++ mh->set_mounted_offset(method_chain_offset); ++ this->info_holder()->chain()->add_method_at_index(mh, method_chain_offset); ++ holder->add_method(mh); ++ return mh; ++} +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileCacheLogParser.hpp b/hotspot/src/share/vm/jprofilecache/jitProfileCacheLogParser.hpp +new file mode 100644 +index 000000000..c7315d5bd +--- /dev/null ++++ b/hotspot/src/share/vm/jprofilecache/jitProfileCacheLogParser.hpp +@@ -0,0 +1,88 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#ifndef LINUX_AARCH64_NORMAL_SERVER_FASTDEBUG_JITPROFILECACHELOGPARSER_H ++#define LINUX_AARCH64_NORMAL_SERVER_FASTDEBUG_JITPROFILECACHELOGPARSER_H ++ ++#include "memory/allocation.hpp" ++ ++// JitProfileCache log parser ++class JitProfileCacheLogParser : CHeapObj { ++public: ++ JitProfileCacheLogParser(randomAccessFileStream* fs, JitProfileCacheInfo* holder); ++ virtual ~JitProfileCacheLogParser(); ++ ++ bool valid(); ++ ++ bool parse_header(); ++ Symbol* create_symbol(const char* char_name); ++ bool parse_class(); ++ ++ void check_class(int i, Symbol* name, Symbol* loader_name, Symbol* path, ProfileCacheClassChain* chain); ++ ++ bool should_ignore_this_class(Symbol* symbol); ++ ++ bool has_next_method_record(); ++ ProfileCacheMethodHold* parse_method(); ++ ++ void increment_parsed_number_count() { _parsed_method_count++; } ++ ++ int parsed_methods() { return _parsed_method_count; } ++ int total_recorder_method() { return _total_recorder_method; } ++ ++ long file_size() { return _file_size; } ++ void set_file_size(long size) { _file_size = size; } ++ ++ int max_symbol_length() { return _max_symbol_length; } ++ ++ JitProfileCacheInfo* info_holder() { return _holder; } ++ void set_info_holder(JitProfileCacheInfo* holder) { _holder = holder; } ++ bool logparse_illegal_check(const char* s, bool ret_value, int end_position); ++ bool logparse_illegal_count_check(int cnt, bool ret_value, int end_position); ++ ++private: ++ // disable default constructor ++ JitProfileCacheLogParser(); ++ ++ bool _is_valid; ++ bool _has_parsed_header; ++ long _file_size; ++ int _position; ++ int _parsed_method_count; ++ int _total_recorder_method; ++ randomAccessFileStream* _file_stream; ++ ++ int _max_symbol_length; ++ char* _parse_str_buf; ++ ++ JitProfileCacheInfo* _holder; ++ Arena* _arena; ++ ++ u1 read_u1(); ++ u4 read_u4(); ++ u8 read_u8(); ++ const char* read_string(); ++}; ++ ++ ++#endif // LINUX_AARCH64_NORMAL_SERVER_FASTDEBUG_JITPROFILECACHELOGPARSER_H +\ No newline 
at end of file +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileCacheThread.cpp b/hotspot/src/share/vm/jprofilecache/jitProfileCacheThread.cpp +new file mode 100644 +index 000000000..3ee3aa89f +--- /dev/null ++++ b/hotspot/src/share/vm/jprofilecache/jitProfileCacheThread.cpp +@@ -0,0 +1,79 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#include "precompiled.hpp" ++ ++#include "code/codeCache.hpp" ++#include "jprofilecache/jitProfileCache.hpp" ++#include "jprofilecache/jitProfileCacheThread.hpp" ++#include "runtime/java.hpp" ++#include "runtime/mutex.hpp" ++#include "runtime/mutexLocker.hpp" ++#include "runtime/orderAccess.hpp" ++ ++JitProfileCacheThread* JitProfileCacheThread::_jprofilecache_thread = NULL; ++ ++JitProfileCacheThread::JitProfileCacheThread(unsigned int sec) : NamedThread() { ++ set_name("JitProfileCache Flush Thread"); ++ set_interval_seconds(sec); ++ if (os::create_thread(this, os::vm_thread)) { ++ os::set_priority(this, MaxPriority); ++ } else { ++ tty->print_cr("[JitProfileCache] ERROR : failed to create JitProfileCacheThread"); ++ vm_exit(-1); ++ } ++} ++ ++JitProfileCacheThread::~JitProfileCacheThread() { ++ // do nothing ++} ++ ++#define MILLISECONDS_PER_SECOND 1000 ++ ++void JitProfileCacheThread::run() { ++ assert(_jprofilecache_thread == this, "sanity check"); ++ this->record_stack_base_and_size(); ++ this->_is_active = true; ++ os::sleep(this, MILLISECONDS_PER_SECOND * interval_seconds(), false); ++ JitProfileCache::instance()->flush_recorder(); ++ { ++ MutexLockerEx mu(JitProfileCachePrint_lock); ++ _jprofilecache_thread = NULL; ++ } ++} ++ ++void JitProfileCacheThread::launch_with_delay(unsigned int sec) { ++ JitProfileCacheThread* t = new JitProfileCacheThread(sec); ++ _jprofilecache_thread = t; ++ Thread::start(t); ++} ++ ++void JitProfileCacheThread::print_jit_profile_cache_thread_info_on(outputStream* st) { ++ MutexLockerEx mu(JitProfileCachePrint_lock); ++ if (_jprofilecache_thread == NULL || !_jprofilecache_thread->is_active()) { ++ return; ++ } ++ st->print("\"%s\" ", _jprofilecache_thread->name()); ++ _jprofilecache_thread->print_on(st); ++ st->cr(); ++} +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileCacheThread.hpp b/hotspot/src/share/vm/jprofilecache/jitProfileCacheThread.hpp +new file mode 100644 +index 000000000..4291e23d8 +--- 
/dev/null ++++ b/hotspot/src/share/vm/jprofilecache/jitProfileCacheThread.hpp +@@ -0,0 +1,55 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#ifndef SHARE_VM_JPROFILECACHE_JITPROFILECACHETHREAD_HPP ++#define SHARE_VM_JPROFILECACHE_JITPROFILECACHETHREAD_HPP ++ ++#include "runtime/thread.hpp" ++ ++class JitProfileCacheThread : public NamedThread { ++public: ++ virtual void run(); ++ ++ unsigned int interval_seconds() { return _interval_seconds; } ++ ++ void set_interval_seconds(unsigned int sec) { _interval_seconds = sec; } ++ ++ bool is_active() { return _is_active; } ++ ++ static void launch_with_delay(unsigned int sec); ++ ++ static void print_jit_profile_cache_thread_info_on(outputStream* st); ++ ++protected: ++ JitProfileCacheThread(unsigned int sec); ++ virtual ~JitProfileCacheThread(); ++ ++ ++private: ++ unsigned int _interval_seconds; ++ volatile bool _is_active; ++ ++ static JitProfileCacheThread* _jprofilecache_thread; ++}; ++ ++#endif //SHARE_VM_JPROFILECACHE_JITPROFILECACHETHREAD_HPP +\ No newline at end of file +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileRecord.cpp b/hotspot/src/share/vm/jprofilecache/jitProfileRecord.cpp +new file mode 100644 +index 000000000..e2bfe19d8 +--- /dev/null ++++ b/hotspot/src/share/vm/jprofilecache/jitProfileRecord.cpp +@@ -0,0 +1,597 @@ ++/* ++* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
++* ++ */ ++ ++#include "jprofilecache/jitProfileRecord.hpp" ++#include "jprofilecache/jitProfileCache.hpp" ++#include "jprofilecache/jitProfileCacheLog.hpp" ++#include "runtime/arguments.hpp" ++#include "runtime/globals.hpp" ++#include "classfile/classLoaderData.hpp" ++#include "classfile/classLoaderData.inline.hpp" ++#include "jprofilecache/jitProfileCacheThread.hpp" ++ ++// define offset ++#define PROFILECACHE_VERSION_OFFSET 0 ++#define PROFILECACHE_MAGIC_NUMBER_OFFSET 4 ++#define FILE_SIZE_OFFSET 8 ++#define PROFILECACHE_CRC32_OFFSET 12 ++#define APPID_OFFSET 16 ++#define MAX_SYMBOL_LENGTH_OFFSET 20 ++#define RECORD_COUNT_OFFSET 24 ++#define PROFILECACHE_TIME_OFFSET 28 ++ ++#define HEADER_SIZE 36 ++ ++// define width ++#define RECORE_VERSION_WIDTH (PROFILECACHE_MAGIC_NUMBER_OFFSET - PROFILECACHE_VERSION_OFFSET) ++#define RECORE_MAGIC_WIDTH (FILE_SIZE_OFFSET - PROFILECACHE_MAGIC_NUMBER_OFFSET) ++#define FILE_SIZE_WIDTH (PROFILECACHE_CRC32_OFFSET - FILE_SIZE_OFFSET) ++#define RECORE_CRC32_WIDTH (APPID_OFFSET - PROFILECACHE_CRC32_OFFSET) ++#define RECORE_APPID_WIDTH (MAX_SYMBOL_LENGTH_OFFSET - APPID_OFFSET) ++#define RECORE_MAX_SYMBOL_LENGTH_WIDTH (RECORD_COUNT_OFFSET - MAX_SYMBOL_LENGTH_OFFSET) ++#define RECORD_COUNTS_WIDTH (PROFILECACHE_TIME_OFFSET - RECORD_COUNT_OFFSET) ++#define RECORE_TIME_WIDTH (HEADER_SIZE - PROFILECACHE_TIME_OFFSET) ++ ++// default value ++#define MAGIC_NUMBER 0xBABA ++#define RECORE_FILE_DEFAULT_NUMBER 0 ++#define RECORE_CRC32_DEFAULT_NUMBER 0 ++ ++#define JVM_DEFINE_CLASS_PATH "_JVM_DefineClass_" ++ ++JitProfileRecorder::JitProfileRecorder() ++ : _holder(NULL), ++ _profilelog(NULL), ++ _pos(0), ++ _recorder_state(NOT_INIT), ++ _class_init_list(NULL), ++ _init_list_tail_node(NULL), ++ _profile_record_dict(NULL), ++ _class_init_order_num(-1), ++ _flushed(false), ++ _record_file_name(NULL), ++ _max_symbol_length(0) { ++} ++ ++JitProfileRecorder::~JitProfileRecorder() { ++ if (!ProfilingCacheFile) { ++ os::free((void*)logfile_name()); ++ } 
++ delete _class_init_list; ++} ++ ++#define PROFILE_RECORDER_HT_SIZE 10240 ++ ++void JitProfileRecorder::set_logfile_name(const char* name) { ++ _record_file_name = make_log_name(name, NULL); ++} ++ ++#define PROFILECACHE_PID_BUFFER_SIZE 100 ++#define RECORD_MIN_LIMIT 0 ++#define RECORD_MAX_LINIT 3 ++ ++ ++void JitProfileRecorder::init() { ++ assert(_recorder_state == NOT_INIT, "JitProfileRecorder state error"); ++ ++ // check param ++ if (!param_check()) { ++ return; ++ } ++ ++ // log file name ++ if (ProfilingCacheFile == NULL) { ++ char* buf = (char*)os::malloc(100, mtInternal); ++ char fmt[] = "jprofilecache_%p.profile"; ++ Arguments::copy_expand_pid(fmt, sizeof(fmt), buf, PROFILECACHE_PID_BUFFER_SIZE); ++ _record_file_name = buf; ++ } else { ++ set_logfile_name(ProfilingCacheFile); ++ } ++ ++ _class_init_list = new (ResourceObj::C_HEAP, mtInternal) LinkedListImpl(); ++ _profile_record_dict = new JitProfileRecordDictionary(PROFILE_RECORDER_HT_SIZE); ++ _recorder_state = IS_OK; ++ ++ jprofilecache_log_debug(profilecache)("[JitProfileCache] DEBUG begin to collect, log file is %s", logfile_name()); ++} ++ ++bool JitProfileRecorder::param_check() { ++ if (JProfilingCacheCompileAdvance) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: JProfilingCacheCompileAdvance and JProfilingCacheRecording cannot be enabled at the same time"); ++ _recorder_state = IS_ERR; ++ return false; ++ } ++ if (!ProfileInterpreter) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: ProfileInterpreter must be enable"); ++ _recorder_state = IS_ERR; ++ return false; ++ } ++ // disable class unloading ++ if (ClassUnloading) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: ClassUnloading must be disable"); ++ _recorder_state = IS_ERR; ++ return false; ++ } ++ if (UseConcMarkSweepGC) { ++ if (FLAG_IS_DEFAULT(CMSClassUnloadingEnabled)) { ++ FLAG_SET_DEFAULT(CMSClassUnloadingEnabled, false); ++ } ++ if (CMSClassUnloadingEnabled) { ++ 
jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: if use CMS gc, CMSClassUnloadingEnabled must be disabled"); ++ _recorder_state = IS_ERR; ++ return false; ++ } ++ } ++ if (UseG1GC) { ++ if (FLAG_IS_DEFAULT(ClassUnloadingWithConcurrentMark)) { ++ FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false); ++ } ++ if (ClassUnloadingWithConcurrentMark) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: if use G1 gc, ClassUnloadingWithConcurrentMark must be disabled"); ++ _recorder_state = IS_ERR; ++ return false; ++ } ++ } ++ // check class data sharing ++ if (UseSharedSpaces) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: UseSharedSpaces must be disabled"); ++ _recorder_state = IS_ERR; ++ return false; ++ } ++ // check CompilationProfileCacheRecordMinLevel ++ if (CompilationProfileCacheRecordMinLevel < RECORD_MIN_LIMIT || CompilationProfileCacheRecordMinLevel > RECORD_MAX_LINIT) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: CompilationProfileCacheRecordMinLevel is invalid must be in the range: [0-3]."); ++ _recorder_state = IS_ERR; ++ return false; ++ } ++ ++ if (Arguments::mode() == Arguments::_int) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR: when enable JProfilingCacheRecording, should not set -Xint"); ++ _recorder_state = IS_ERR; ++ return false; ++ } ++ return true; ++} ++ ++int JitProfileRecorder::assign_class_init_order(InstanceKlass* klass) { ++ // ignore anonymous class ++ if (klass->is_anonymous()) { ++ return -1; ++ } ++ Symbol* record_name = klass->name(); ++ Symbol* record_path = klass->source_file_path(); ++ Symbol* record_loader_name = JitProfileCache::get_class_loader_name(klass->class_loader_data()); ++ if (record_name == NULL || record_name->utf8_length() == 0) { ++ return -1; ++ } ++ MutexLockerEx mu(JitProfileRecorder_lock); ++ if (_init_list_tail_node == NULL) { ++ _class_init_list->add(ClassSymbolEntry(record_name, record_loader_name, 
record_path)); ++ _init_list_tail_node = _class_init_list->head(); ++ } else { ++ _class_init_list->insert_after(ClassSymbolEntry(record_name, record_loader_name, record_path), ++ _init_list_tail_node); ++ _init_list_tail_node = _init_list_tail_node->next(); ++ } ++ _class_init_order_num++; ++#ifndef PRODUCT ++ klass->set_initialize_order(_class_init_order_num); ++#endif ++ return _class_init_order_num; ++} ++ ++void JitProfileRecorder::add_method(Method* method, int method_bci) { ++ MutexLockerEx mu(JitProfileRecorder_lock, Mutex::_no_safepoint_check_flag); ++ // if is flushed, stop adding method ++ if (is_flushed()) { ++ return; ++ } ++ // not deal with OSR Compilation ++ if (method_bci != InvocationEntryBci) { ++ return; ++ } ++ assert(is_valid(), "JProfileCache state must be OK"); ++ unsigned int hash = compute_hash(method); ++ dict()->add_method(hash, method, method_bci); ++} ++ ++void JitProfileRecorder::update_max_symbol_length(int len) { ++ if (len > _max_symbol_length) { ++ _max_symbol_length = len; ++ } ++} ++ ++JitProfileRecordDictionary::JitProfileRecordDictionary(unsigned int size) ++ : Hashtable(size, sizeof(JitProfileRecorderEntry)), ++ _count(0) { ++ // do nothing ++} ++ ++JitProfileRecordDictionary::~JitProfileRecordDictionary() { ++ // free allocate memory ++ for (int index = 0; index < table_size(); ++index) { ++ for (JitProfileRecorderEntry* e = bucket(index); e != NULL;) { ++ JitProfileRecorderEntry* to_remove = e; ++ // read next before freeing. 
++ e = e->next(); ++ unlink_entry(to_remove); ++ to_remove->free_allocate(); ++ FREE_C_HEAP_ARRAY(char, to_remove, mtInternal); ++ } ++ } ++ assert(number_of_entries() == 0, "should have removed all entries"); ++ free_buckets(); ++ for (BasicHashtableEntry* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) { ++ ((JitProfileRecorderEntry*)e)->free_allocate(); ++ FREE_C_HEAP_ARRAY(char, e, mtInternal); ++ } ++} ++ ++JitProfileRecorderEntry* JitProfileRecordDictionary::new_entry(unsigned int hash, Method* method) { ++ JitProfileRecorderEntry* entry = (JitProfileRecorderEntry*) new_entry_free_list(); ++ if (entry == NULL) { ++ entry = (JitProfileRecorderEntry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtInternal, CURRENT_PC); ++ } ++ entry->set_next(NULL); ++ entry->set_hash(hash); ++ entry->set_literal(method); ++ entry->init(); ++ return entry; ++} ++ ++JitProfileRecorderEntry* JitProfileRecordDictionary::add_method(unsigned int method_hash, Method* method, int bci) { ++ assert_lock_strong(JitProfileRecorder_lock); ++ int target_bucket = hash_to_index(method_hash); ++ JitProfileRecorderEntry* record_entry = find_entry(method_hash, method); ++ if (record_entry != NULL) { ++ return record_entry; ++ } ++ // add method entry ++ record_entry = new_entry(method_hash, method); ++ record_entry->set_bci(bci); ++ record_entry->set_order(count()); ++ ++ ConstMethod *const_method = method->constMethod(); ++ MethodCounters *method_counters = method->method_counters(); ++ InstanceKlass *klass = const_method->constants()->pool_holder(); ++ ++ // record method info ++ char *record_method_name = method->name()->as_C_string(); ++ char *method_name = (char*) malloc(strlen(record_method_name) + 1); ++ memcpy(method_name, record_method_name, strlen(record_method_name) + 1); ++ record_entry->set_method_name(method_name); ++ ++ char *record_method_sig = method->signature()->as_C_string(); ++ char *method_sig = (char*) malloc(strlen(record_method_sig) + 1); ++ memcpy(method_sig, 
record_method_sig, strlen(record_method_sig) + 1); ++ record_entry->set_method_sig(method_sig); ++ // first invoke init order ++ record_entry->set_first_invoke_init_order((u4) method->first_invoke_init_order()); ++ // bytecode size ++ record_entry->set_method_code_size((u4) const_method->code_size()); ++ ++#ifdef _LP64 ++ int record_method_hash = compute_universal_hash((char*)(const_method->code_base()), const_method->code_size()); ++ record_entry->set_method_hash((u4) record_method_hash); ++#endif ++ ++ record_entry->set_method_bci((u4) bci); ++ ++ // record class info ++ char *record_class_name = klass->name()->as_C_string(); ++ char *class_name = (char*) malloc(strlen(record_class_name) + 1); ++ memcpy(class_name, record_class_name, strlen(record_class_name) + 1); ++ Symbol *record_path_sym = klass->source_file_path(); ++ const char *record_path = NULL; ++ if (record_path_sym != NULL) { ++ record_path = record_path_sym->as_C_string(); ++ char *class_path = (char*) malloc(strlen(record_path) + 1); ++ memcpy(class_path, record_path, strlen(record_path) + 1); ++ record_entry->set_class_path(class_path); ++ } else { ++ record_path = JVM_DEFINE_CLASS_PATH; ++ char *class_path = (char*) malloc(strlen(record_path) + 1); ++ memcpy(class_path, record_path, strlen(record_path) + 1); ++ record_entry->set_class_path(class_path); ++ } ++ oop record_class_loader = klass->class_loader(); ++ const char *record_loader_name = NULL; ++ if (record_class_loader != NULL) { ++ record_loader_name = record_class_loader->klass()->name()->as_C_string(); ++ char *class_load_name = (char*) malloc(strlen(record_loader_name) + 1); ++ memcpy(class_load_name, record_loader_name, strlen(record_loader_name) + 1); ++ record_entry->set_class_loader_name(class_load_name); ++ } else { ++ record_loader_name = "NULL"; ++ char *class_load_name = (char*) malloc(strlen(record_loader_name) + 1); ++ memcpy(class_load_name, record_loader_name, strlen(record_loader_name) + 1); ++ 
record_entry->set_class_loader_name(class_load_name); ++ } ++ record_entry->set_class_name(class_name); ++ record_entry->set_class_bytes_size((u4)klass->bytes_size()); ++ record_entry->set_class_crc32((u4)klass->crc32()); ++ record_entry->set_class_number((u4)0x00); ++ ++ // record method counters ++ if (method_counters != NULL) { ++ record_entry->set_interpreter_invocation_count((u4)method_counters->interpreter_invocation_count()); ++ record_entry->set_interpreter_throwout_count((u4)method_counters->interpreter_throwout_count()); ++ record_entry->set_invocation_counter((u4)method_counters->invocation_counter()->raw_counter()); ++ record_entry->set_backedge_counter((u4)method_counters->backedge_counter()->raw_counter()); ++ } else { ++ jprofilecache_log_warning(profilecache)("[JitProfileCache] WARNING : the method counter is NULL"); ++ record_entry->set_interpreter_invocation_count((u4)0); ++ record_entry->set_interpreter_throwout_count((u4)0); ++ record_entry->set_invocation_counter((u4)0); ++ record_entry->set_backedge_counter((u4)0); ++ } ++ ++ add_entry(target_bucket, record_entry); ++ _count++; ++ return record_entry; ++} ++ ++JitProfileRecorderEntry* JitProfileRecordDictionary::find_entry(unsigned int hash, Method* method) { ++ int index = hash_to_index(hash); ++ for (JitProfileRecorderEntry* p = bucket(index); p != NULL; p = p->next()) { ++ if (p->literal() == method) { ++ return p; ++ } ++ } ++ return NULL; ++} ++ ++void JitProfileRecordDictionary::free_entry(JitProfileRecorderEntry* entry) { ++ Hashtable::free_entry(entry); ++} ++ ++#define WRITE_U1_INTERVAL 1 ++#define WRITE_U4_INTERVAL 4 ++#define OVERWRITE_U4_INTERVAL 4 ++ ++static char record_buf[12]; ++void JitProfileRecorder::write_u1(u1 value) { ++ *(u1*)record_buf = value; ++ _profilelog->write(record_buf, WRITE_U1_INTERVAL); ++ _pos += WRITE_U1_INTERVAL; ++} ++ ++void JitProfileRecorder::write_u4(u4 value) { ++ *(u4*)record_buf = value; ++ _profilelog->write(record_buf, WRITE_U4_INTERVAL); ++ _pos 
+= WRITE_U4_INTERVAL; ++} ++ ++void JitProfileRecorder::overwrite_u4(u4 value, unsigned int offset) { ++ *(u4*)record_buf = value; ++ _profilelog->write(record_buf, OVERWRITE_U4_INTERVAL, offset); ++} ++ ++void JitProfileRecorder::write_string(const char* src, size_t len) { ++ assert(src != NULL && len != 0, "empty string is not allowed"); ++ _profilelog->write(src, len); ++ _profilelog->write("\0", 1); ++ _pos += len + 1; ++ update_max_symbol_length((int)len); ++} ++ ++#define JVM_DEFINE_CLASS_PATH "_JVM_DefineClass_" ++ ++#define CRC32_BUF_SIZE 1024 ++static char crc32_buf[CRC32_BUF_SIZE]; ++ ++int JitProfileRecorder::compute_crc32(randomAccessFileStream* fileStream) { ++ long old_position = (long)fileStream->tell(); ++ fileStream->seek(HEADER_SIZE, SEEK_SET); ++ int content_size = fileStream->fileSize() - HEADER_SIZE; ++ assert(content_size > 0, "sanity check"); ++ int loops = content_size / CRC32_BUF_SIZE; ++ int partial_chunk_size = content_size % CRC32_BUF_SIZE; ++ int crc = 0; ++ ++ for (int i = 0; i < loops; ++i) { ++ fileStream->read(crc32_buf, CRC32_BUF_SIZE, 1); ++ crc = ClassLoader::crc32(crc, crc32_buf, CRC32_BUF_SIZE); ++ } ++ if (partial_chunk_size > 0) { ++ fileStream->read(crc32_buf, partial_chunk_size, 1); ++ crc = ClassLoader::crc32(crc, crc32_buf, partial_chunk_size); ++ } ++ fileStream->seek(old_position, SEEK_SET); ++ ++ return crc; ++} ++#undef CRC32_BUF_SIZE ++ ++static char header_buf[HEADER_SIZE]; ++void JitProfileRecorder::write_profilecache_header() { ++ assert(_profilelog->is_open(), ""); ++ ++ size_t offset = 0; ++ ++ *(unsigned int*)header_buf = holder()->version(); ++ _pos += RECORE_VERSION_WIDTH; ++ offset += RECORE_VERSION_WIDTH; ++ ++ *(unsigned int*)((char*)header_buf + offset) = MAGIC_NUMBER; ++ _pos += RECORE_MAGIC_WIDTH; ++ offset += RECORE_MAGIC_WIDTH; ++ ++ *(unsigned int*)((char*)header_buf + offset) = RECORE_FILE_DEFAULT_NUMBER; ++ _pos += RECORE_CRC32_WIDTH; ++ offset += RECORE_CRC32_WIDTH; ++ ++ *(unsigned 
int*)((char*)header_buf + offset) = RECORE_CRC32_DEFAULT_NUMBER; ++ _pos += RECORE_CRC32_WIDTH; ++ offset += RECORE_CRC32_WIDTH; ++ ++ ++ *(unsigned int*)((char*)header_buf + offset) = CompilationProfileCacheAppID; ++ _pos += RECORE_APPID_WIDTH; ++ offset += RECORE_APPID_WIDTH; ++ ++ ++ *(unsigned int*)((char*)header_buf + offset) = 0; ++ _pos += RECORE_MAX_SYMBOL_LENGTH_WIDTH; ++ offset += RECORE_MAX_SYMBOL_LENGTH_WIDTH; ++ ++ ++ *(unsigned int*)((char*)header_buf + offset) = recorded_count(); ++ _pos += RECORD_COUNTS_WIDTH; ++ offset += RECORD_COUNTS_WIDTH; ++ ++ *(unsigned jlong*)((char*)header_buf + offset) = os::javaTimeMillis(); ++ _pos += RECORE_TIME_WIDTH; ++ offset += RECORE_TIME_WIDTH; ++ ++ _profilelog->write(header_buf, offset); ++} ++ ++void JitProfileRecorder::write_inited_class() { ++ assert(_profilelog->is_open(), "log file must be opened"); ++ ResourceMark rm; ++ unsigned int begin_position = _pos; ++ unsigned int size_anchor = begin_position; ++ ++ write_u4((u4)MAGIC_NUMBER); ++ write_u4((u4)class_init_count()); ++ ++ int cnt = 0; ++ const LinkedListNode* node = class_init_list()->head(); ++ while (node != NULL) { ++ const ClassSymbolEntry* record_entry = node->peek(); ++ char* record_class_name = record_entry->class_name()->as_C_string(); ++ const char* record_class_loader_name = NULL; ++ if (record_entry->class_loader_name() == NULL) { ++ record_class_loader_name = "NULL"; ++ } else { ++ record_class_loader_name = record_entry->class_loader_name()->as_C_string(); ++ } ++ const char* path = NULL; ++ if (record_entry->path() == NULL) { ++ path = JVM_DEFINE_CLASS_PATH; ++ } else { ++ path = record_entry->path()->as_C_string(); ++ } ++ write_string(record_class_name, strlen(record_class_name)); ++ write_string(record_class_loader_name, strlen(record_class_loader_name)); ++ write_string(path, strlen(path)); ++ node = node->next(); ++ cnt++; ++ } ++ assert(cnt == class_init_count(), "error happened in profile info record"); ++ unsigned int 
end_position = _pos; ++ unsigned int section_size = end_position - begin_position; ++ overwrite_u4(section_size, size_anchor); ++} ++ ++void JitProfileRecorder::write_profilecache_record(JitProfileRecorderEntry* entry, int bci, int order) { ++ ResourceMark rm; ++ unsigned int begin_position = _pos; ++ unsigned int total_size = 0; ++ ++ unsigned int size_anchor = begin_position; ++ write_u4((u4)MAGIC_NUMBER); ++ write_u4((u4)order); ++ ++ // record compilation type ++ u1 compilation_type = bci == -1 ? 0 : 1; ++ write_u1(compilation_type); ++ ++ // record method info ++ record_method_info(entry, bci); ++ ++ // record class info ++ record_class_info(entry); ++ ++ write_u4(entry->get_interpreter_invocation_count()); ++ write_u4(entry->get_interpreter_throwout_count()); ++ write_u4(entry->get_invocation_counter()); ++ write_u4(entry->get_backedge_counter()); ++ ++ ++ unsigned int end_position = _pos; ++ unsigned int section_size = end_position - begin_position; ++ overwrite_u4(section_size, size_anchor); ++} ++ ++void JitProfileRecorder::record_class_info(JitProfileRecorderEntry* entry) { ++ const char* record_class_name = entry->get_class_name(); ++ const char* loader_name = entry->get_class_loader_name(); ++ const char* record_path = entry->get_class_path(); ++ ++ write_string(record_class_name, strlen(record_class_name)); ++ write_string(loader_name, strlen(loader_name)); ++ write_string(record_path, strlen(record_path)); ++ write_u4((u4)entry->get_class_bytes_size()); ++ write_u4((u4)entry->get_class_crc32()); ++ write_u4((u4)0x00); ++} ++ ++void JitProfileRecorder::record_method_info(JitProfileRecorderEntry* entry, int bci) { ++ const char* record_method_name = entry->get_method_name(); ++ write_string(record_method_name, strlen(record_method_name)); ++ const char* record_method_sig = entry->get_method_sig(); ++ write_string(record_method_sig, strlen(record_method_sig)); ++ // first invoke init order ++ write_u4((u4) entry->get_first_invoke_init_order()); ++ // 
bytecode size ++ write_u4((u4) entry->get_method_code_size()); ++ write_u4((u4) entry->get_method_hash()); ++ write_u4((u4) entry->get_method_bci()); ++} ++ ++void JitProfileRecorder::write_profilecache_footer() { ++} ++ ++void JitProfileRecorder::flush_record() { ++ MutexLockerEx mu(JitProfileRecorder_lock); ++ if (!is_valid() || is_flushed()) { ++ return; ++ } ++ set_flushed(true); ++ ++ // open randomAccessFileStream ++ _profilelog = new (ResourceObj::C_HEAP, mtInternal) randomAccessFileStream(logfile_name(), "wb+"); ++ if (_profilelog == NULL || !_profilelog->is_open()) { ++ jprofilecache_log_error(profilecache)("[JitProfileCache] ERROR : open log file fail! path is %s", logfile_name()); ++ _recorder_state = IS_ERR; ++ return; ++ } ++ ++ // head section ++ write_profilecache_header(); ++ // write class init section ++ write_inited_class(); ++ // write method profile info ++ for (int index = 0; index < dict()->table_size(); index++) { ++ for (JitProfileRecorderEntry* entry = dict()->bucket(index); ++ entry != NULL; ++ entry = entry->next()) { ++ write_profilecache_record(entry, entry->bci(), entry->order()); ++ } ++ } ++ // foot section ++ write_profilecache_footer(); ++ ++ // set file size ++ overwrite_u4((u4)_pos, FILE_SIZE_OFFSET); ++ // set max symbol length ++ overwrite_u4((u4)_max_symbol_length, MAX_SYMBOL_LENGTH_OFFSET); ++ // compute and set file's crc32 ++ int crc32 = JitProfileRecorder::compute_crc32(_profilelog); ++ overwrite_u4((u4)crc32, PROFILECACHE_CRC32_OFFSET); ++ ++ _profilelog->flush(); ++ // close fd ++ delete _profilelog; ++ _profilelog = NULL; ++ ++ delete _profile_record_dict; ++ _profile_record_dict = NULL; ++ ++ jprofilecache_log_info(profilecache)("[JitProfileCache] Profile information output completed. File: %s", logfile_name() == NULL ? 
"NULL" : logfile_name()); ++} +\ No newline at end of file +diff --git a/hotspot/src/share/vm/jprofilecache/jitProfileRecord.hpp b/hotspot/src/share/vm/jprofilecache/jitProfileRecord.hpp +new file mode 100644 +index 000000000..66f107dc5 +--- /dev/null ++++ b/hotspot/src/share/vm/jprofilecache/jitProfileRecord.hpp +@@ -0,0 +1,282 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. ++ * ++ */ ++ ++#ifndef SHARED_VM_JPROFILECACHE_JITPROFILERECORD_HPP ++#define SHARED_VM_JPROFILECACHE_JITPROFILERECORD_HPP ++ ++#include "jprofilecache/jitProfileCache.hpp" ++ ++class JitProfileCache; ++ ++class JitProfileRecorderEntry : public HashtableEntry { ++public: ++ JitProfileRecorderEntry() { } ++ void free_allocate() { ++ if (_method_name != NULL) { ++ free((void*)_method_name); ++ _method_name = NULL; ++ } ++ if (_method_sig != NULL) { ++ free((void*)_method_sig); ++ _method_sig = NULL; ++ } ++ if (_class_name != NULL) { ++ free((void*)_class_name); ++ _class_name = NULL; ++ } ++ if (_class_loader_name != NULL) { ++ free((void*)_class_loader_name); ++ _class_loader_name = NULL; ++ } ++ if (_class_path != NULL) { ++ free((void*)_class_path); ++ _class_path = NULL; ++ } ++ } ++ ++ void init() { ++ _bci = InvocationEntryBci; ++ _magic_number = 0; ++ _compilation_type = 0; ++ _method_name = NULL; ++ _method_sig = NULL; ++ _first_invoke_init_order = 0; ++ _method_code_size = 0; ++ _method_hash = 0; ++ _method_bci = 0; ++ _class_name = NULL; ++ _class_loader_name = NULL; ++ _class_path = NULL; ++ _class_bytes_size = 0; ++ _class_crc32 = 0; ++ _class_number = 0; ++ _interpreter_invocation_count = 0; ++ _interpreter_throwout_count = 0; ++ _invocation_counter = 0; ++ _backedge_counter = 0; ++ } ++ ++ void set_bci(int bci) { _bci = bci; } ++ int bci() { return _bci; } ++ ++ void set_order(int order) { _order = order; } ++ int order() { return _order; } ++ ++ JitProfileRecorderEntry* next() { ++ return (JitProfileRecorderEntry*)HashtableEntry::next(); ++ } 
++ ++ u4 get_magic_number(){ return _magic_number;} ++ u1 get_compilation_type(){ return _compilation_type;} ++ const char* get_method_name(){ return _method_name;} ++ const char* get_method_sig(){ return _method_sig;} ++ u4 get_first_invoke_init_order(){ return _first_invoke_init_order;} ++ u4 get_method_code_size(){ return _method_code_size;} ++ u4 get_method_hash(){ return _method_hash;} ++ u4 get_method_bci(){ return _method_bci;} ++ u4 get_class_bytes_size(){ return _class_bytes_size;} ++ u4 get_class_crc32(){ return _class_crc32;} ++ u4 get_class_number(){ return _class_number;} ++ u4 get_interpreter_invocation_count(){ return _interpreter_invocation_count;} ++ u4 get_interpreter_throwout_count(){ return _interpreter_throwout_count;} ++ u4 get_invocation_counter(){ return _invocation_counter;} ++ u4 get_backedge_counter(){ return _backedge_counter;} ++ const char* get_class_name(){ return _class_name;} ++ const char* get_class_loader_name(){ return _class_loader_name;} ++ const char* get_class_path(){ return _class_path;} ++ ++ void set_magic_number(u4 magic_number) { _magic_number = magic_number; } ++ void set_compilation_type(u1 compilation_type) { _compilation_type = compilation_type; } ++ void set_method_name(const char* method_name) { _method_name = method_name; } ++ void set_method_sig(const char* method_sig) { _method_sig = method_sig; } ++ void set_first_invoke_init_order(u4 first_invoke_init_order) { _first_invoke_init_order = first_invoke_init_order; } ++ void set_method_code_size(u4 method_code_size) { _method_code_size = method_code_size; } ++ void set_method_hash(u4 method_hash) { _method_hash = method_hash; } ++ void set_method_bci(u4 method_bci) { _method_bci = method_bci; } ++ void set_class_bytes_size(u4 class_bytes_size) { _class_bytes_size = class_bytes_size; } ++ void set_class_crc32(u4 class_crc32) { _class_crc32 = class_crc32; } ++ void set_class_number(u4 class_number) { _class_number = class_number; } ++ void 
set_interpreter_invocation_count(u4 interpreter_invocation_count) { _interpreter_invocation_count = interpreter_invocation_count; } ++ void set_interpreter_throwout_count(u4 interpreter_throwout_count) { _interpreter_throwout_count = interpreter_throwout_count; } ++ void set_invocation_counter(u4 invocation_counter) { _invocation_counter = invocation_counter; } ++ void set_backedge_counter(u4 backedge_counter) { _backedge_counter = backedge_counter; } ++ void set_class_name(const char* class_name) { _class_name = class_name; } ++ void set_class_loader_name(const char* class_loader_name) { _class_loader_name = class_loader_name; } ++ void set_class_path(const char* class_path) { _class_path = class_path; } ++ ++private: ++ int _bci; ++ int _order; ++ u4 _magic_number; ++ u1 _compilation_type; ++ const char* _method_name; ++ const char* _method_sig; ++ u4 _first_invoke_init_order; ++ u4 _method_code_size; ++ u4 _method_hash; ++ u4 _method_bci; ++ const char* _class_name; ++ const char* _class_loader_name; ++ const char* _class_path; ++ u4 _class_bytes_size; ++ u4 _class_crc32; ++ u4 _class_number; ++ u4 _interpreter_invocation_count; ++ u4 _interpreter_throwout_count; ++ u4 _invocation_counter; ++ u4 _backedge_counter; ++}; ++ ++class JitProfileRecordDictionary : public Hashtable { ++ friend class VMStructs; ++ friend class JitProfileCache; ++public: ++ JitProfileRecordDictionary(unsigned int size); ++ virtual ~JitProfileRecordDictionary(); ++ ++ JitProfileRecorderEntry* add_method(unsigned int method_hash, Method* method, int bci); ++ ++ JitProfileRecorderEntry* find_entry(unsigned int hash, Method* method); ++ ++ void free_entry(JitProfileRecorderEntry* entry); ++ ++ unsigned int count() { return _count; } ++ ++ void print(); ++ ++ JitProfileRecorderEntry* bucket(int i) { ++ return (JitProfileRecorderEntry*)Hashtable::bucket(i); ++ } ++ ++private: ++ unsigned int _count; ++ JitProfileRecorderEntry* new_entry(unsigned int hash, Method* method); ++}; ++ ++class 
ClassSymbolEntry { ++public: ++ ClassSymbolEntry(Symbol* class_name, Symbol* class_loader_name, Symbol* path) ++ : _class_name(class_name), ++ _class_loader_name(class_loader_name), ++ _class_path(path) { ++ if (_class_name != NULL) _class_name->increment_refcount(); ++ if (_class_loader_name != NULL) _class_loader_name->increment_refcount(); ++ if (_class_path != NULL) _class_path->increment_refcount(); ++ } ++ ++ ClassSymbolEntry() ++ : _class_name(NULL), ++ _class_loader_name(NULL), ++ _class_path(NULL) { ++ } ++ ++ ~ClassSymbolEntry() { ++ if (_class_name != NULL) _class_name->decrement_refcount(); ++ if (_class_loader_name != NULL) _class_loader_name->decrement_refcount(); ++ if (_class_path != NULL) _class_path->decrement_refcount(); ++ } ++ ++ Symbol* class_name() const { return _class_name; } ++ Symbol* class_loader_name() const { return _class_loader_name; } ++ Symbol* path() const { return _class_path; } ++ ++ bool equals(const ClassSymbolEntry& rhs) const { ++ return _class_name == rhs._class_name; ++ } ++ ++private: ++ Symbol* _class_name; ++ Symbol* _class_loader_name; ++ Symbol* _class_path; ++}; ++ ++#define KNUTH_HASH_MULTIPLIER 2654435761UL ++#define ADDR_CHANGE_NUMBER 3 ++ ++class JitProfileRecorder : public CHeapObj { ++public: ++ enum RecorderState { ++ IS_OK = 0, ++ IS_ERR = 1, ++ NOT_INIT = 2 ++ }; ++public: ++ JitProfileRecorder(); ++ virtual ~JitProfileRecorder(); ++ ++ void init(); ++ ++ int class_init_count() { return _class_init_order_num + 1; } ++ ++ address current_init_order_addr() { return (address)&_class_init_order_num;} ++ ++ unsigned int is_flushed() { return _flushed; } ++ void set_flushed(bool value) { _flushed = value; } ++ ++ const char* logfile_name() { return _record_file_name; } ++ ++ JitProfileRecorder* recorder() { return _jit_profile_cache_recorder; } ++ ++ JitProfileCache* holder() { return _holder; } ++ void set_holder(JitProfileCache* h) { _holder = h; } ++ ++ unsigned int recorded_count() { return 
_profile_record_dict->count(); } ++ JitProfileRecordDictionary* dict() { return _profile_record_dict; } ++ ++ void set_logfile_name(const char* name); ++ ++ bool is_valid() { return _recorder_state == IS_OK;} ++ ++ LinkedListImpl* ++ class_init_list() { return _class_init_list; } ++ ++ void add_method(Method* method, int method_bci); ++ ++ void flush_record(); ++ ++ bool param_check(); ++ ++ int assign_class_init_order(InstanceKlass* klass); ++ ++ unsigned int compute_hash(Method* method) { ++ uint64_t m_addr = (uint64_t)method; ++ return (m_addr >> ADDR_CHANGE_NUMBER) * KNUTH_HASH_MULTIPLIER; // Knuth multiply hash ++ } ++ ++ static int compute_crc32(randomAccessFileStream* fileStream); ++ ++private: ++ int _max_symbol_length; ++ unsigned int _pos; ++ volatile int _class_init_order_num; ++ volatile bool _flushed; ++ const char* _record_file_name; ++ ++ JitProfileRecorder* _jit_profile_cache_recorder; ++ JitProfileCache* _holder; ++ randomAccessFileStream* _profilelog; ++ RecorderState _recorder_state; ++ LinkedListImpl* _class_init_list; ++ LinkedListNode* _init_list_tail_node; ++ JitProfileRecordDictionary* _profile_record_dict; ++ ++private: ++ void write_u1(u1 value); ++ void write_u4(u4 value); ++ ++ void write_profilecache_header(); ++ void write_inited_class(); ++ void write_profilecache_record(JitProfileRecorderEntry* entry, int bci, int order); ++ void record_class_info(JitProfileRecorderEntry* entry); ++ void record_method_info(JitProfileRecorderEntry* entry, int bci); ++ void write_profilecache_footer(); ++ ++ void write_string(const char* src, size_t len); ++ void overwrite_u4(u4 value, unsigned int offset); ++ ++ void update_max_symbol_length(int len); ++}; ++ ++#endif +\ No newline at end of file +diff --git a/hotspot/src/share/vm/libadt/dict.cpp b/hotspot/src/share/vm/libadt/dict.cpp +index 8bef7d6ac..835c43581 100644 +--- a/hotspot/src/share/vm/libadt/dict.cpp ++++ b/hotspot/src/share/vm/libadt/dict.cpp +@@ -330,6 +330,29 @@ int hashstr(const void 
*t) { + return (int)((sum+xsum[k]) >> 1); // Hash key, un-modulo'd table size + } + ++int compute_universal_hash(const char *input, int len) { ++ char current_char, k = 0; ++ int32_t sum = 0; ++ int current_position = 0; ++ ++ if( !initflag ) { ++ xsum[0] = (1<> 1); ++} ++ + //------------------------------hashptr-------------------------------------- + // Slimey cheap hash function; no guaranteed performance. Better than the + // default for pointers, especially on MS-DOS machines. +diff --git a/hotspot/src/share/vm/libadt/dict.hpp b/hotspot/src/share/vm/libadt/dict.hpp +index dad45832d..a25a40a5d 100644 +--- a/hotspot/src/share/vm/libadt/dict.hpp ++++ b/hotspot/src/share/vm/libadt/dict.hpp +@@ -89,6 +89,7 @@ class Dict : public ResourceObj { // Dictionary structure + + // Hashing functions + int hashstr(const void *s); // Nice string hash ++int compute_universal_hash(const char *input, int len); // hash string with given length + // Slimey cheap hash function; no guaranteed performance. Better than the + // default for pointers, especially on MS-DOS machines. 
+ int hashptr(const void *key); +diff --git a/hotspot/src/share/vm/oops/constantPool.cpp b/hotspot/src/share/vm/oops/constantPool.cpp +index 929284de4..f9a9d2480 100644 +--- a/hotspot/src/share/vm/oops/constantPool.cpp ++++ b/hotspot/src/share/vm/oops/constantPool.cpp +@@ -32,6 +32,7 @@ + #include "classfile/systemDictionaryShared.hpp" + #include "classfile/vmSymbols.hpp" + #include "interpreter/linkResolver.hpp" ++#include "jprofilecache/jitProfileCache.hpp" + #include "memory/heapInspection.hpp" + #include "memory/metadataFactory.hpp" + #include "memory/oopFactory.hpp" +@@ -44,6 +45,9 @@ + #include "runtime/javaCalls.hpp" + #include "runtime/signature.hpp" + #include "runtime/vframe.hpp" ++#include "utilities/stack.hpp" ++#include "utilities/stack.inline.hpp" ++#include "jprofilecache/jitProfileCacheLog.hpp" + + PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC + +@@ -59,12 +63,19 @@ ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, T + // the resolved_references array, which is recreated at startup time. + // But that could be moved to InstanceKlass (although a pain to access from + // assembly code). Maybe it could be moved to the cpCache which is RW. 
+- return new (loader_data, size, false, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags); ++ if (JProfilingCacheCompileAdvance) { ++ Array* jpc_tags = NULL; ++ jpc_tags = MetadataFactory::new_array(loader_data, length, 0, CHECK_NULL); ++ return new (loader_data, size, false, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags, jpc_tags); ++ } else { ++ return new (loader_data, size, false, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags); ++ } + } + + ConstantPool::ConstantPool(Array* tags) { + set_length(tags->length()); + set_tags(NULL); ++ set_jpc_tags(NULL); + set_cache(NULL); + set_reference_map(NULL); + set_resolved_references(NULL); +@@ -84,6 +95,35 @@ ConstantPool::ConstantPool(Array* tags) { + set_tags(tags); + } + ++ConstantPool::ConstantPool(Array* raw_tags, Array* jpt_markers) { ++ assert(JProfilingCacheCompileAdvance, "must in JProfilingCacheCompileAdvance"); ++ assert(jpt_markers != NULL, "invariant"); ++ assert(jpt_markers->length() == raw_tags->length(), "invariant"); ++ set_length(raw_tags->length()); ++ set_flags(0); ++ set_version(0); ++ set_tags(NULL); ++ set_cache(NULL); ++ set_jpc_tags(NULL); ++ set_operands(NULL); ++ set_reference_map(NULL); ++ set_resolved_references(NULL); ++ set_pool_holder(NULL); ++ ++ set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock")); ++ ++ int length = raw_tags->length(); ++ for (int index = 0; index < length; index++) { ++ raw_tags->at_put(index, JVM_CONSTANT_Invalid); ++ } ++ set_tags(raw_tags); ++ ++ for (int i = 0; i < jpt_markers->length(); i++) { ++ jpt_markers->at_put(i, _jwp_has_not_been_traversed); ++ } ++ set_jpc_tags(jpt_markers); ++} ++ + void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) { + MetadataFactory::free_metadata(loader_data, cache()); + set_cache(NULL); +@@ -98,6 +138,12 @@ void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) { + // free tag array + MetadataFactory::free_array(loader_data, tags()); + set_tags(NULL); ++ 
++ if (JProfilingCacheCompileAdvance) { ++ assert(jwp_tags() != NULL, "should not be NULL"); ++ MetadataFactory::free_array(loader_data, jwp_tags()); ++ set_jpc_tags(NULL); ++ } + } + + void ConstantPool::release_C_heap_structures() { +@@ -1992,9 +2038,85 @@ void ConstantPool::preload_and_initialize_all_classes(ConstantPool* obj, TRAPS) + + #endif + ++void ConstantPool::preload_jprofilecache_classes(TRAPS) { ++ constantPoolHandle cp(THREAD, this); ++ guarantee(cp->pool_holder() != NULL, "must be fully loaded"); ++ if (THREAD->is_eager_class_loading_active()) { ++ return; ++ } ++ THREAD->set_is_eager_class_loading_active(true); ++ Stack s; ++ s.push(cp->pool_holder()); ++ preload_classes_for_jprofilecache(s, THREAD); ++ THREAD->set_is_eager_class_loading_active(false); ++} + +-// Printing ++Klass* ConstantPool::resolve_class_at_index(int constant_pool_index, TRAPS) { ++ assert(THREAD->is_Java_thread(), "must be a Java thread"); ++ if (CompilationProfileCacheResolveClassEagerly) { ++ Klass* k = klass_at(constant_pool_index, CHECK_NULL); ++ return k; ++ } else { ++ Handle mirror_handle; ++ constantPoolHandle current_pool(THREAD, this); ++ Symbol* name = NULL; ++ Handle loader; ++ { ++ if (current_pool->tag_at(constant_pool_index).is_unresolved_klass()) { ++ if (current_pool->tag_at(constant_pool_index).is_unresolved_klass_in_error()) { ++ return NULL; ++ } else { ++ name = current_pool->klass_name_at(constant_pool_index); ++ loader = Handle(THREAD, current_pool->pool_holder()->class_loader()); ++ } ++ } ++ } ++ oop protection_domain = current_pool->pool_holder()->protection_domain(); ++ Handle protection_domain_handle (THREAD, protection_domain); ++ Klass* loaded_oop = SystemDictionary::resolve_or_fail(name, loader, protection_domain_handle, true, THREAD); ++ return loaded_oop; ++ } ++} ++ ++void ConstantPool::preload_classes_for_jprofilecache(Stack& class_processing_stack, ++ TRAPS) { ++ JitProfileCache* jprofilecache = JitProfileCache::instance(); ++ while 
(!class_processing_stack.is_empty()) { ++ constantPoolHandle current_constant_pool(class_processing_stack.pop()->constants()); ++ for (int i = 0; i< current_constant_pool->length(); i++) { ++ bool is_unresolved = false; ++ Symbol* name = NULL; ++ { ++ if (current_constant_pool->tag_at(i).is_unresolved_klass() && !current_constant_pool->jprofilecache_traversed_at(i)) { ++ name = current_constant_pool->klass_name_at(i); ++ is_unresolved = true; ++ current_constant_pool->jprofilecache_has_traversed_at(i); ++ } ++ } ++ if (is_unresolved) { ++ if (name != NULL && !jprofilecache->preloader()->should_preload_class(name)) { ++ continue; ++ } ++ Klass* klass = current_constant_pool->resolve_class_at_index(i, THREAD); ++ if (HAS_PENDING_EXCEPTION) { ++ ResourceMark rm; ++ if (LogLevel::Warning >= LogLevel::LogLevelNum) { ++ tty->print_cr("[JitProfileCache] WARNING : resolve %s from constant pool failed", ++ name->as_C_string()); ++ } ++ if (PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) { ++ CLEAR_PENDING_EXCEPTION; ++ } ++ } ++ if (klass != NULL && klass->oop_is_instance()) { ++ class_processing_stack.push((InstanceKlass*)klass); ++ } ++ } ++ } ++ } ++} + ++// Printing + void ConstantPool::print_on(outputStream* st) const { + EXCEPTION_MARK; + assert(is_constantPool(), "must be constantPool"); +diff --git a/hotspot/src/share/vm/oops/constantPool.hpp b/hotspot/src/share/vm/oops/constantPool.hpp +index a8803b744..3a5ac3933 100644 +--- a/hotspot/src/share/vm/oops/constantPool.hpp ++++ b/hotspot/src/share/vm/oops/constantPool.hpp +@@ -32,6 +32,7 @@ + #include "oops/typeArrayOop.hpp" + #include "runtime/handles.hpp" + #include "utilities/constantTag.hpp" ++#include "utilities/stack.hpp" + #ifdef TARGET_ARCH_x86 + # include "bytes_x86.hpp" + #endif +@@ -99,6 +100,13 @@ class ConstantPool : public Metadata { + jobject _resolved_references; + Array* _reference_map; + ++ enum { ++ _jwp_has_not_been_traversed = 0, ++ _jwp_has_been_traversed = 1 ++ }; ++ ++ Array* 
_jpc_tags; // the jpc tag array records the corresponding tag whether is traversed ++ + enum { + _has_preresolution = 1, // Flags + _on_stack = 2 +@@ -117,6 +125,7 @@ class ConstantPool : public Metadata { + Monitor* _lock; + + void set_tags(Array* tags) { _tags = tags; } ++ void set_jpc_tags(Array* tags) { _jpc_tags = tags; } + void tag_at_put(int which, jbyte t) { tags()->at_put(which, t); } + void release_tag_at_put(int which, jbyte t) { tags()->release_at_put(which, t); } + +@@ -167,6 +176,8 @@ class ConstantPool : public Metadata { + } + + ConstantPool(Array* tags); ++ // for JProfileCache ++ ConstantPool(Array* tags, Array* jwp_tags); + ConstantPool() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); } + public: + static ConstantPool* allocate(ClassLoaderData* loader_data, int length, TRAPS); +@@ -176,6 +187,18 @@ class ConstantPool : public Metadata { + Array* tags() const { return _tags; } + Array* operands() const { return _operands; } + ++ Array* jwp_tags() const { return _jpc_tags; } ++ ++ bool jprofilecache_traversed_at(int which) { ++ assert(0 < which && which < jwp_tags()->length(), "out of bound"); ++ return jwp_tags()->at(which) == _jwp_has_been_traversed; ++ } ++ ++ void jprofilecache_has_traversed_at(int which) { ++ assert(which < jwp_tags()->length(), "out of bound"); ++ jwp_tags()->at_put(which, _jwp_has_been_traversed); ++ } ++ + bool has_preresolution() const { return (_flags & _has_preresolution) != 0; } + void set_has_preresolution() { _flags |= _has_preresolution; } + +@@ -906,6 +929,13 @@ class ConstantPool : public Metadata { + // Compile the world support + static void preload_and_initialize_all_classes(ConstantPool* constant_pool, TRAPS); + #endif ++ ++ void preload_jprofilecache_classes(TRAPS); ++ ++ Klass* resolve_class_at_index(int constant_pool_index, TRAPS); ++ ++private: ++ void preload_classes_for_jprofilecache(Stack& class_processing_stack, TRAPS); + }; + + class SymbolHashMapEntry : public CHeapObj { +diff --git 
a/hotspot/src/share/vm/oops/instanceKlass.cpp b/hotspot/src/share/vm/oops/instanceKlass.cpp +index df9aaabfb..4e2672d95 100644 +--- a/hotspot/src/share/vm/oops/instanceKlass.cpp ++++ b/hotspot/src/share/vm/oops/instanceKlass.cpp +@@ -35,6 +35,7 @@ + #include "interpreter/oopMapCache.hpp" + #include "interpreter/rewriter.hpp" + #include "jvmtifiles/jvmti.h" ++#include "jprofilecache/jitProfileCache.hpp" + #include "memory/genOopClosures.inline.hpp" + #include "memory/heapInspection.hpp" + #include "memory/iterator.inline.hpp" +@@ -327,6 +328,14 @@ InstanceKlass::InstanceKlass(int vtable_len, + // Set temporary value until parseClassFile updates it with the real instance + // size. + set_layout_helper(Klass::instance_layout_helper(0, true)); ++ ++ set_jprofilecache_recorded(false); ++#ifndef PRODUCT ++ set_initialize_order(-1); ++#endif ++ set_crc32(0); ++ set_bytes_size(0); ++ set_source_file_path(NULL); + } + + +@@ -381,6 +390,13 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) { + // Need to take this class off the class loader data list. + loader_data->remove_class(this); + ++ if (JProfilingCacheCompileAdvance || JProfilingCacheRecording) { ++ if (source_file_path() != NULL) { ++ source_file_path()->decrement_refcount(); ++ set_source_file_path(NULL); ++ } ++ } ++ + // The array_klass for this class is created later, after error handling. + // For class redefinition, we keep the original class so this scratch class + // doesn't have an array class. 
Either way, assert that there is nothing +@@ -971,6 +987,9 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) { + this_oop->set_init_state(being_initialized); + this_oop->set_init_thread(jt); + } ++ if (JProfilingCacheRecording) { ++ JitProfileCache::instance()->recorder()->assign_class_init_order(this_oop()); ++ } + + // Step 7 + // Next, if C is a class rather than an interface, initialize its super class and super +diff --git a/hotspot/src/share/vm/oops/instanceKlass.hpp b/hotspot/src/share/vm/oops/instanceKlass.hpp +index 14556a38e..431a6475a 100644 +--- a/hotspot/src/share/vm/oops/instanceKlass.hpp ++++ b/hotspot/src/share/vm/oops/instanceKlass.hpp +@@ -209,6 +209,21 @@ class InstanceKlass: public Klass { + // if this class is unloaded. + Symbol* _array_name; + ++ // if not using JProfileCache, default value is 0 ++ unsigned int _crc32; ++ // if not using JProfileCache, default value is 0 ++ unsigned int _class_bytes_size; ++ ++ // JProfilingCacheCompileAdvance eager init support ++ bool _is_jprofilecache_recorded; ++ ++ // source file path, e.g. /home/xxx/liba.jar ++ Symbol* _source_file_path; ++ ++#ifndef PRODUCT ++ int _initialize_order; ++#endif ++ + // Number of heapOopSize words used by non-static fields in this klass + // (including inherited fields but after header_size()). 
+ int _nonstatic_field_size; +@@ -663,6 +678,23 @@ class InstanceKlass: public Klass { + Symbol* array_name() { return _array_name; } + void set_array_name(Symbol* name) { assert(_array_name == NULL || name == NULL, "name already created"); _array_name = name; } + ++ // JProfileCache support ++ unsigned int crc32() { return _crc32; } ++ void set_crc32(unsigned int crc32) { _crc32 = crc32; } ++ ++ unsigned int bytes_size() { return _class_bytes_size; } ++ void set_bytes_size(unsigned int size) { _class_bytes_size = size; } ++ ++ bool is_jprofilecache_recorded() { return _is_jprofilecache_recorded; } ++ void set_jprofilecache_recorded(bool value) { _is_jprofilecache_recorded = value; } ++ ++ Symbol* source_file_path() { return _source_file_path; } ++ void set_source_file_path(Symbol* value) { _source_file_path = value; } ++ ++#ifndef PRODUCT ++ unsigned int initialize_order() { return _initialize_order; } ++ void set_initialize_order(int order) { _initialize_order = order; } ++#endif + // nonstatic oop-map blocks + static int nonstatic_oop_map_size(unsigned int oop_map_count) { + return oop_map_count * OopMapBlock::size_in_words(); +diff --git a/hotspot/src/share/vm/oops/method.cpp b/hotspot/src/share/vm/oops/method.cpp +index 7cf7e08ed..8151c3524 100644 +--- a/hotspot/src/share/vm/oops/method.cpp ++++ b/hotspot/src/share/vm/oops/method.cpp +@@ -32,6 +32,7 @@ + #include "interpreter/bytecodes.hpp" + #include "interpreter/interpreter.hpp" + #include "interpreter/oopMapCache.hpp" ++#include "jprofilecache/jitProfileCache.hpp" + #include "memory/gcLocker.hpp" + #include "memory/generation.hpp" + #include "memory/heapInspection.hpp" +@@ -97,6 +98,13 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) { + clear_method_counters(); + set_vtable_index(Method::garbage_vtable_index); + ++ set_first_invoke_init_order(INVALID_FIRST_INVOKE_INIT_ORDER); ++ set_compiled_by_jprofilecache(false); ++ ++#ifndef PRODUCT ++ set_deopted_by_jprofilecache(false); 
++#endif ++ + // Fix and bury in Method* + set_interpreter_entry(NULL); // sets i2i entry and from_int + set_adapter_entry(NULL); +diff --git a/hotspot/src/share/vm/oops/method.hpp b/hotspot/src/share/vm/oops/method.hpp +index ee74d959d..f1145b577 100644 +--- a/hotspot/src/share/vm/oops/method.hpp ++++ b/hotspot/src/share/vm/oops/method.hpp +@@ -141,6 +141,12 @@ class Method : public Metadata { + nmethod* volatile _code; // Points to the corresponding piece of native code + volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry + ++ int _first_invoke_init_order; // record class initialize order when this method first been invoked ++ bool _compiled_by_jprofilecache; ++#ifndef PRODUCT ++ bool _deopted_by_jprofilecache; ++#endif ++ + // Constructor + Method(ConstMethod* xconst, AccessFlags access_flags, int size); + public: +@@ -207,6 +213,21 @@ class Method : public Metadata { + return constMethod()->type_annotations(); + } + ++ int first_invoke_init_order() { return _first_invoke_init_order; } ++ void set_first_invoke_init_order(int value) { _first_invoke_init_order = value; } ++ ++ bool compiled_by_jprofilecache() { return _compiled_by_jprofilecache; } ++ void set_compiled_by_jprofilecache(bool value) { _compiled_by_jprofilecache = value; } ++ ++#ifndef PRODUCT ++ bool deopted_by_jprofilecache() { return _deopted_by_jprofilecache; } ++ void set_deopted_by_jprofilecache(bool value) { _deopted_by_jprofilecache = value; } ++#endif ++ ++ static ByteSize first_invoke_init_order_offset() { ++ return byte_offset_of(Method, _first_invoke_init_order); ++ } ++ + // Helper routine: get klass name + "." + method name + signature as + // C string, for the purpose of providing more useful NoSuchMethodErrors + // and fatal error handling. 
The string is allocated in resource +diff --git a/hotspot/src/share/vm/oops/methodData.hpp b/hotspot/src/share/vm/oops/methodData.hpp +index eb121268f..5a8246466 100644 +--- a/hotspot/src/share/vm/oops/methodData.hpp ++++ b/hotspot/src/share/vm/oops/methodData.hpp +@@ -30,6 +30,7 @@ + #include "oops/method.hpp" + #include "oops/oop.hpp" + #include "runtime/orderAccess.hpp" ++#include "jprofilecache/jitProfileRecord.hpp" + + class BytecodeStream; + class KlassSizeStats; +@@ -282,6 +283,8 @@ class ProfileData : public ResourceObj { + friend class TypeEntries; + friend class ReturnTypeEntry; + friend class TypeStackSlotEntries; ++ friend class JitProfileRecorder; ++ friend class JitProfileCacheInfo; + private: + #ifndef PRODUCT + enum { +diff --git a/hotspot/src/share/vm/opto/callGenerator.cpp b/hotspot/src/share/vm/opto/callGenerator.cpp +index a79f1d294..4a6c99e88 100644 +--- a/hotspot/src/share/vm/opto/callGenerator.cpp ++++ b/hotspot/src/share/vm/opto/callGenerator.cpp +@@ -207,7 +207,8 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) { + if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() || + ((ImplicitNullCheckThreshold > 0) && caller_md && + (caller_md->trap_count(Deoptimization::Reason_null_check) +- >= (uint)ImplicitNullCheckThreshold))) { ++ >= (uint)ImplicitNullCheckThreshold)) || ++ (JProfilingCacheCompileAdvance && kit.C->env()->task()->is_jprofilecache_compilation())) { + // Make an explicit receiver null_check as part of this call. + // Since we share a map with the caller, his JVMS gets adjusted. 
+ receiver = kit.null_check_receiver_before_call(method()); +diff --git a/hotspot/src/share/vm/opto/compile.cpp b/hotspot/src/share/vm/opto/compile.cpp +index f55437eb3..a43efe5f1 100644 +--- a/hotspot/src/share/vm/opto/compile.cpp ++++ b/hotspot/src/share/vm/opto/compile.cpp +@@ -729,6 +729,14 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr + + print_compile_messages(); + ++ if (JProfilingCacheCompileAdvance) { ++ bool fields_resolved = ci_env->are_method_fields_all_resolved(method()); ++ if (!fields_resolved) { ++ _failure_reason = "fields needed by method are not all resolved"; ++ return; ++ } ++ } ++ + _ilt = InlineTree::build_inline_tree_root(); + + // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice +diff --git a/hotspot/src/share/vm/opto/graphKit.cpp b/hotspot/src/share/vm/opto/graphKit.cpp +index dfadd3ef0..de67a941b 100644 +--- a/hotspot/src/share/vm/opto/graphKit.cpp ++++ b/hotspot/src/share/vm/opto/graphKit.cpp +@@ -2761,9 +2761,14 @@ bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) { + && obj != null() // And not the -Xcomp stupid case? + && !too_many_traps(Deoptimization::Reason_null_check) + ) { +- if (data == NULL) ++ bool compiledByWarmUp = JProfilingCacheCompileAdvance && this->C->env()->task()->is_jprofilecache_compilation(); ++ if (data == NULL) { ++ if (compiledByWarmUp) { ++ return false; ++ } + // Edge case: no mature data. Be optimistic here. + return true; ++ } + // If the profile has not seen a null, assume it won't happen. 
+ assert(java_bc() == Bytecodes::_checkcast || + java_bc() == Bytecodes::_instanceof || +@@ -2833,6 +2838,12 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj, + bool not_null) { + // type == NULL if profiling tells us this object is always null + if (type != NULL) { ++ if (JProfilingCacheCompileAdvance) { ++ if (this->C->env()->task()->is_jprofilecache_compilation()) { ++ return obj; ++ } ++ } ++ + Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check; + Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check; + if (!too_many_traps(null_reason) && !too_many_recompiles(null_reason) && +diff --git a/hotspot/src/share/vm/opto/lcm.cpp b/hotspot/src/share/vm/opto/lcm.cpp +index c6178a715..6384b5775 100644 +--- a/hotspot/src/share/vm/opto/lcm.cpp ++++ b/hotspot/src/share/vm/opto/lcm.cpp +@@ -98,6 +98,9 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo + // mechanism exists (yet) to set the switches at an os_cpu level + if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return; + ++ // to reduce deoptimization, disable implicit_null_check for jprofilecache compilation ++ if (JProfilingCacheCompileAdvance && this->C->env()->task()->is_jprofilecache_compilation()) return; ++ + // Make sure the ptr-is-null path appears to be uncommon! 
+ float f = block->end()->as_MachIf()->_prob; + if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f; +diff --git a/hotspot/src/share/vm/prims/jvm.cpp b/hotspot/src/share/vm/prims/jvm.cpp +index 2cc38cf66..fb11c91d9 100644 +--- a/hotspot/src/share/vm/prims/jvm.cpp ++++ b/hotspot/src/share/vm/prims/jvm.cpp +@@ -35,8 +35,10 @@ + #include "classfile/systemDictionaryShared.hpp" + #endif + #include "classfile/vmSymbols.hpp" ++#include "code/codeCache.hpp" + #include "gc_interface/collectedHeap.inline.hpp" + #include "interpreter/bytecode.hpp" ++#include "jprofilecache/jitProfileCache.hpp" + #include "jfr/jfrEvents.hpp" + #include "memory/oopFactory.hpp" + #include "memory/referenceType.hpp" +@@ -52,6 +54,7 @@ + #include "prims/nativeLookup.hpp" + #include "prims/privilegedStack.hpp" + #include "runtime/arguments.hpp" ++#include "runtime/compilationPolicy.hpp" + #include "runtime/dtraceJSDT.hpp" + #include "runtime/handles.inline.hpp" + #include "runtime/init.hpp" +@@ -4778,3 +4781,66 @@ JVM_ENTRY(void, JVM_GetVersionInfo(JNIEnv* env, jvm_version_info* info, size_t i + info->is_attachable = AttachListener::is_attach_supported(); + } + JVM_END ++ ++JVM_ENTRY(void, JVM_TriggerPrecompilation(JNIEnv* env, jclass clz)) ++{ ++ JVMWrapper("JVM_TriggerPrecompilation"); ++ if (!JProfilingCacheCompileAdvance) { ++ tty->print_cr("JProfilingCacheCompileAdvance is off, " ++ "triggerPrecompilation is invalid"); ++ return; ++ } ++ Handle mirror(THREAD, JNIHandles::resolve_non_null(clz)); ++ assert(mirror() != NULL, "sanity check"); ++ Klass* k = java_lang_Class::as_Klass(mirror()); ++ Method* dummy_method = k->lookup_method(vmSymbols::jprofilecache_dummy_name(), vmSymbols::void_method_signature()); ++ assert(dummy_method != NULL, "Cannot find dummy method in com.huawei.jprofilecache.JProfileCache"); ++ JitProfileCache* jprofilecache = JitProfileCache::instance(); ++ assert(jprofilecache != NULL, "sanity check"); ++ jprofilecache->set_dummy_method(dummy_method); ++ 
jprofilecache->preloader()->notify_precompilation(); ++} ++JVM_END ++ ++JVM_ENTRY(jboolean, JVM_CheckJProfileCacheCompilationIsComplete(JNIEnv *env, jclass ignored)) ++{ ++ JVMWrapper("JVM_CheckJProfileCacheCompilationIsComplete"); ++ if (!JProfilingCacheCompileAdvance) { ++ tty->print_cr("JProfilingCacheCompileAdvance is off, " ++ "checkIfCompilationIsComplete is invalid"); ++ return JNI_TRUE; ++ } ++ JitProfileCache* jprofilecache = JitProfileCache::instance(); ++ Method* dummyMethod = jprofilecache->dummy_method(); ++ assert(dummyMethod != NULL, "sanity check"); ++ if (dummyMethod->code() != NULL) { ++ return JNI_TRUE; ++ } else { ++ return JNI_FALSE; ++ } ++} ++JVM_END ++ ++JVM_ENTRY(void, JVM_NotifyJVMDeoptProfileCacheMethods(JNIEnv *env, jclass clazz)) ++{ ++ JVMWrapper("JVM_notifyJVMDeoptProfileCacheMethods"); ++ if (!(JProfilingCacheCompileAdvance && CompilationProfileCacheExplicitDeopt)) { ++ tty->print_cr("JProfilingCacheCompileAdvance or CompilationProfileCacheExplicitDeopt is off, " ++ "notifyJVMDeoptProfileCacheMethods is invalid"); ++ return; ++ } ++ JitProfileCache* jprofilecache = JitProfileCache::instance(); ++ Method* dm = jprofilecache->dummy_method(); ++ if (dm != NULL && dm->code() != NULL) { ++ ProfileCacheClassChain* chain = jprofilecache->preloader()->chain(); ++ assert(chain != NULL, "sanity check"); ++ if (chain->notify_deopt_signal()) { ++ tty->print_cr("JitProfileCache: receive signal to deoptimize warmup methods"); ++ } else { ++ tty->print_cr("JitProfileCache: deoptimize signal is ignore"); ++ } ++ } else { ++ tty->print_cr("JitProfileCache: deoptimize signal is ignore because warmup is not finished"); ++ } ++} ++JVM_END +\ No newline at end of file +diff --git a/hotspot/src/share/vm/prims/jvm.h b/hotspot/src/share/vm/prims/jvm.h +index 46a65604c..7307cca14 100644 +--- a/hotspot/src/share/vm/prims/jvm.h ++++ b/hotspot/src/share/vm/prims/jvm.h +@@ -1728,6 +1728,15 @@ typedef struct JDK1_1InitArgs { + jint debugPort; + } JDK1_1InitArgs; 
+ ++JNIEXPORT void JNICALL ++JVM_TriggerPrecompilation(JNIEnv* env, jclass clz); ++ ++JNIEXPORT jboolean JNICALL ++JVM_CheckJProfileCacheCompilationIsComplete(JNIEnv* env, jclass ignored); ++ ++JNIEXPORT void JNICALL ++JVM_NotifyJVMDeoptProfileCacheMethods(JNIEnv* env, jclass clz); ++ + #ifdef __cplusplus + } /* extern "C" */ + #endif /* __cplusplus */ +diff --git a/hotspot/src/share/vm/runtime/globals.hpp b/hotspot/src/share/vm/runtime/globals.hpp +index 2bace3596..77af67ad3 100644 +--- a/hotspot/src/share/vm/runtime/globals.hpp ++++ b/hotspot/src/share/vm/runtime/globals.hpp +@@ -4199,7 +4199,51 @@ class CommandLineFlags { + \ + diagnostic(bool, PrintAsyncGCLog, false, \ + "Print some information of Async GC Log") \ +- ++ \ ++ lp64_product(bool, JProfilingCacheRecording, false, \ ++ "Collect profiling information for JProfilingCache") \ ++ \ ++ lp64_product(bool, JProfilingCacheCompileAdvance, false, \ ++ "Enable JProfilingCacheCompileAdvance from a log file") \ ++ \ ++ lp64_product(ccstr, ProfilingCacheLogLevel, "info", \ ++ "Log level for JProfilingCache") \ ++ \ ++ lp64_product(ccstr, ProfilingCacheFile, NULL, \ ++ "Log file name for JProfilingCache") \ ++ \ ++ lp64_product(uintx, JProfilingCacheRecordTime, 0, \ ++ "Sleep time (in seconds) before flushing profling " \ ++ "information to log file ") \ ++ \ ++ lp64_product(uintx, CompilationProfileCacheAppID, 0, \ ++ "Application ID written in log file for verification ") \ ++ \ ++ lp64_product(ccstr, CompilationProfileCacheExclude, NULL, \ ++ "JProfilingCacheCompileAdvance excluding list ") \ ++ \ ++ lp64_product(bool, CompilationProfileCacheExplicitDeopt, false, \ ++ "Deoptimize JProfileCache methods by explicit api") \ ++ \ ++ lp64_product(uintx, JProfilingCacheDeoptTime, 1200, \ ++ "Sleep time (in seconds) before deoptimizing methods " \ ++ "compiled by JProfileCache ") \ ++ \ ++ diagnostic(uintx, CompilationProfileCacheDeoptMinInterval, 5, \ ++ "JProfileCache method deoptimization minimum interval (in 
seconds)") \ ++ \ ++ diagnostic(uintx, CompilationProfileCacheDeoptNumOfMethodsPerIter, 10, \ ++ "The max number of methods marked for " \ ++ "deoptimization per iteration") \ ++ \ ++ diagnostic(bool, CompilationProfileCacheResolveClassEagerly, true, \ ++ "resolve class from constant pool eagerly") \ ++ \ ++ lp64_product(bool, DeoptimizeBeforeProfileCache, false, \ ++ "Deoptimize recorded methods before JProfileCache compilation") \ ++ \ ++ lp64_product(intx, CompilationProfileCacheRecordMinLevel, 3, \ ++ "Minimal compilation level recorded in JProfileCache recording phase") \ + /* + * Macros for factoring of globals + */ +diff --git a/hotspot/src/share/vm/runtime/init.cpp b/hotspot/src/share/vm/runtime/init.cpp +index 71caac72b..94471af1e 100644 +--- a/hotspot/src/share/vm/runtime/init.cpp ++++ b/hotspot/src/share/vm/runtime/init.cpp +@@ -27,6 +27,8 @@ + #include "code/icBuffer.hpp" + #include "gc_interface/collectedHeap.hpp" + #include "interpreter/bytecodes.hpp" ++#include "jprofilecache/jitProfileCache.hpp" ++#include "jprofilecache/jitProfileRecord.hpp" + #include "memory/universe.hpp" + #include "prims/methodHandles.hpp" + #include "runtime/handles.inline.hpp" +@@ -108,6 +110,15 @@ jint init_globals() { + if (status != JNI_OK) + return status; + ++ if (JProfilingCacheRecording) { ++ JitProfileCache* jpc = JitProfileCache::create_instance(); ++ jpc->init(); ++ if (!jpc->is_valid()) { ++ tty->print_cr("[JitProfileCache] ERROR: init fail."); ++ vm_exit(-1); ++ } ++ } ++ + AsyncLogWriter::initialize(); + interpreter_init(); // before any methods loaded + invocationCounter_init(); // before any methods loaded +@@ -116,6 +127,14 @@ jint init_globals() { + templateTable_init(); + InterfaceSupport_init(); + SharedRuntime::generate_stubs(); ++ if (JProfilingCacheCompileAdvance) { ++ JitProfileCache* jpc = JitProfileCache::create_instance(); ++ jpc->init(); ++ if (!jpc->is_valid()) { ++ tty->print_cr("[JitProfileCache] ERROR: init fail."); ++ vm_exit(-1); ++ } ++ } + 
universe2_init(); // dependent on codeCache_init and stubRoutines_init1 + referenceProcessor_init(); + jni_handles_init(); +diff --git a/hotspot/src/share/vm/runtime/mutexLocker.cpp b/hotspot/src/share/vm/runtime/mutexLocker.cpp +index a5225012d..bf4d1ce35 100644 +--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp ++++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp +@@ -39,6 +39,9 @@ + + Mutex* Patching_lock = NULL; + Monitor* SystemDictionary_lock = NULL; ++Mutex* JitProfileRecorder_lock = NULL; ++Mutex* ProfileCacheClassChain_lock = NULL; ++Mutex* JitProfileCachePrint_lock = NULL; + Mutex* SharedDictionary_lock = NULL; + Mutex* PackageTable_lock = NULL; + Mutex* CompiledIC_lock = NULL; +@@ -230,6 +233,9 @@ void mutex_init() { + def(JmethodIdCreation_lock , Mutex , leaf, true ); // used for creating jmethodIDs. + + def(SystemDictionary_lock , Monitor, leaf, true ); // lookups done by VM thread ++ def(JitProfileRecorder_lock , Mutex , nonleaf+2, true ); // used for JitProfileCache ++ def(ProfileCacheClassChain_lock , Mutex , max_nonleaf, true ); // used for JitProfileCache ++ def(JitProfileCachePrint_lock , Mutex , max_nonleaf, true ); // used for JitProfileCache + def(SharedDictionary_lock , Mutex , leaf, true ); + def(PackageTable_lock , Mutex , leaf, false); + def(InlineCacheBuffer_lock , Mutex , leaf, true ); +diff --git a/hotspot/src/share/vm/runtime/mutexLocker.hpp b/hotspot/src/share/vm/runtime/mutexLocker.hpp +index 7a4518c67..d7f0cc945 100644 +--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp ++++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp +@@ -47,6 +47,9 @@ + + extern Mutex* Patching_lock; // a lock used to guard code patching of compiled code + extern Monitor* SystemDictionary_lock; // a lock on the system dictonary ++extern Mutex* JitProfileRecorder_lock; // a lock on the JProfileCache class JitProfileRecorder ++extern Mutex* ProfileCacheClassChain_lock; // a lock on the JProfileCache preload class chain ++extern Mutex* JitProfileCachePrint_lock; 
// a lock on the JProfileCache jstack print + extern Mutex* SharedDictionary_lock; // a lock on the CDS shared dictionary + extern Mutex* PackageTable_lock; // a lock on the class loader package table + extern Mutex* CompiledIC_lock; // a lock used to guard compiled IC patching and access +diff --git a/hotspot/src/share/vm/runtime/safepoint.cpp b/hotspot/src/share/vm/runtime/safepoint.cpp +index 8408bed4f..bc75482a6 100644 +--- a/hotspot/src/share/vm/runtime/safepoint.cpp ++++ b/hotspot/src/share/vm/runtime/safepoint.cpp +@@ -32,6 +32,7 @@ + #include "code/scopeDesc.hpp" + #include "gc_interface/collectedHeap.hpp" + #include "interpreter/interpreter.hpp" ++#include "jprofilecache/jitProfileCache.hpp" + #include "jfr/jfrEvents.hpp" + #include "memory/resourceArea.hpp" + #include "memory/universe.inline.hpp" +@@ -659,6 +660,15 @@ void SafepointSynchronize::do_cleanup_tasks() { + } + } + ++ if (JProfilingCacheCompileAdvance) { ++ JitProfileCache* jprofilecache = JitProfileCache::instance(); ++ assert(jprofilecache != NULL, "sanity check"); ++ ProfileCacheClassChain* chain = jprofilecache->preloader()->chain(); ++ if (chain->should_deoptimize_methods()) { ++ chain->deoptimize_methods(); ++ } ++ } ++ + if (StringTable::needs_rehashing()) { + const char* name = "rehashing string table"; + EventSafepointCleanupTask event; +diff --git a/hotspot/src/share/vm/runtime/thread.cpp b/hotspot/src/share/vm/runtime/thread.cpp +index fe8995b04..f0d7e3312 100644 +--- a/hotspot/src/share/vm/runtime/thread.cpp ++++ b/hotspot/src/share/vm/runtime/thread.cpp +@@ -35,6 +35,9 @@ + #include "interpreter/oopMapCache.hpp" + #include "jfr/jfrEvents.hpp" + #include "jvmtifiles/jvmtiEnv.hpp" ++#include "jprofilecache/jitProfileCache.hpp" ++#include "jprofilecache/jitProfileCacheThread.hpp" ++#include "jprofilecache/jitProfileCacheDcmds.hpp" + #include "memory/allocation.hpp" + #include "memory/gcLocker.inline.hpp" + #include "memory/metaspaceShared.hpp" +@@ -240,6 +243,7 @@ Thread::Thread() { + 
set_active_handles(NULL); + set_free_handle_block(NULL); + set_last_handle_mark(NULL); ++ set_is_eager_class_loading_active(false); + + // This initial value ==> never claimed. + _oops_do_parity = 0; +@@ -261,6 +265,7 @@ Thread::Thread() { + _current_pending_monitor = NULL; + _current_pending_monitor_is_from_java = true; + _current_waiting_monitor = NULL; ++ _super_class_resolution_depth = 0; + _num_nested_signal = 0; + omFreeList = NULL ; + omFreeCount = 0 ; +@@ -3893,6 +3898,12 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { + + BiasedLocking::init(); + ++ if (JProfilingCacheCompileAdvance) { ++ JitProfileCache* jprofilecache = JitProfileCache::instance(); ++ assert(jprofilecache != NULL, "sanity check"); ++ jprofilecache->preloader()->jvm_booted_is_done(); ++ } ++ + #if INCLUDE_RTM_OPT + RTMLockingCounters::init(); + #endif +@@ -4599,6 +4610,10 @@ void Threads::print_on(outputStream* st, bool print_stacks, + st->cr(); + } + CompileBroker::print_compiler_threads_on(st); ++ if (JProfilingCacheRecording) { ++ JitProfileCacheThread::print_jit_profile_cache_thread_info_on(st); ++ st->cr(); ++ } + if (UseAsyncGCLog) { + AsyncLogWriter* aio_writer = AsyncLogWriter::instance(); + if (aio_writer != NULL) { +diff --git a/hotspot/src/share/vm/runtime/thread.hpp b/hotspot/src/share/vm/runtime/thread.hpp +index 1d3caf9aa..d80ebe643 100644 +--- a/hotspot/src/share/vm/runtime/thread.hpp ++++ b/hotspot/src/share/vm/runtime/thread.hpp +@@ -214,6 +214,18 @@ class Thread: public ThreadShadow { + void leave_signal_handler() { _num_nested_signal--; } + bool is_inside_signal_handler() const { return _num_nested_signal > 0; } + ++ // JProfileCache support ++private: ++ int _super_class_resolution_depth; ++ bool _is_eager_class_loading_active; ++ ++public: ++ void super_class_depth_add() { _super_class_resolution_depth++; } ++ void super_class_depth_dec() { _super_class_resolution_depth--; } ++ bool is_eager_class_loading_active() { return 
_is_eager_class_loading_active; } ++ void set_is_eager_class_loading_active(bool value) { _is_eager_class_loading_active = value; } ++ bool is_super_class_resolution_active() const { return _super_class_resolution_depth > 0; } ++ + private: + // Debug tracing + static void trace(const char* msg, const Thread* const thread) PRODUCT_RETURN; +diff --git a/hotspot/src/share/vm/services/diagnosticCommand.cpp b/hotspot/src/share/vm/services/diagnosticCommand.cpp +index ee3fc2ccd..8df1d1b1b 100644 +--- a/hotspot/src/share/vm/services/diagnosticCommand.cpp ++++ b/hotspot/src/share/vm/services/diagnosticCommand.cpp +@@ -22,6 +22,7 @@ + * + */ + ++#include "jprofilecache/jitProfileCacheDcmds.hpp" + #include "precompiled.hpp" + #include "cds/dynamicArchive.hpp" + #include "classfile/classLoaderHierarchyDCmd.hpp" +@@ -84,6 +85,7 @@ void DCmdRegistrant::register_dcmds(){ + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); ++ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + #endif // LINUX + + // Enhanced JMX Agent Support +diff --git a/hotspot/src/share/vm/utilities/hashtable.cpp b/hotspot/src/share/vm/utilities/hashtable.cpp +index df290d99b..a041f23dd 100644 +--- a/hotspot/src/share/vm/utilities/hashtable.cpp ++++ b/hotspot/src/share/vm/utilities/hashtable.cpp +@@ -438,6 +438,7 @@ template class BasicHashtable; + template class Hashtable; + template class RehashableHashtable; + template class RehashableHashtable; ++template class Hashtable; + template class Hashtable; + template class Hashtable; + template class Hashtable; +@@ -457,3 +458,7 @@ template class BasicHashtable; + template class BasicHashtable; + template class BasicHashtable; + template class BasicHashtable; ++template class Hashtable; ++template class Hashtable; ++template class 
BasicHashtable; ++template class Hashtable; +diff --git a/hotspot/src/share/vm/utilities/ostream.cpp b/hotspot/src/share/vm/utilities/ostream.cpp +index 1eb0d4f12..4288412c1 100644 +--- a/hotspot/src/share/vm/utilities/ostream.cpp ++++ b/hotspot/src/share/vm/utilities/ostream.cpp +@@ -767,6 +767,33 @@ void fileStream::flush() { + fflush(_file); + } + ++randomAccessFileStream::randomAccessFileStream() : fileStream() { } ++ ++randomAccessFileStream::randomAccessFileStream(const char* file_name, const char* open_mode) ++ : fileStream(file_name, open_mode) { } ++ ++void randomAccessFileStream::write(const char* s, size_t len, long pos) { ++ assert(pos <= fileSize(), "pos check"); ++ if (_file != NULL) { ++ long old_pos = ::ftell(_file); ++ if (old_pos != pos) { ++ int ret = seek(pos, SEEK_SET); ++ assert(ret != -1, "fseek return value check"); ++ } ++ size_t count = fwrite(s, 1, len, _file); ++ if (old_pos != pos) { ++ seek(old_pos, SEEK_SET); ++ } ++ } ++} ++ ++void randomAccessFileStream::write(const char* s, size_t len) { ++ if (_file != NULL) { ++ // Make an unused local variable to avoid warning from gcc 4.x compiler. 
++ size_t count = fwrite(s, 1, len, _file); ++ } ++} ++ + jsaFileStream::jsaFileStream(const char* file_name) : fileStream(file_name, "a") { + #ifdef __linux__ + if (_file != NULL) { +diff --git a/hotspot/src/share/vm/utilities/ostream.hpp b/hotspot/src/share/vm/utilities/ostream.hpp +index 85ff5991e..933cd273c 100644 +--- a/hotspot/src/share/vm/utilities/ostream.hpp ++++ b/hotspot/src/share/vm/utilities/ostream.hpp +@@ -202,7 +202,7 @@ class fileStream : public outputStream { + fileStream(const char* file_name); + fileStream(const char* file_name, const char* opentype); + fileStream(FILE* file, bool need_close = false) { _file = file; _need_close = need_close; } +- ~fileStream(); ++ virtual ~fileStream(); + bool is_open() const { return _file != NULL; } + void set_need_close(bool b) { _need_close = b;} + virtual void write(const char* c, size_t len); +@@ -214,6 +214,18 @@ class fileStream : public outputStream { + void flush(); + }; + ++class randomAccessFileStream : public fileStream { ++public: ++ randomAccessFileStream(); ++ randomAccessFileStream(const char* file_name, const char* open_mode); ++ virtual ~randomAccessFileStream() { } ++ virtual void write(const char* data, size_t length); ++ ++ int seek(long offset, int position) { return ::fseek(_file, offset, position); } ++ long tell() { return ::ftell(_file); } ++ virtual void write(const char* data, size_t length, long position); ++}; ++ + class jsaFileStream : public fileStream { + public: + jsaFileStream(const char* file_name); +diff --git a/hotspot/src/share/vm/utilities/symbolRegexMatcher.cpp b/hotspot/src/share/vm/utilities/symbolRegexMatcher.cpp +new file mode 100644 +index 000000000..d4d430239 +--- /dev/null ++++ b/hotspot/src/share/vm/utilities/symbolRegexMatcher.cpp +@@ -0,0 +1,103 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "memory/resourceArea.hpp" ++#include "oops/symbol.hpp" ++#include "utilities/globalDefinitions.hpp" ++#include "utilities/symbolRegexMatcher.hpp" ++ ++#define SYMBOLREGEXMATCHER_INIT_SIZE 4 ++ ++template SymbolRegexMatcher::SymbolRegexMatcher(const char* regexes) ++ : _patterns(new (ResourceObj::C_HEAP, F) ++ GrowableArray(SYMBOLREGEXMATCHER_INIT_SIZE, true, F)) { ++ assert(regexes != NULL, "illegal regexes"); ++ int input_length = (int)strlen(regexes); ++ int current_pattern_length = 0; ++ char* current_pattern_start = (char*)®exes[0]; ++ for (int i = 0; i < input_length + 1; i++) { ++ if (regexes[i] == ',' || regexes[i] == ';' || i == input_length) { ++ add_regex_pattern(current_pattern_start, current_pattern_length); ++ // reset ++ current_pattern_length = -1; ++ current_pattern_start = (char*)®exes[i+1]; ++ } ++ current_pattern_length++; ++ } ++} ++ ++template SymbolRegexMatcher::~SymbolRegexMatcher() { ++ delete _patterns; ++} ++ ++template void 
SymbolRegexMatcher::add_regex_pattern(const char* s, int len) { ++ if (len == 0) { ++ return; ++ } ++ _patterns->push(SymbolPatternMatcher(s, len)); ++} ++ ++template bool SymbolRegexMatcher::matches(Symbol* symbol) { ++ ResourceMark rm; ++ char* s = symbol->as_C_string(); ++ return matches(s); ++} ++ ++template bool SymbolRegexMatcher::matches(const char* s) { ++ int regex_num = _patterns->length(); ++ for (int i = 0; i < regex_num; i++) { ++ const char* regex = _patterns->at(i).regex_pattern(); ++ int regex_len = _patterns->at(i).length(); ++ if (matches_wildcard_pattern(regex, regex_len, s)) { ++ return true; ++ } ++ } ++ return false; ++} ++ ++template bool SymbolRegexMatcher::matches_wildcard_pattern(const char* wildcard_pattern, int pattern_length, const char* target_string) { ++ int s_len = (int)strlen(target_string); ++ if (s_len < pattern_length - 1) { ++ return false; ++ } ++ for (int i =0; i < pattern_length; i++) { ++ if (wildcard_pattern[i] == '*') { ++ return true; ++ } ++ if (wildcard_pattern[i] == target_string[i]) { ++ continue; ++ } ++ if ((wildcard_pattern[i] == '.' && target_string[i] == '/') ++ || (wildcard_pattern[i] == '/' && target_string[i] == '.')) { ++ continue; ++ } ++ if (wildcard_pattern[i] != '*' && wildcard_pattern[i] != target_string[i]) { ++ return false; ++ } ++ } ++ return (s_len == pattern_length); ++} ++ ++template class SymbolRegexMatcher; +\ No newline at end of file +diff --git a/hotspot/src/share/vm/utilities/symbolRegexMatcher.hpp b/hotspot/src/share/vm/utilities/symbolRegexMatcher.hpp +new file mode 100644 +index 000000000..5a719329d +--- /dev/null ++++ b/hotspot/src/share/vm/utilities/symbolRegexMatcher.hpp +@@ -0,0 +1,69 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef SHARED_VM_UTILITIES_SYMBOLREGEXMATCHER_HPP ++#define SHARED_VM_UTILITIES_SYMBOLREGEXMATCHER_HPP ++ ++#include "memory/allocation.hpp" ++#include "utilities/growableArray.hpp" ++ ++class SymbolPatternMatcher { ++public: ++ SymbolPatternMatcher() { } ++ SymbolPatternMatcher(const char* pattern, int length) ++ : _regex_pattern(pattern), ++ _pattern_length(length) { ++ } ++ ++ ~SymbolPatternMatcher() { } ++ ++ int length() { return _pattern_length; } ++ void set_length(int value) { _pattern_length = value; } ++ const char* regex_pattern() { return _regex_pattern; } ++ void set_regex_pattern(char* s) { _regex_pattern = s; } ++ ++private: ++ int _pattern_length; ++ const char* _regex_pattern; ++}; ++ ++template ++class SymbolRegexMatcher : public ResourceObj { ++public: ++ SymbolRegexMatcher(const char* regexes); ++ virtual ~SymbolRegexMatcher(); ++ GrowableArray* patterns() { return _patterns; } ++ ++ bool matches(Symbol* symbol); ++ bool matches(const char* s); ++ ++private: ++ void add_regex_pattern(const char* src, int 
len); ++ bool matches_wildcard_pattern(const char* wildcard_pattern, int pattern_length, const char* target_string); ++ ++ GrowableArray* _patterns; ++}; ++ ++ ++#endif // SHARED_VM_UTILITIES_SYMBOLREGEXMATCHER_HPP +\ No newline at end of file +diff --git a/jdk/make/CreateJars.gmk b/jdk/make/CreateJars.gmk +index cb3f26b30..4611efebe 100644 +--- a/jdk/make/CreateJars.gmk ++++ b/jdk/make/CreateJars.gmk +@@ -557,6 +557,7 @@ EXCLUDE_PROPWARN_PKGS = com.sun.java.swing.plaf.windows \ + # + EXPORTED_PRIVATE_PKGS = com.oracle.net \ + com.oracle.nio \ ++ com.huawei.jprofilecache \ + com.huawei.management \ + com.huawei.jvm.gc + +diff --git a/jdk/make/data/classlist/classlist.linux b/jdk/make/data/classlist/classlist.linux +index 737aefe26..f14040fd6 100644 +--- a/jdk/make/data/classlist/classlist.linux ++++ b/jdk/make/data/classlist/classlist.linux +@@ -2556,5 +2556,6 @@ javax/swing/plaf/basic/BasicToolBarSeparatorUI + java/awt/event/AdjustmentEvent + java/awt/MenuBar + sun/awt/X11/XErrorEvent ++com/huawei/jprofilecache/JProfileCache + com/huawei/jvm/gc + # eea35d9d56e0006e +diff --git a/jdk/make/lib/CoreLibraries.gmk b/jdk/make/lib/CoreLibraries.gmk +index 1af991693..f587b5db6 100644 +--- a/jdk/make/lib/CoreLibraries.gmk ++++ b/jdk/make/lib/CoreLibraries.gmk +@@ -141,6 +141,7 @@ LIBJAVA_SRC_DIRS += $(JDK_TOPDIR)/src/$(OPENJDK_TARGET_OS_API_DIR)/native/java/l + $(JDK_TOPDIR)/src/share/native/java/nio \ + $(JDK_TOPDIR)/src/share/native/java/security \ + $(JDK_TOPDIR)/src/share/native/common \ ++ $(JDK_TOPDIR)/src/share/native/com/huawei/jprofilecache \ + $(JDK_TOPDIR)/src/share/native/sun/misc \ + $(JDK_TOPDIR)/src/share/native/sun/reflect \ + $(JDK_TOPDIR)/src/share/native/com/huawei/jvm/gc \ +diff --git a/jdk/make/mapfiles/libjava/mapfile-linux b/jdk/make/mapfiles/libjava/mapfile-linux +index 61ec28513..c0382344b 100644 +--- a/jdk/make/mapfiles/libjava/mapfile-linux ++++ b/jdk/make/mapfiles/libjava/mapfile-linux +@@ -283,6 +283,8 @@ SUNWprivate_1.1 { + 
Java_jdk_internal_platform_CgroupMetrics_isUseContainerSupport; + Java_jdk_internal_platform_CgroupMetrics_getTotalMemorySize0; + ++ Java_com_huawei_jprofilecache_JProfileCache_registerNatives; ++ + # ZipFile.c needs this one + throwFileNotFoundException; + # zip_util.c needs this +diff --git a/jdk/make/mapfiles/libjava/mapfile-vers b/jdk/make/mapfiles/libjava/mapfile-vers +index abfde04a9..440663a00 100644 +--- a/jdk/make/mapfiles/libjava/mapfile-vers ++++ b/jdk/make/mapfiles/libjava/mapfile-vers +@@ -280,6 +280,8 @@ SUNWprivate_1.1 { + Java_sun_misc_VMSupport_initAgentProperties; + Java_sun_misc_VMSupport_getVMTemporaryDirectory; + ++ Java_com_huawei_jprofilecache_JProfileCache_registerNatives; ++ + # ZipFile.c needs this one + throwFileNotFoundException; + # zip_util.c needs this +diff --git a/jdk/make/mapfiles/libjava/reorder-sparc b/jdk/make/mapfiles/libjava/reorder-sparc +index 95793a5be..66eeebc58 100644 +--- a/jdk/make/mapfiles/libjava/reorder-sparc ++++ b/jdk/make/mapfiles/libjava/reorder-sparc +@@ -105,3 +105,4 @@ text: .text%findJavaTZ_md; + text: .text%Java_java_lang_StrictMath_log; + text: .text%Java_java_lang_StrictMath_sqrt; + text: .text%Java_com_huawei_jvm_gc_AdaptiveHeapMXBeanImpl_registerNatives; ++text: .text%Java_com_huawei_jprofilecache_JProfileCache_registerNatives; +diff --git a/jdk/make/mapfiles/libjava/reorder-sparcv9 b/jdk/make/mapfiles/libjava/reorder-sparcv9 +index c10007b78..223e5de1a 100644 +--- a/jdk/make/mapfiles/libjava/reorder-sparcv9 ++++ b/jdk/make/mapfiles/libjava/reorder-sparcv9 +@@ -100,3 +100,4 @@ text: .text%Java_java_util_TimeZone_getSystemTimeZoneID; + text: .text%findJavaTZ_md; + text: .text%Java_java_lang_StrictMath_log; + text: .text%Java_java_lang_StrictMath_sqrt; ++text: .text%Java_com_huawei_jprofilecache_JProfileCache_registerNatives; +diff --git a/jdk/make/mapfiles/libjava/reorder-x86 b/jdk/make/mapfiles/libjava/reorder-x86 +index 5b7a7ee16..d531b587d 100644 +--- a/jdk/make/mapfiles/libjava/reorder-x86 ++++ 
b/jdk/make/mapfiles/libjava/reorder-x86 +@@ -107,3 +107,4 @@ text: .text%Java_java_lang_Class_isInstance; + text: .text%Java_java_util_TimeZone_getSystemTimeZoneID; + text: .text%findJavaTZ_md; + text: .text%Java_java_lang_StrictMath_log; ++text: .text%Java_com_huawei_jprofilecache_JProfileCache_registerNatives; +diff --git a/jdk/src/share/classes/com/huawei/jprofilecache/JProfileCache.java b/jdk/src/share/classes/com/huawei/jprofilecache/JProfileCache.java +new file mode 100644 +index 000000000..4ec7ef5b5 +--- /dev/null ++++ b/jdk/src/share/classes/com/huawei/jprofilecache/JProfileCache.java +@@ -0,0 +1,71 @@ ++/* ++* Copyright (c) 2025, Huawei and/or its affiliates. Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++ package com.huawei.jprofilecache; ++ ++ public class JProfileCache { ++ ++ // register native methods ++ private static native void registerNatives(); ++ ++ static { ++ registerNatives(); ++ } ++ ++ private static boolean isStartupSignaled = false; ++ ++ public static synchronized void triggerPrecompilation() { ++ if (!isStartupSignaled) { ++ isStartupSignaled = true; ++ triggerPrecompilation0(); ++ } else { ++ throw new IllegalStateException("triggerPrecompilation can be triggered only once"); ++ } ++ } ++ ++ public static synchronized void notifyJVMDeoptProfileCacheMethods() { ++ if (isStartupSignaled && checkIfCompilationIsComplete()) { ++ notifyJVMDeoptProfileCacheMethods0(); ++ } ++ } ++ ++ public static synchronized boolean checkIfCompilationIsComplete() { ++ if (!isStartupSignaled) { ++ throw new IllegalStateException("Must call checkIfCompilationIsComplete after triggerPrecompilation"); ++ } else { ++ return checkIfCompilationIsComplete0(); ++ } ++ } ++ ++ // use for internal validation ++ private void dummy() { ++ throw new UnsupportedOperationException("dummy function"); ++ } ++ ++ private native static void triggerPrecompilation0(); ++ ++ private native static boolean checkIfCompilationIsComplete0(); ++ ++ private native static void notifyJVMDeoptProfileCacheMethods0(); ++ } +\ No newline at end of file +diff --git a/jdk/src/share/javavm/export/jvm.h b/jdk/src/share/javavm/export/jvm.h +index c37f2ad34..d490349a4 100644 +--- a/jdk/src/share/javavm/export/jvm.h ++++ b/jdk/src/share/javavm/export/jvm.h +@@ -333,6 +333,15 @@ JNIEXPORT jclass JNICALL + JVM_LoadClass0(JNIEnv *env, jobject obj, jclass currClass, + jstring currClassName); + ++JNIEXPORT void JNICALL ++JVM_TriggerPrecompilation(JNIEnv* env, jclass clz); ++ ++JNIEXPORT jboolean JNICALL ++JVM_CheckJProfileCacheCompilationIsComplete(JNIEnv* env, jclass ignored); ++ ++JNIEXPORT void JNICALL ++JVM_NotifyJVMDeoptProfileCacheMethods(JNIEnv* env, jclass clz); ++ + /* + * 
java.lang.reflect.Array + */ +diff --git a/jdk/src/share/native/com/huawei/jprofilecache/JProfileCache.c b/jdk/src/share/native/com/huawei/jprofilecache/JProfileCache.c +new file mode 100644 +index 000000000..8c389d318 +--- /dev/null ++++ b/jdk/src/share/native/com/huawei/jprofilecache/JProfileCache.c +@@ -0,0 +1,40 @@ ++/* ++* Copyright (c) 2025, Huawei Technologies Co., Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#include "jni.h" ++#include "jvm.h" ++#include "com_huawei_jprofilecache_JProfileCache.h" ++ ++#define ARRAY_LENGTH(a) (sizeof(a)/sizeof(a[0])) ++ ++static JNINativeMethod jprofilecache_methods[] = { ++ {"triggerPrecompilation0","()V", (void*)&JVM_TriggerPrecompilation}, ++ {"checkIfCompilationIsComplete0","()Z", (void*)&JVM_CheckJProfileCacheCompilationIsComplete}, ++ {"notifyJVMDeoptProfileCacheMethods0","()V", (void*)&JVM_NotifyJVMDeoptProfileCacheMethods} ++}; ++ ++JNIEXPORT void JNICALL ++Java_com_huawei_jprofilecache_JProfileCache_registerNatives(JNIEnv *env, jclass cls) ++{ ++ (*env)->RegisterNatives(env, cls, jprofilecache_methods, ARRAY_LENGTH(jprofilecache_methods)); ++} +\ No newline at end of file +-- +2.47.1.windows.1 + diff --git a/ICCCR2-Simply-the-parameters-of-AppCDS.patch b/ICCCR2-Simply-the-parameters-of-AppCDS.patch new file mode 100644 index 0000000000000000000000000000000000000000..7053562966a76fef27a1550d26867b13004f5fbd --- /dev/null +++ b/ICCCR2-Simply-the-parameters-of-AppCDS.patch @@ -0,0 +1,221 @@ +From fdd0ec805e9de2c0af42c6cf301ba5feb72e9974 Mon Sep 17 00:00:00 2001 +Date: Tue, 3 Jun 2025 22:16:02 +0800 +Subject: [PATCH 3/4] ICCCR2: Simply the parameters of AppCDS + +--- + hotspot/src/share/vm/runtime/globals.hpp | 7 + + hotspot/src/share/vm/runtime/thread.cpp | 159 +++++++++++++++++++++++ + 2 files changed, 166 insertions(+) + +diff --git a/hotspot/src/share/vm/runtime/globals.hpp b/hotspot/src/share/vm/runtime/globals.hpp +index 25c10134f..2bace3596 100644 +--- a/hotspot/src/share/vm/runtime/globals.hpp ++++ b/hotspot/src/share/vm/runtime/globals.hpp +@@ -2757,6 +2757,13 @@ class CommandLineFlags { + product(bool, PrintFlagsFinal, false, \ + "Print all VM flags after argument and ergonomic processing") \ + \ ++ product(ccstr, AutoSharedArchivePath, NULL, \ ++ "Auto enable the AppCDS feature" \ ++ "the path save classlist and jsa file") \ ++ \ ++ product(bool, PrintAutoAppCDS, false, \ ++ "Print some information about 
AutoSharedArchivePath") \ ++ \ + notproduct(bool, PrintFlagsWithComments, false, \ + "Print all VM flags with default values and descriptions and " \ + "exit") \ +diff --git a/hotspot/src/share/vm/runtime/thread.cpp b/hotspot/src/share/vm/runtime/thread.cpp +index 3f1b4444e..fe8995b04 100644 +--- a/hotspot/src/share/vm/runtime/thread.cpp ++++ b/hotspot/src/share/vm/runtime/thread.cpp +@@ -35,6 +35,7 @@ + #include "interpreter/oopMapCache.hpp" + #include "jfr/jfrEvents.hpp" + #include "jvmtifiles/jvmtiEnv.hpp" ++#include "memory/allocation.hpp" + #include "memory/gcLocker.inline.hpp" + #include "memory/metaspaceShared.hpp" + #include "memory/oopFactory.hpp" +@@ -119,6 +120,8 @@ + #include "jfr/jfr.hpp" + #endif + ++#include ++ + PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC + + #ifdef DTRACE_ENABLED +@@ -3358,6 +3361,160 @@ void Threads::threads_do(ThreadClosure* tc) { + // If CompilerThreads ever become non-JavaThreads, add them here + } + ++static char* get_java_executable_path() { ++ const char* java_home = Arguments::get_property("java.home"); ++ if (java_home != NULL) { ++ char* path = NEW_C_HEAP_ARRAY(char, MAXPATHLEN, mtInternal); ++ jio_snprintf(path, MAXPATHLEN, "%s/bin/java", java_home); ++ return path; ++ } ++ return os::strdup("java"); ++} ++ ++static char* get_complete_classpath() { ++ const char* env_cp = Arguments::get_property("env.class.path"); ++ if (env_cp == NULL || env_cp[0] == '\0') { ++ env_cp = Arguments::get_property("java.class.path"); ++ } ++ return (char *)env_cp; ++} ++ ++static bool can_read_classlist(const char* class_list_path) { ++ int fd = open(class_list_path, O_RDONLY); ++ if (fd >= 0) { ++ if (flock(fd, LOCK_EX | LOCK_NB) == 0) { ++ return true; ++ } ++ } ++ return false; ++} ++ ++static void construct_path(char *dest, size_t dest_size, const char *base, const char *suffix) { ++ size_t base_len = strlen(base); ++ size_t suffix_len = strlen(suffix); ++ guarantee(base_len + suffix_len < dest_size, "base path too long!"); ++ ++ 
jio_snprintf(dest, dest_size, "%s%s", base, suffix); ++} ++ ++static void create_jsa(const char* class_list_path, const char* appcds_path, const JavaVMInitArgs* original_args) { ++ pid_t pid = fork(); ++ if (pid == 0) { ++ // child process running on background ++ setsid(); ++ signal(SIGHUP, SIG_IGN); ++ const char* classpath = get_complete_classpath(); ++ if (classpath == NULL) { ++ classpath = "."; ++ } ++ char* java_path = get_java_executable_path(); ++ int arg_count = Arguments::num_jvm_args(); ++ char** vm_args = Arguments::jvm_args_array(); ++ ++ int total_args = arg_count + 8; ++ char** args = NEW_C_HEAP_ARRAY(char*, total_args + 1, mtInternal); ++ int idx = 0; ++ ++ args[idx++] = java_path; ++ args[idx++] = os::strdup("-Xshare:dump"); ++ args[idx++] = os::strdup("-XX:+UseAppCDS"); ++ ++ char shared_class_list_file[PATH_MAX]; ++ char shared_archive_file[PATH_MAX]; ++ construct_path(shared_class_list_file, sizeof(shared_class_list_file), "-XX:SharedClassListFile=", class_list_path); ++ construct_path(shared_archive_file, sizeof(shared_archive_file), "-XX:SharedArchiveFile=", appcds_path); ++ ++ args[idx++] = strdup(shared_class_list_file); ++ args[idx++] = strdup(shared_archive_file); ++ ++ args[idx++] = os::strdup("-classpath"); ++ args[idx++] = os::strdup(classpath); ++ for (int i = 0; i < arg_count; i++) { ++ if (vm_args[i] != NULL) { ++ args[idx++] = os::strdup(vm_args[i]); ++ } ++ } ++ args[idx++] = os::strdup("-version"); ++ args[idx] = NULL; ++ ++ if (PrintAutoAppCDS) { ++ int i = 0; ++ while (args[i] != NULL) { ++ tty->print_cr("args[%d] = %s", i, args[i]); ++ i++; ++ } ++ } ++ execv(java_path, args); ++ } ++} ++ ++static void handle_appcds_for_executor(const JavaVMInitArgs* args) { ++ if (FLAG_IS_DEFAULT(AutoSharedArchivePath)) { ++ return; ++ } ++ ++ if (AutoSharedArchivePath == NULL) { ++ warning("AutoSharedArchivePath should not be empty. 
Please set the specific path."); ++ return; ++ } ++ ++ static char base_path[JVM_MAXPATHLEN] = {'\0'}; ++ jio_snprintf(base_path, sizeof(base_path), "%s", AutoSharedArchivePath); ++ ++ struct stat st; ++ if (stat(base_path, &st) != 0) { ++ if (mkdir(base_path, 0755) != 0) { ++ vm_exit_during_initialization(err_msg("can't create dirs %s : %s", base_path, strerror(errno))); ++ } ++ } ++ ++ char class_list_path[PATH_MAX]; ++ char appcds_path[PATH_MAX]; ++ ++ construct_path(class_list_path, sizeof(class_list_path), base_path, "/appcds.lst"); ++ construct_path(appcds_path, sizeof(appcds_path), base_path, "/appcds.jsa"); ++ ++ if (PrintAutoAppCDS) { ++ tty->print_cr("classlist file : %s", class_list_path); ++ tty->print_cr("jsa file : %s", appcds_path); ++ } ++ ++ const char* class_list_ptr = class_list_path; ++ const char* appcds_ptr = appcds_path; ++ ++ if (stat(appcds_path, &st) == 0) { ++ FLAG_SET_CMDLINE(bool, UseAppCDS, true); ++ FLAG_SET_CMDLINE(bool, UseSharedSpaces, true); ++ FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true); ++ CommandLineFlags::ccstrAtPut("SharedArchiveFile", &appcds_ptr, Flag::COMMAND_LINE); ++ if (PrintAutoAppCDS) { ++ tty->print_cr("Use AppCDS JSA."); ++ } ++ return; ++ } ++ ++ if (stat(class_list_path, &st) == 0) { ++ if (!can_read_classlist(class_list_path)) { ++ if(PrintAutoAppCDS) { ++ tty->print_cr("classlist is generating."); ++ } ++ return; ++ } ++ if (stat(appcds_path, &st) != 0) { ++ if (PrintAutoAppCDS) { ++ tty->print_cr("Create JSA file."); ++ } ++ create_jsa(class_list_path, appcds_path, args); ++ } ++ } else { ++ can_read_classlist(class_list_path); ++ FLAG_SET_CMDLINE(bool, UseAppCDS, true); ++ FLAG_SET_CMDLINE(bool, UseSharedSpaces, false); ++ FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false); ++ CommandLineFlags::ccstrAtPut("DumpLoadedClassList", &class_list_ptr, Flag::COMMAND_LINE); ++ } ++} ++ + jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { + + extern void JDK_Version_init(); +@@ -3393,6 +3550,8 @@ 
jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { + + os::init_before_ergo(); + ++ handle_appcds_for_executor(args); ++ + jint ergo_result = Arguments::apply_ergo(); + if (ergo_result != JNI_OK) return ergo_result; + +-- +2.47.1.windows.1 + diff --git a/openjdk-1.8.0.spec b/openjdk-1.8.0.spec index ad0972d94d4746436e266be4482511949c635e2e..00668dce2a1691a0f92fb0a67838ece177eadcca 100644 --- a/openjdk-1.8.0.spec +++ b/openjdk-1.8.0.spec @@ -953,7 +953,7 @@ Provides: java-%{javaver}-%{origin}-accessibility%{?1} = %{epoch}:%{version}-%{r Name: java-%{javaver}-%{origin} Version: %{javaver}.%{updatever}.%{buildver} -Release: 2 +Release: 3 # java-1.5.0-ibm from jpackage.org set Epoch to 1 for unknown reasons # and this change was brought into RHEL-4. java-1.5.0-ibm packages # also included the epoch in their virtual provides. This created a @@ -1371,6 +1371,9 @@ Patch458: 8352716-tz-Update-Timezone-Data-to-2025b.patch #452 Patch459: heapdump-bug-fix.patch +Patch460: Add-Dynamic-Max-Heap-feature-for-G1GC.patch +Patch461: ICCCR2-Simply-the-parameters-of-AppCDS.patch +Patch462: Add-JitProfileCache-feature.patch ############################################# # @@ -2050,6 +2053,9 @@ pushd %{top_level_dir_name} %patch457 -p1 %patch458 -p1 %patch459 -p1 +%patch460 -p1 +%patch461 -p1 +%patch462 -p1 %endif %ifarch loongarch64 @@ -2717,6 +2723,11 @@ cjc.mainProgram(args) -- the returns from copy_jdk_configs.lua should not affect %endif %changelog +* Sat Jun 7 2025 huangjie -1:1.8.0.452.b09-3 +- add Add-Dynamic-Max-Heap-feature-for-G1GC.patch +- add ICCCR2-Simply-the-parameters-of-AppCDS.patch +- add Add-JitProfileCache-feature.patch + * Sat May 17 2025 Benshuai5D -1:1.8.0.452.b09-2 - add heapdump-bug-fix.patch