From 55fcf26fdf2204a879f82c08c0b0cfa302c456eb Mon Sep 17 00:00:00 2001 From: cruise01 Date: Tue, 22 Dec 2020 16:44:56 +0800 Subject: [PATCH] I2ADAN: G1 memory uncommit --- ...ed-test-result-caused-by-C2-MergeMem.patch | 2 +- G1-memory-uncommit.patch | 2121 +++++++++++++++++ java-1.8.0-openjdk.spec | 7 +- 3 files changed, 2128 insertions(+), 2 deletions(-) create mode 100755 G1-memory-uncommit.patch diff --git a/8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch b/8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch index 6139bd6..c08b12c 100644 --- a/8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch +++ b/8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch @@ -112,7 +112,7 @@ index 000000000..d4c93b390 +++ b/hotspot/test/compiler/c2/TestReplaceEquivPhis.java @@ -0,0 +1,77 @@ +/* -+ * Copyright (c) 2020, Huawei Technologies Co. Ltd. All rights reserved. ++ * Copyright (c) 2020, Huawei Technologies Co., LTD. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it diff --git a/G1-memory-uncommit.patch b/G1-memory-uncommit.patch new file mode 100755 index 0000000..da20a27 --- /dev/null +++ b/G1-memory-uncommit.patch @@ -0,0 +1,2121 @@ +diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java +index 84f0a4ac..b38ee52e 100644 +--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java ++++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java +@@ -54,6 +54,7 @@ public enum GCCause { + + _g1_inc_collection_pause ("G1 Evacuation Pause"), + _g1_humongous_allocation ("G1 Humongous Allocation"), ++ _g1_periodic_gc ("G1 Periodic GC"), + + _shenandoah_allocation_failure_evac ("Allocation Failure During Evacuation"), + _shenandoah_stop_vm ("Stopping VM"), +diff --git a/hotspot/make/bsd/makefiles/mapfile-vers-debug b/hotspot/make/bsd/makefiles/mapfile-vers-debug +index 49a70edc..00651d42 100644 +--- a/hotspot/make/bsd/makefiles/mapfile-vers-debug ++++ b/hotspot/make/bsd/makefiles/mapfile-vers-debug +@@ -230,6 +230,10 @@ + _JVM_SetPrimitiveArrayElement + _JVM_SetSockOpt + _JVM_SetThreadPriority ++ _JVM_AdaptiveHeapSetG1PeriodicGCInterval ++ _JVM_AdaptiveHeapGetG1PeriodicGCInterval ++ _JVM_AdaptiveHeapSetG1PeriodicGCLoadThreshold ++ _JVM_AdaptiveHeapGetG1PeriodicGCLoadThreshold + _JVM_Sleep + _JVM_Socket + _JVM_SocketAvailable +diff --git a/hotspot/make/bsd/makefiles/mapfile-vers-product b/hotspot/make/bsd/makefiles/mapfile-vers-product +index 50274671..4d51f425 100644 +--- a/hotspot/make/bsd/makefiles/mapfile-vers-product ++++ b/hotspot/make/bsd/makefiles/mapfile-vers-product +@@ -230,6 +230,10 @@ + _JVM_SetPrimitiveArrayElement + _JVM_SetSockOpt + _JVM_SetThreadPriority ++ _JVM_AdaptiveHeapGetG1PeriodicGCInterval ++ _JVM_AdaptiveHeapGetG1PeriodicGCLoadThreshold ++ _JVM_AdaptiveHeapSetG1PeriodicGCInterval ++ 
_JVM_AdaptiveHeapSetG1PeriodicGCLoadThreshold + _JVM_Sleep + _JVM_Socket + _JVM_SocketAvailable +diff --git a/hotspot/make/linux/makefiles/mapfile-vers-debug b/hotspot/make/linux/makefiles/mapfile-vers-debug +index 814a32f6..e1bb0c34 100644 +--- a/hotspot/make/linux/makefiles/mapfile-vers-debug ++++ b/hotspot/make/linux/makefiles/mapfile-vers-debug +@@ -233,6 +233,10 @@ SUNWprivate_1.1 { + JVM_SetPrimitiveArrayElement; + JVM_SetSockOpt; + JVM_SetThreadPriority; ++ JVM_AdaptiveHeapSetG1PeriodicGCInterval; ++ JVM_AdaptiveHeapGetG1PeriodicGCInterval; ++ JVM_AdaptiveHeapSetG1PeriodicGCLoadThreshold; ++ JVM_AdaptiveHeapGetG1PeriodicGCLoadThreshold; + JVM_Sleep; + JVM_Socket; + JVM_SocketAvailable; +diff --git a/hotspot/make/linux/makefiles/mapfile-vers-product b/hotspot/make/linux/makefiles/mapfile-vers-product +index caf9a1ce..f6aba6ef 100644 +--- a/hotspot/make/linux/makefiles/mapfile-vers-product ++++ b/hotspot/make/linux/makefiles/mapfile-vers-product +@@ -233,6 +233,10 @@ SUNWprivate_1.1 { + JVM_SetPrimitiveArrayElement; + JVM_SetSockOpt; + JVM_SetThreadPriority; ++ JVM_AdaptiveHeapSetG1PeriodicGCInterval; ++ JVM_AdaptiveHeapGetG1PeriodicGCInterval; ++ JVM_AdaptiveHeapSetG1PeriodicGCLoadThreshold; ++ JVM_AdaptiveHeapGetG1PeriodicGCLoadThreshold; + JVM_Sleep; + JVM_Socket; + JVM_SocketAvailable; +diff --git a/hotspot/src/os/aix/vm/os_aix.cpp b/hotspot/src/os/aix/vm/os_aix.cpp +index 4abd2f03..b078bee0 100644 +--- a/hotspot/src/os/aix/vm/os_aix.cpp ++++ b/hotspot/src/os/aix/vm/os_aix.cpp +@@ -4476,6 +4476,10 @@ bool os::is_thread_cpu_time_supported() { + return true; + } + ++double os::get_process_load() { ++ return -1.0; ++} ++ + // System loadavg support. Returns -1 if load average cannot be obtained. + // For now just return the system wide load average (no processor sets). 
+ int os::loadavg(double values[], int nelem) { +diff --git a/hotspot/src/os/bsd/vm/os_bsd.cpp b/hotspot/src/os/bsd/vm/os_bsd.cpp +index 46673771..7cd79123 100644 +--- a/hotspot/src/os/bsd/vm/os_bsd.cpp ++++ b/hotspot/src/os/bsd/vm/os_bsd.cpp +@@ -4293,6 +4293,10 @@ bool os::is_thread_cpu_time_supported() { + #endif + } + ++double os::get_process_load() { ++ return -1.0; ++} ++ + // System loadavg support. Returns -1 if load average cannot be obtained. + // Bsd doesn't yet have a (official) notion of processor sets, + // so just return the system wide load average. +diff --git a/hotspot/src/os/linux/vm/os_linux.cpp b/hotspot/src/os/linux/vm/os_linux.cpp +index 5e0c18e6..ad3a82c4 100644 +--- a/hotspot/src/os/linux/vm/os_linux.cpp ++++ b/hotspot/src/os/linux/vm/os_linux.cpp +@@ -38,6 +38,7 @@ + #include "oops/oop.inline.hpp" + #include "os_share_linux.hpp" + #include "osContainer_linux.hpp" ++#include "process_load.hpp" + #include "prims/jniFastGetField.hpp" + #include "prims/jvm.h" + #include "prims/jvm_misc.hpp" +@@ -5889,6 +5890,15 @@ int os::loadavg(double loadavg[], int nelem) { + return ::getloadavg(loadavg, nelem); + } + ++double os::get_process_load() { ++ double u, s; ++ u = get_cpuload_internal(-1, &s, CPU_LOAD_VM_ONLY); ++ if (u < 0) { ++ return -1.0; ++ } ++ return u + s; ++} ++ + void os::pause() { + char filename[MAX_PATH]; + if (PauseAtStartupFile && PauseAtStartupFile[0]) { +diff --git a/hotspot/src/os/linux/vm/process_load.hpp b/hotspot/src/os/linux/vm/process_load.hpp +new file mode 100644 +index 00000000..83800b19 +--- /dev/null ++++ b/hotspot/src/os/linux/vm/process_load.hpp +@@ -0,0 +1,299 @@ ++/* ++ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
Oracle designates this ++ * particular file as subject to the "Classpath" exception as provided ++ * by Oracle in the LICENSE file that accompanied this code. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++#ifndef OS_LINUX_VM_PROCESS_LOAD_HPP ++#define OS_LINUX_VM_PROCESS_LOAD_HPP ++ ++#include <stdio.h> ++#include <stdint.h> ++#include <stdarg.h> ++#include <string.h> ++#include <stdlib.h> ++#include <unistd.h> ++#include <sys/types.h> ++#include <sys/stat.h> ++#include <dirent.h> ++#include <fcntl.h> ++#include <limits.h> ++#include <pthread.h> ++ ++struct ticks { ++ uint64_t used; ++ uint64_t usedKernel; ++ uint64_t total; ++}; ++ ++typedef struct ticks ticks; ++ ++typedef enum { ++ CPU_LOAD_VM_ONLY, ++ CPU_LOAD_GLOBAL, ++} CpuLoadTarget; ++ ++static struct perfbuf { ++ int nProcs; ++ ticks jvmTicks; ++ ticks cpuTicks; ++ ticks *cpus; ++} counters; ++ ++static void next_line(FILE *f) { ++ while (fgetc(f) != '\n'); ++} ++ ++/** ++ * Return the total number of ticks since the system was booted. ++ * If the usedTicks parameter is not NULL, it will be filled with ++ * the number of ticks spent on actual processes (user, system or ++ * nice processes) since system boot. Note that this is the total number ++ * of "executed" ticks on _all_ CPU:s, that is on a n-way system it is ++ * n times the number of ticks that has passed in clock time. ++ * ++ * Returns a negative value if the reading of the ticks failed. 
++ */ ++static int get_totalticks(int which, ticks *pticks) { ++ FILE *fh; ++ uint64_t userTicks, niceTicks, systemTicks, idleTicks; ++ uint64_t iowTicks = 0, irqTicks = 0, sirqTicks= 0; ++ int n; ++ ++ if((fh = fopen("/proc/stat", "r")) == NULL) { ++ return -1; ++ } ++ ++ n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " ++ UINT64_FORMAT " " UINT64_FORMAT, ++ &userTicks, &niceTicks, &systemTicks, &idleTicks, ++ &iowTicks, &irqTicks, &sirqTicks); ++ ++ // Move to next line ++ next_line(fh); ++ ++ //find the line for requested cpu faster to just iterate linefeeds? ++ if (which != -1) { ++ int i; ++ for (i = 0; i < which; i++) { ++ if (fscanf(fh, "cpu%*d " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " ++ UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT, ++ &userTicks, &niceTicks, &systemTicks, &idleTicks, ++ &iowTicks, &irqTicks, &sirqTicks) < 4) { ++ fclose(fh); ++ return -2; ++ } ++ next_line(fh); ++ } ++ n = fscanf(fh, "cpu%*d " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " ++ UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT "\n", ++ &userTicks, &niceTicks, &systemTicks, &idleTicks, ++ &iowTicks, &irqTicks, &sirqTicks); ++ } ++ ++ fclose(fh); ++ if (n < 4) { ++ return -2; ++ } ++ ++ pticks->used = userTicks + niceTicks; ++ pticks->usedKernel = systemTicks + irqTicks + sirqTicks; ++ pticks->total = userTicks + niceTicks + systemTicks + idleTicks + ++ iowTicks + irqTicks + sirqTicks; ++ ++ return 0; ++} ++ ++static int vread_statdata(const char *procfile, const char *fmt, va_list args) { ++ FILE *f; ++ int n; ++ char buf[2048]; ++ ++ if ((f = fopen(procfile, "r")) == NULL) { ++ return -1; ++ } ++ ++ if ((n = fread(buf, 1, sizeof(buf), f)) != -1) { ++ char *tmp; ++ ++ buf[n-1] = '\0'; ++ /** skip through pid and exec name. the exec name _could be wacky_ (renamed) and ++ * make scanf go mupp. 
++ */ ++ if ((tmp = strrchr(buf, ')')) != NULL) { ++ // skip the ')' and the following space but check that the buffer is long enough ++ tmp += 2; ++ if (tmp < buf + n) { ++ n = vsscanf(tmp, fmt, args); ++ } ++ } ++ } ++ ++ fclose(f); ++ ++ return n; ++} ++ ++static int read_statdata(const char *procfile, const char *fmt, ...) { ++ int n; ++ va_list args; ++ ++ va_start(args, fmt); ++ n = vread_statdata(procfile, fmt, args); ++ va_end(args); ++ return n; ++} ++ ++/** read user and system ticks from a named procfile, assumed to be in 'stat' format then. */ ++static int read_ticks(const char *procfile, uint64_t *userTicks, uint64_t *systemTicks) { ++ return read_statdata(procfile, "%*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u "UINT64_FORMAT" "UINT64_FORMAT, ++ userTicks, systemTicks ++ ); ++} ++ ++/** ++ * Return the number of ticks spent in any of the processes belonging ++ * to the JVM on any CPU. ++ */ ++static int get_jvmticks(ticks *pticks) { ++ uint64_t userTicks; ++ uint64_t systemTicks; ++ ++ if (read_ticks("/proc/self/stat", &userTicks, &systemTicks) < 0) { ++ return -1; ++ } ++ ++ // get the total ++ if (get_totalticks(-1, pticks) < 0) { ++ return -1; ++ } ++ ++ pticks->used = userTicks; ++ pticks->usedKernel = systemTicks; ++ ++ return 0; ++} ++ ++/** ++ * This method must be called first, before any data can be gathered. ++ */ ++int perfInit() { ++ static int initialized=0; ++ ++ if (!initialized) { ++ int i; ++ ++ int n = sysconf(_SC_NPROCESSORS_ONLN); ++ if (n <= 0) { ++ n = 1; ++ } ++ ++ counters.cpus = (ticks*)calloc(n,sizeof(ticks)); ++ if (counters.cpus != NULL) { ++ // For the CPU load ++ get_totalticks(-1, &counters.cpuTicks); ++ ++ for (i = 0; i < n; i++) { ++ get_totalticks(i, &counters.cpus[i]); ++ } ++ // For JVM load ++ get_jvmticks(&counters.jvmTicks); ++ initialized = 1; ++ } ++ } ++ ++ return initialized ? 0 : -1; ++} ++ ++static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; ++ ++/** ++ * Return the load of the CPU as a double. 
1.0 means the CPU process uses all ++ * available time for user or system processes, 0.0 means the CPU uses all time ++ * being idle. ++ * ++ * Returns a negative value if there is a problem in determining the CPU load. ++ */ ++ ++static double get_cpuload_internal(int which, double *pkernelLoad, CpuLoadTarget target) { ++ uint64_t udiff, kdiff, tdiff; ++ ticks *pticks, tmp; ++ double user_load = -1.0; ++ int failed = 0; ++ ++ *pkernelLoad = 0.0; ++ ++ pthread_mutex_lock(&lock); ++ ++ if(perfInit() == 0) { ++ ++ if (target == CPU_LOAD_VM_ONLY) { ++ pticks = &counters.jvmTicks; ++ } else if (which == -1) { ++ pticks = &counters.cpuTicks; ++ } else { ++ pticks = &counters.cpus[which]; ++ } ++ ++ tmp = *pticks; ++ ++ if (target == CPU_LOAD_VM_ONLY) { ++ if (get_jvmticks(pticks) != 0) { ++ failed = 1; ++ } ++ } else if (get_totalticks(which, pticks) < 0) { ++ failed = 1; ++ } ++ ++ if(!failed) { ++ // seems like we sometimes end up with less kernel ticks when ++ // reading /proc/self/stat a second time, timing issue between cpus? 
++ if (pticks->usedKernel < tmp.usedKernel) { ++ kdiff = 0; ++ } else { ++ kdiff = pticks->usedKernel - tmp.usedKernel; ++ } ++ tdiff = pticks->total - tmp.total; ++ udiff = pticks->used - tmp.used; ++ ++ if (tdiff == 0) { ++ user_load = 0; ++ } else { ++ if (tdiff < (udiff + kdiff)) { ++ tdiff = udiff + kdiff; ++ } ++ *pkernelLoad = (kdiff / (double)tdiff); ++ // BUG9044876, normalize return values to sane values ++ *pkernelLoad = MAX(*pkernelLoad, 0.0); ++ *pkernelLoad = MIN(*pkernelLoad, 1.0); ++ ++ user_load = (udiff / (double)tdiff); ++ user_load = MAX(user_load, 0.0); ++ user_load = MIN(user_load, 1.0); ++ } ++ } ++ } ++ pthread_mutex_unlock(&lock); ++ return user_load; ++} ++ ++#endif +diff --git a/hotspot/src/os/solaris/vm/os_solaris.cpp b/hotspot/src/os/solaris/vm/os_solaris.cpp +index 9c9de85a..73253843 100644 +--- a/hotspot/src/os/solaris/vm/os_solaris.cpp ++++ b/hotspot/src/os/solaris/vm/os_solaris.cpp +@@ -5709,6 +5709,10 @@ bool os::is_thread_cpu_time_supported() { + } + } + ++double os::get_process_load() { ++ return -1.0; ++} ++ + // System loadavg support. Returns -1 if load average cannot be obtained. + // Return the load average for our processor set if the primitive exists + // (Solaris 9 and later). Otherwise just return system wide loadavg. +diff --git a/hotspot/src/os/windows/vm/os_windows.cpp b/hotspot/src/os/windows/vm/os_windows.cpp +index 74412a3e..e7ff202a 100644 +--- a/hotspot/src/os/windows/vm/os_windows.cpp ++++ b/hotspot/src/os/windows/vm/os_windows.cpp +@@ -4312,6 +4312,10 @@ bool os::is_thread_cpu_time_supported() { + } + } + ++double os::get_process_load() { ++ return -1.0; ++} ++ + // Windows does't provide a loadavg primitive so this is stubbed out for now. + // It does have primitives (PDH API) to get CPU usage and run queue length. 
+ // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" +diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp +index a42b8ec7..98a43ba6 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp +@@ -25,6 +25,7 @@ + #include "precompiled.hpp" + #include "gc_implementation/g1/concurrentG1Refine.hpp" + #include "gc_implementation/g1/concurrentG1RefineThread.hpp" ++#include "gc_implementation/g1/concurrentMarkThread.hpp" + #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" + #include "gc_implementation/g1/g1CollectorPolicy.hpp" + #include "memory/resourceArea.hpp" +@@ -43,7 +44,8 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex + _next(next), + _monitor(NULL), + _cg1r(cg1r), +- _vtime_accum(0.0) ++ _vtime_accum(0.0), ++ _last_periodic_gc_attempt_s(0.0) + { + + // Each thread has its own monitor. The i-th thread is responsible for signalling +@@ -98,6 +100,69 @@ void ConcurrentG1RefineThread::sample_young_list_rs_lengths() { + } + } + ++bool ConcurrentG1RefineThread::should_start_periodic_gc() { ++ // If we are currently in a concurrent mark we are going to uncommit memory soon. ++ if (G1CollectedHeap::heap()->concurrent_mark()->cmThread()->during_cycle()) { ++ if (G1UncommitLog) { ++ gclog_or_tty->print_cr("Concurrent cycle in progress. Skipping."); ++ } ++ return false; ++ } ++ ++ // Check if enough time has passed since the last GC. ++ uintx time_since_last_gc; ++ if ((time_since_last_gc = (uintx)Universe::heap()->millis_since_last_gc()) < G1PeriodicGCInterval) { ++ if (G1UncommitLog) { ++ gclog_or_tty->print_cr("Last GC occurred " UINTX_FORMAT "ms before which is below threshold " UINTX_FORMAT "ms. 
Skipping.", ++ time_since_last_gc, G1PeriodicGCInterval); ++ } ++ return false; ++ } ++ ++ return true; ++} ++ ++void ConcurrentG1RefineThread::check_for_periodic_gc() { ++ if (!G1Uncommit) { ++ return; ++ } ++ ++ assert(G1PeriodicGCInterval > 0, "just checking"); ++ double recent_load = -1.0; ++ G1CollectedHeap* g1h = G1CollectedHeap::heap(); ++ G1CollectorPolicy* g1p = g1h->g1_policy(); ++ if (G1PeriodicGCLoadThreshold) { ++ // Sample process load and store it ++ if (G1PeriodicGCProcessLoad) { ++ recent_load = os::get_process_load() * 100; ++ } ++ if (recent_load < 0) { ++ // Fallback to os load ++ G1PeriodicGCProcessLoad = false; ++ if (os::loadavg(&recent_load, 1) != -1) { ++ static int cpu_count = os::active_processor_count(); ++ assert(cpu_count > 0, "just checking"); ++ recent_load = recent_load * 100 / cpu_count; ++ } ++ } ++ if (recent_load >= 0) { ++ g1p->add_os_load(recent_load); ++ } ++ } ++ ++ double now = os::elapsedTime(); ++ if (now - _last_periodic_gc_attempt_s > G1PeriodicGCInterval / 1000.0) { ++ if (G1UncommitLog) { ++ recent_load < 0 ? gclog_or_tty->print_cr("Checking for periodic GC.") ++ : gclog_or_tty->print_cr("Checking for periodic GC. Current load %1.2f. 
Heap total " UINT32_FORMAT " free " UINT32_FORMAT, recent_load, g1h->_hrm.length(), g1h->_hrm.num_free_regions()); ++ } ++ if (should_start_periodic_gc()) { ++ g1p->set_periodic_gc(); ++ } ++ _last_periodic_gc_attempt_s = now; ++ } ++} ++ + void ConcurrentG1RefineThread::run_young_rs_sampling() { + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + _vtime_start = os::elapsedVTime(); +@@ -110,6 +175,8 @@ void ConcurrentG1RefineThread::run_young_rs_sampling() { + _vtime_accum = 0.0; + } + ++ check_for_periodic_gc(); ++ + MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag); + if (_should_terminate) { + break; +diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp +index 05a8dc44..8fa52137 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp +@@ -42,6 +42,8 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread { + uint _worker_id; + uint _worker_id_offset; + ++ double _last_periodic_gc_attempt_s; ++ + // The refinement threads collection is linked list. A predecessor can activate a successor + // when the number of the rset update buffer crosses a certain threshold. A successor + // would self-deactivate when the number of the buffers falls below the threshold. 
+@@ -68,6 +70,9 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread { + void activate(); + void deactivate(); + ++ void check_for_periodic_gc(); ++ bool should_start_periodic_gc(); ++ + public: + virtual void run(); + // Constructor +diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp +index d782c892..a69db6eb 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp +@@ -1354,6 +1354,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { + satb_mq_set.set_active_all_threads(false, /* new active value */ + true /* expected_active */); + ++ g1h->extract_uncommit_list(); + if (VerifyDuringGC) { + HandleMark hm; // handle scope + Universe::heap()->prepare_for_verify(); +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp +index a90c15ea..b2d3b282 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp +@@ -47,15 +47,15 @@ G1BlockOffsetTable::block_start_const(const void* addr) const { + } + } + +-#define check_index(index, msg) \ +- assert((index) < (_reserved.word_size() >> LogN_words), \ +- err_msg("%s - index: "SIZE_FORMAT", _vs.committed_size: "SIZE_FORMAT, \ +- msg, (index), (_reserved.word_size() >> LogN_words))); \ +- assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)), \ +- err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT \ +- " (%u) is not in committed area.", \ +- (index), \ +- p2i(address_for_index_raw(index)), \ ++#define check_index(index, msg) \ ++ assert((index) < (_reserved.word_size() >> LogN_words), \ ++ err_msg("%s - index: "SIZE_FORMAT", _vs.committed_size: "SIZE_FORMAT, \ ++ msg, (index), 
(_reserved.word_size() >> LogN_words))); \ ++ assert(!G1Uncommit && G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)) || G1Uncommit, \ ++ err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT \ ++ " (%u) is not in committed area.", \ ++ (index), \ ++ p2i(address_for_index_raw(index)), \ + G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index)))); + + u_char G1BlockOffsetSharedArray::offset_array(size_t index) const { +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +index 91ad2e98..722e5985 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +@@ -1873,6 +1873,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : + _dirty_cards_region_list(NULL), + _worker_cset_start_region(NULL), + _worker_cset_start_region_time_stamp(NULL), ++ _uncommit_thread(NULL), + _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()), + _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), + _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()), +@@ -1952,6 +1953,16 @@ jint G1CollectedHeap::initialize() { + size_t max_byte_size = collector_policy()->max_heap_byte_size(); + size_t heap_alignment = collector_policy()->heap_alignment(); + ++ if (G1Uncommit) { ++ if (G1PeriodicGCInterval == 0) { ++ vm_exit_during_initialization(err_msg("G1Uncommit requires G1PeriodicGCInterval > 0")); ++ return JNI_EINVAL; ++ } ++ if (G1PeriodicGCLoadThreshold < 0 || G1PeriodicGCLoadThreshold > 100) { ++ vm_exit_during_initialization(err_msg("G1Uncommit requires G1PeriodicGCLoadThreshold >= 0 and <= 100")); ++ return JNI_EINVAL; ++ } ++ } + // Ensure that the sizes are properly aligned. 
+ Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); + Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); +@@ -2148,6 +2159,30 @@ void G1CollectedHeap::stop() { + if (G1StringDedup::is_enabled()) { + G1StringDedup::stop(); + } ++ if (G1Uncommit && _uncommit_thread != NULL) { ++ _uncommit_thread->stop(); ++ PeriodicGC::stop(); ++ } ++} ++ ++void G1CollectedHeap::check_trigger_periodic_gc() { ++ if (g1_policy()->should_trigger_periodic_gc()) { ++ collect(GCCause::_g1_periodic_collection); ++ } ++} ++ ++void G1CollectedHeap::init_periodic_gc_thread() { ++ if (_uncommit_thread == NULL && G1Uncommit) { ++ PeriodicGC::start(); ++ _uncommit_thread = new G1UncommitThread(); ++ } ++} ++ ++void G1CollectedHeap::extract_uncommit_list() { ++ if (g1_policy()->can_extract_uncommit_list()) { ++ uint count = _hrm.extract_uncommit_list(); ++ g1_policy()->record_extract_uncommit_list(count); ++ } + } + + size_t G1CollectedHeap::conservative_max_heap_alignment() { +@@ -2335,6 +2370,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { + case GCCause::_g1_humongous_allocation: return true; + case GCCause::_update_allocation_context_stats_inc: return true; + case GCCause::_wb_conc_mark: return true; ++ case GCCause::_g1_periodic_collection: return true; + default: return false; + } + } +@@ -2528,6 +2564,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) { + return; + } else { + if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc ++ || cause == GCCause::_g1_periodic_collection + DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { + + // Schedule a standard evacuation pause. 
We're setting word_size +@@ -2925,7 +2962,14 @@ size_t G1CollectedHeap::max_capacity() const { + + jlong G1CollectedHeap::millis_since_last_gc() { + // assert(false, "NYI"); +- return 0; ++ jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) - ++ _g1_policy->collection_pause_end_millis(); ++ if (ret_val < 0) { ++ gclog_or_tty->print_cr("millis_since_last_gc() would return : " JLONG_FORMAT ++ ". returning zero instead.", ret_val); ++ return 0; ++ } ++ return ret_val; + } + + void G1CollectedHeap::prepare_for_verify() { +@@ -4033,6 +4077,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { + + + double pause_start_sec = os::elapsedTime(); ++ g1_policy()->record_gc_start(pause_start_sec); + g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress()); + log_gc_header(); + +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +index 4783cbde..bde0ca4d 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +@@ -35,6 +35,7 @@ + #include "gc_implementation/g1/g1InCSetState.hpp" + #include "gc_implementation/g1/g1MonitoringSupport.hpp" + #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" ++#include "gc_implementation/g1/g1UncommitThread.hpp" + #include "gc_implementation/g1/g1YCTypes.hpp" + #include "gc_implementation/g1/heapRegionManager.hpp" + #include "gc_implementation/g1/heapRegionSet.hpp" +@@ -74,8 +75,10 @@ class GenerationCounters; + class STWGCTimer; + class G1NewTracer; + class G1OldTracer; ++class G1UncommitThread; + class EvacuationFailedInfo; + class nmethod; ++class ScanRSClosure; + + typedef OverflowTaskQueue RefToScanQueue; + typedef GenericTaskQueueSet RefToScanQueueSet; +@@ -186,8 +189,12 @@ class G1CollectedHeap : public SharedHeap { + friend class SurvivorGCAllocRegion; + friend class OldGCAllocRegion; + friend 
class G1Allocator; ++ friend class G1CollectorPolicy; + friend class G1DefaultAllocator; + friend class G1ResManAllocator; ++ friend class ScanRSClosure; ++ friend class G1UncommitThread; ++ friend class ConcurrentG1RefineThread; + + // Closures used in implementation. + template +@@ -210,6 +217,7 @@ class G1CollectedHeap : public SharedHeap { + friend class G1ParCleanupCTTask; + + friend class G1FreeHumongousRegionClosure; ++ friend class FreeRegionList; + // Other related classes. + friend class G1MarkSweep; + +@@ -266,6 +274,8 @@ private: + // Class that handles the different kinds of allocations. + G1Allocator* _allocator; + ++ G1UncommitThread* _uncommit_thread; ++ + // Statistics for each allocation context + AllocationContextStats _allocation_context_stats; + +@@ -1002,6 +1012,10 @@ public: + + void set_refine_cte_cl_concurrency(bool concurrent); + ++ void check_trigger_periodic_gc(); ++ void init_periodic_gc_thread(); ++ void extract_uncommit_list(); ++ + RefToScanQueue *task_queue(int i) const; + + // A set of cards where updates happened during the GC +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp +index b416917f..05a270d2 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp +@@ -93,6 +93,9 @@ G1CollectorPolicy::G1CollectorPolicy() : + _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + + _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), ++ _heap_size_seq(new TruncatedSeq(TruncatedSeqLength)), ++ _os_load_seq(new TruncatedSeq(TruncatedSeqLength)), ++ _gc_count_seq(new TruncatedSeq(TruncatedSeqLength)), + _prev_collection_pause_end_ms(0.0), + _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), + _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), +@@ -113,6 +116,13 @@ G1CollectorPolicy::G1CollectorPolicy() : + 
_pause_time_target_ms((double) MaxGCPauseMillis), + + _gcs_are_young(true), ++ _periodic_gc(false), ++ _last_uncommit_attempt_s(0.0), ++ _os_load(-1.0), ++ _uncommit_start_time(0), ++ _gc_count_cancel_extract(false), ++ _gc_count(0), ++ _gc_count_minute(0), + + _during_marking(false), + _in_marking_window(false), +@@ -153,7 +163,8 @@ G1CollectorPolicy::G1CollectorPolicy() : + _inc_cset_recorded_rs_lengths_diffs(0), + _inc_cset_predicted_elapsed_time_ms(0.0), + _inc_cset_predicted_elapsed_time_ms_diffs(0.0), +- ++ _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC), ++ _extract_uncommit_list(0), + #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away + #pragma warning( disable:4355 ) // 'this' : used in base member initializer list + #endif // _MSC_VER +@@ -976,6 +987,8 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua + } + #endif // PRODUCT + ++ _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; ++ + last_pause_included_initial_mark = during_initial_mark_pause(); + if (last_pause_included_initial_mark) { + record_concurrent_mark_init_end(0.0); +@@ -1204,6 +1217,7 @@ void G1CollectorPolicy::record_heap_size_info_at_start(bool full) { + _heap_capacity_bytes_before_gc = _g1->capacity(); + _heap_used_bytes_before_gc = _g1->used(); + _cur_collection_pause_used_regions_at_start = _g1->num_used_regions(); ++ _heap_size_seq->add(_cur_collection_pause_used_regions_at_start); + + _eden_capacity_bytes_before_gc = + (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc; +@@ -1247,6 +1261,13 @@ void G1CollectorPolicy::print_detailed_heap_transition(bool full) { + EXT_SIZE_PARAMS(heap_used_bytes_after_gc), + EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc)); + ++ if (_extract_uncommit_list) { ++ gclog_or_tty->print(" [Uncommit list " UINTX_FORMAT ", remaining " UINTX_FORMAT ", free list " UINTX_FORMAT "]", ++ _extract_uncommit_list, ++ 
_g1->_hrm.length(), ++ _g1->_hrm.num_free_regions()); ++ _extract_uncommit_list = 0; ++ } + if (full) { + MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc); + } +@@ -2160,6 +2181,53 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInf + evacuation_info.set_collectionset_regions(cset_region_length()); + } + ++void G1CollectorPolicy::record_gc_start(double curr_sec) { ++ if (_uncommit_start_time == 0) { ++ _uncommit_start_time = curr_sec + G1UncommitDelay; ++ } ++ long curr = curr_sec / 60; ++ if (curr > _gc_count_minute) { ++ int diff = curr - _gc_count_minute; ++ _gc_count_seq->add(_gc_count); ++ for (int i = 1; i < diff; i++) { ++ _gc_count_seq->add(0.0); ++ } ++ _gc_count_minute = curr; ++ double gc_count_expected = get_new_prediction(_gc_count_seq); ++ // Considering the test result, 15000 is an appropriate value for G1PeriodicGCInterval. ++ _gc_count_cancel_extract = gc_count_expected > MIN2(4.0, 60000.0 / G1PeriodicGCInterval); ++ _gc_count = 0; ++ } ++ _gc_count++; ++} ++ ++bool G1CollectorPolicy::should_trigger_periodic_gc() { ++ if (G1PeriodicGCLoadThreshold && _os_load > G1PeriodicGCLoadThreshold) { ++ _periodic_gc = false; ++ } else if (_periodic_gc) { ++ _periodic_gc = false; ++ return true; ++ } ++ return false; ++} ++ ++bool G1CollectorPolicy::can_extract_uncommit_list() { ++ double now = os::elapsedTime(); ++ if (G1Uncommit && now > _uncommit_start_time) { ++ if (G1PeriodicGCLoadThreshold && _os_load > G1PeriodicGCLoadThreshold) { ++ return false; ++ } ++ G1CollectedHeap* g1h = G1CollectedHeap::heap(); ++ if (!_gc_count_cancel_extract || now >= (g1h->millis_since_last_gc() + G1PeriodicGCInterval) / 1000.0) { ++ if (now - _last_uncommit_attempt_s >= G1PeriodicGCInterval / 1000.0) { ++ _last_uncommit_attempt_s = now; ++ return true; ++ } ++ } ++ } ++ return false; ++} ++ + void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) { + if(TraceGen0Time) { + 
_all_stop_world_times_ms.add(time_to_stop_the_world_ms); +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp +index 02217ce4..1c918070 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp +@@ -183,6 +183,7 @@ private: + + CollectionSetChooser* _collectionSetChooser; + ++ jlong _collection_pause_end_millis; + double _full_collection_start_sec; + uint _cur_collection_pause_used_regions_at_start; + +@@ -243,6 +244,9 @@ private: + TruncatedSeq* _constant_other_time_ms_seq; + TruncatedSeq* _young_other_cost_per_region_ms_seq; + TruncatedSeq* _non_young_other_cost_per_region_ms_seq; ++ TruncatedSeq* _heap_size_seq; ++ TruncatedSeq* _os_load_seq; ++ TruncatedSeq* _gc_count_seq; + + TruncatedSeq* _pending_cards_seq; + TruncatedSeq* _rs_lengths_seq; +@@ -264,6 +268,8 @@ private: + + uint _free_regions_at_end_of_collection; + ++ uint _extract_uncommit_list; ++ + size_t _recorded_rs_lengths; + size_t _max_rs_lengths; + double _sigma; +@@ -300,9 +306,21 @@ private: + + size_t _pending_cards; + ++ size_t _gc_count; ++ long _gc_count_minute; ++ bool _gc_count_cancel_extract; ++ ++ volatile bool _periodic_gc; ++ double _last_uncommit_attempt_s; ++ volatile double _os_load; ++ double _uncommit_start_time; + public: + // Accessors + ++ void set_periodic_gc() { _periodic_gc = true; } ++ bool can_extract_uncommit_list(); ++ bool should_trigger_periodic_gc(); ++ + void set_region_eden(HeapRegion* hr, int young_index_in_cset) { + hr->set_eden(); + hr->install_surv_rate_group(_short_lived_surv_rate_group); +@@ -328,6 +346,17 @@ public: + _max_rs_lengths = rs_lengths; + } + ++ size_t predict_heap_size_seq() { ++ return (size_t) get_new_prediction(_heap_size_seq); ++ } ++ ++ void add_os_load(double load) { ++ _os_load_seq->add(load); ++ _os_load = get_new_prediction(_os_load_seq); ++ } ++ ++ void 
record_gc_start(double sec); ++ + size_t predict_rs_length_diff() { + return (size_t) get_new_prediction(_rs_length_diff_seq); + } +@@ -475,6 +504,8 @@ public: + return _short_lived_surv_rate_group->accum_surv_rate_pred(age); + } + ++ jlong collection_pause_end_millis() { return _collection_pause_end_millis; } ++ + private: + // Statistics kept per GC stoppage, pause or full. + TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec; +@@ -706,6 +737,8 @@ public: + void record_stop_world_start(); + void record_concurrent_pause(); + ++ void record_extract_uncommit_list(uint count) { _extract_uncommit_list = count; } ++ + // Record how much space we copied during a GC. This is typically + // called when a GC alloc region is being retired. + void record_bytes_copied_during_gc(size_t bytes) { +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp +index da417fb7..3066f9e6 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp +@@ -206,6 +206,9 @@ public: + #endif + + HeapRegion* card_region = _g1h->heap_region_containing(card_start); ++ if (!_g1h->_hrm.is_available(card_region->hrm_index())) { ++ continue; ++ } + _cards++; + + if (!card_region->is_on_dirty_cards_region_list()) { +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1UncommitThread.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1UncommitThread.cpp +new file mode 100644 +index 00000000..37bdbdb6 +--- /dev/null ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1UncommitThread.cpp +@@ -0,0 +1,170 @@ ++/* ++ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++ ++#include "gc_implementation/g1/g1UncommitThread.hpp" ++#include "gc_implementation/g1/g1_globals.hpp" ++#include "runtime/javaCalls.hpp" ++#include "runtime/os.hpp" ++ ++#ifdef _WINDOWS ++#pragma warning(disable : 4355) ++#endif ++ ++volatile bool PeriodicGC::_should_terminate = false; ++JavaThread* PeriodicGC::_thread = NULL; ++Monitor* PeriodicGC::_monitor = NULL; ++ ++bool PeriodicGC::has_error(TRAPS, const char* error) { ++ if (HAS_PENDING_EXCEPTION) { ++ tty->print_cr("%s", error); ++ java_lang_Throwable::print(PENDING_EXCEPTION, tty); ++ tty->cr(); ++ CLEAR_PENDING_EXCEPTION; ++ return true; ++ } else { ++ return false; ++ } ++} ++ ++void PeriodicGC::start() { ++ _monitor = new Monitor(Mutex::nonleaf, "PeriodicGC::_monitor", Mutex::_allow_vm_block_flag); ++ ++ EXCEPTION_MARK; ++ Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK); ++ instanceKlassHandle klass (THREAD, k); ++ instanceHandle thread_oop = klass->allocate_instance_handle(CHECK); ++ ++ const char thread_name[] = "periodic gc timer"; ++ Handle string = java_lang_String::create_from_str(thread_name, CHECK); ++ ++ // Initialize thread_oop to put it into the system threadGroup ++ Handle 
thread_group (THREAD, Universe::system_thread_group()); ++ JavaValue result(T_VOID); ++ JavaCalls::call_special(&result, thread_oop, ++ klass, ++ vmSymbols::object_initializer_name(), ++ vmSymbols::threadgroup_string_void_signature(), ++ thread_group, ++ string, ++ THREAD); ++ if (has_error(THREAD, "Exception in VM (PeriodicGC::start) : ")) { ++ vm_exit_during_initialization("Cannot create periodic gc timer thread."); ++ return; ++ } ++ ++ KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass()); ++ JavaCalls::call_special(&result, ++ thread_group, ++ group, ++ vmSymbols::add_method_name(), ++ vmSymbols::thread_void_signature(), ++ thread_oop, // ARG 1 ++ THREAD); ++ if (has_error(THREAD, "Exception in VM (PeriodicGC::start) : ")) { ++ vm_exit_during_initialization("Cannot create periodic gc timer thread."); ++ return; ++ } ++ ++ { ++ MutexLocker mu(Threads_lock); ++ _thread = new JavaThread(&PeriodicGC::timer_thread_entry); ++ if (_thread == NULL || _thread->osthread() == NULL) { ++ vm_exit_during_initialization("Cannot create PeriodicGC timer thread. 
Out of system resources."); ++ } ++ ++ java_lang_Thread::set_thread(thread_oop(), _thread); ++ java_lang_Thread::set_daemon(thread_oop()); ++ _thread->set_threadObj(thread_oop()); ++ Threads::add(_thread); ++ Thread::start(_thread); ++ } ++} ++ ++void PeriodicGC::timer_thread_entry(JavaThread* thread, TRAPS) { ++ while(!_should_terminate) { ++ assert(!SafepointSynchronize::is_at_safepoint(), "PeriodicGC timer thread is a JavaThread"); ++ G1CollectedHeap::heap()->check_trigger_periodic_gc(); ++ ++ MutexLockerEx x(_monitor); ++ if (_should_terminate) { ++ break; ++ } ++ _monitor->wait(false /* no_safepoint_check */, 200); ++ } ++} ++ ++void PeriodicGC::stop() { ++ _should_terminate = true; ++ { ++ MutexLockerEx ml(_monitor, Mutex::_no_safepoint_check_flag); ++ _monitor->notify(); ++ } ++} ++ ++G1UncommitThread::G1UncommitThread() : ++ ConcurrentGCThread() { ++ if (os::create_thread(this, os::cgc_thread)) { ++ int native_prio; ++ if (G1UncommitThreadPriority) { ++ native_prio = os::java_to_os_priority[CriticalPriority]; ++ } else { ++ native_prio = os::java_to_os_priority[NearMaxPriority]; ++ } ++ os::set_native_priority(this, native_prio); ++ if (!_should_terminate && !DisableStartThread) { ++ os::start_thread(this); ++ } ++ } ++ if (G1UncommitLog) { ++ gclog_or_tty->print_cr("Periodic GC Thread start"); ++ } ++} ++ ++G1UncommitThread::~G1UncommitThread() { ++ // This is here so that super is called. 
++} ++ ++void G1UncommitThread::run() { ++ G1CollectedHeap* heap = G1CollectedHeap::heap(); ++ while (!_should_terminate) { ++ heap->_hrm.free_uncommit_list_memory(); ++ os::sleep(this, G1PeriodicGCInterval / 10, false); ++ } ++ terminate(); ++} ++ ++void G1UncommitThread::stop() { ++ { ++ MutexLockerEx ml(Terminator_lock); ++ _should_terminate = true; ++ } ++ { ++ MutexLockerEx ml(Terminator_lock); ++ while (!_has_terminated) { ++ Terminator_lock->wait(); ++ } ++ } ++} +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1UncommitThread.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1UncommitThread.hpp +new file mode 100644 +index 00000000..883a9a41 +--- /dev/null ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1UncommitThread.hpp +@@ -0,0 +1,59 @@ ++/* ++ * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef SHARE_VM_GC_G1_G1CONCURRENTTHREAD_HPP ++#define SHARE_VM_GC_G1_G1CONCURRENTTHREAD_HPP ++ ++#include "gc_implementation/shared/concurrentGCThread.hpp" ++#include "gc_implementation/g1/g1CollectedHeap.hpp" ++#include "gc_implementation/g1/heapRegionSet.hpp" ++ ++class PeriodicGC : AllStatic { ++private: ++ volatile static bool _should_terminate; ++ static JavaThread* _thread; ++ static Monitor* _monitor; ++ ++public: ++ // Timer thread entry ++ static void timer_thread_entry(JavaThread* thread, TRAPS); ++ static void start(); ++ static void stop(); ++ static bool has_error(TRAPS, const char* error); ++}; ++ ++class G1UncommitThread: public ConcurrentGCThread { ++ friend class VMStructs; ++ ++public: ++ // Constructor ++ G1UncommitThread(); ++ ~G1UncommitThread(); ++ ++ void run(); ++ void stop(); ++ ++ char* name() const { return (char*)"G1UncommitThread";} ++}; ++ ++#endif // SHARE_VM_GC_G1_G1CONCURRENTTHREAD_HPP +diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp +index e24cc959..db7ddece 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp +@@ -328,7 +328,37 @@ + "Verify the code root lists attached to each heap region.") \ + \ + develop(bool, G1VerifyBitmaps, false, \ +- "Verifies the consistency of the marking bitmaps") ++ "Verifies the consistency of the marking bitmaps") \ ++ \ ++ product(bool, G1Uncommit, false, \ ++ "Allow G1 to uncommit unused memory.") \ ++ \ ++ product(bool, G1UncommitLog, false, \ ++ "Enable G1 uncommit logs.") \ ++ \ ++ manageable(uintx, G1PeriodicGCInterval, 15000, \ ++ "Number of milliseconds after a previous GC to wait before " \ ++ "triggering a periodic gc. A value of zero disables periodically "\ ++ "enforced gc cycles.") \ ++ \ ++ manageable(uintx, G1PeriodicGCLoadThreshold, 10, \ ++ "Percentage of process load or system load." 
\
++  "Above this value cancels a given periodic GC. "                  \
++  "A value of zero disables load check.")                           \
++                                                                    \
++  experimental(bool, G1PeriodicGCProcessLoad, true,                 \
++          "If true, use process load, else use system load, which is" \
++          " the 1m value of getloadavg() / CPU core number.")       \
++                                                                    \
++  experimental(bool, G1UncommitThreadPriority, false,               \
++          "G1 uncommit thread runs at critical scheduling priority.") \
++                                                                    \
++  experimental(double, G1UncommitPercent, 0.1,                      \
++          "Percent of free regions to uncommit for one uncommit cycle.") \
++                                                                    \
++  experimental(uintx, G1UncommitDelay, 50,                          \
++          "Startup delay in seconds for periodic uncommit.")        \
++                                                                    \
+ 
+ G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
+ 
+diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
+index f0e24811..32f8b198 100644
+--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
++++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
+@@ -310,7 +310,8 @@ HeapRegion::HeapRegion(uint hrm_index,
+ #ifdef ASSERT
+ _containing_set(NULL),
+ #endif // ASSERT
+- _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
++ _in_uncommit_list(false),
++ _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
+ _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
+ _predicted_bytes_to_copy(0)
+ {
+diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
+index b1f94fb9..27b1048f 100644
+--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
++++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
+@@ -227,6 +227,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
+ // For the start region of a humongous sequence, it's original 
end(). + HeapWord* _orig_end; + ++ // True iff the region is in current uncommit_list. ++ bool _in_uncommit_list; ++ + // True iff the region is in current collection_set. + bool _in_collection_set; + +@@ -429,6 +432,8 @@ class HeapRegion: public G1OffsetTableContigSpace { + return _humongous_start_region; + } + ++ void set_uncommit_list(bool in) { _in_uncommit_list = in; } ++ bool in_uncommit_list() { return _in_uncommit_list; } + // Return the number of distinct regions that are covered by this region: + // 1 if the region is not humongous, >= 1 if the region is humongous. + uint region_num() const { +diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp +index 49c231d8..842550d2 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp +@@ -53,12 +53,25 @@ void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage, + + _available_map.resize(_regions.length(), false); + _available_map.clear(); ++ _uncommit_list_filled = false; + } + + bool HeapRegionManager::is_available(uint region) const { ++ HeapRegion* hr = _regions.get_by_index(region); ++ if (hr != NULL && hr->in_uncommit_list()) { ++ return false; ++ } + return _available_map.at(region); + } + ++bool HeapRegionManager::can_expand(uint region) const { ++ HeapRegion* hr = _regions.get_by_index(region); ++ if (hr != NULL && hr->in_uncommit_list()) { ++ return false; ++ } ++ return !_available_map.at(region); ++} ++ + #ifdef ASSERT + bool HeapRegionManager::is_free(HeapRegion* hr) const { + return _free_list.contains(hr); +@@ -77,7 +90,7 @@ void HeapRegionManager::commit_regions(uint index, size_t num_regions) { + guarantee(num_regions > 0, "Must commit more than zero regions"); + guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions"); + +- _num_committed += 
(uint)num_regions;
++ Atomic::add((int)num_regions, (volatile int*)&_num_committed);
+ 
+ _heap_mapper->commit_regions(index, num_regions);
+ 
+@@ -103,9 +116,9 @@ void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
+ }
+ }
+ 
+- _num_committed -= (uint)num_regions;
+- 
++ Atomic::add(-(int)num_regions, (volatile int*)&_num_committed);
+ _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
++ 
+ _heap_mapper->uncommit_regions(start, num_regions);
+ 
+ // Also uncommit auxiliary data
+@@ -198,7 +211,7 @@ uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
+ 
+ while (length_found < num && cur < max_length()) {
+ HeapRegion* hr = _regions.get_by_index(cur);
+- if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
++ if ((!empty_only && can_expand(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
+ // This region is a potential candidate for allocation into.
+ length_found++;
+ } else {
+@@ -213,7 +226,7 @@ uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
+ for (uint i = found; i < (found + num); i++) {
+ HeapRegion* hr = _regions.get_by_index(i);
+ // sanity check
+- guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
++ guarantee((!empty_only && can_expand(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
+ err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
+ " that is not empty at " UINT32_FORMAT ". 
Hr is " PTR_FORMAT, found, num, i, p2i(hr))); + } +@@ -239,7 +252,8 @@ void HeapRegionManager::iterate(HeapRegionClosure* blk) const { + uint len = max_length(); + + for (uint i = 0; i < len; i++) { +- if (!is_available(i)) { ++ HeapRegion* r = _regions.get_by_index(i); ++ if (r != NULL && r->in_uncommit_list() || !_available_map.at(i)) { + continue; + } + guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i)); +@@ -265,15 +279,15 @@ uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) + return num_regions; + } + *res_idx = cur; +- while (cur < max_length() && !is_available(cur)) { ++ while (cur < max_length() && can_expand(cur)) { + cur++; + } + num_regions = cur - *res_idx; + #ifdef ASSERT + for (uint i = *res_idx; i < (*res_idx + num_regions); i++) { +- assert(!is_available(i), "just checking"); ++ assert(can_expand(i), "just checking"); + } +- assert(cur == max_length() || num_regions == 0 || is_available(cur), ++ assert(cur == max_length() || num_regions == 0 || (!G1Uncommit && is_available(cur)) || G1Uncommit, + err_msg("The region at the current position %u must be available or at the end of the heap.", cur)); + #endif + return num_regions; +@@ -294,10 +308,10 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint + const uint index = (start_index + count) % _allocated_heapregions_length; + assert(0 <= index && index < _allocated_heapregions_length, "sanity"); + // Skip over unavailable regions +- if (!is_available(index)) { ++ HeapRegion* r = _regions.get_by_index(index); ++ if (r != NULL && r->in_uncommit_list() || !_available_map.at(index)) { + continue; + } +- HeapRegion* r = _regions.get_by_index(index); + // We'll ignore "continues humongous" regions (we'll process them + // when we come across their corresponding "start humongous" + // region) and regions already claimed. 
+@@ -425,12 +439,12 @@ void HeapRegionManager::verify() { + uint num_committed = 0; + HeapWord* prev_end = heap_bottom(); + for (uint i = 0; i < _allocated_heapregions_length; i++) { +- if (!is_available(i)) { ++ HeapRegion* hr = _regions.get_by_index(i); ++ if (hr != NULL && hr->in_uncommit_list() || !_available_map.at(i)) { + prev_committed = false; + continue; + } + num_committed++; +- HeapRegion* hr = _regions.get_by_index(i); + guarantee(hr != NULL, err_msg("invariant: i: %u", i)); + guarantee(!prev_committed || hr->bottom() == prev_end, + err_msg("invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT, +@@ -454,10 +468,38 @@ void HeapRegionManager::verify() { + guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i)); + } + +- guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed)); ++ guarantee((!G1Uncommit && num_committed == _num_committed) || G1Uncommit, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed)); + _free_list.verify(); + } + ++void HeapRegionManager::free_uncommit_list_memory() { ++ if (_uncommit_list_filled) { ++ _uncommit_list.remove_all(true); ++ OrderAccess::storestore(); ++ _uncommit_list_filled = false; ++ } ++} ++ ++uint HeapRegionManager::extract_uncommit_list() ++{ ++ assert_at_safepoint(true /* should_be_vm_thread */); ++ if (!_uncommit_list_filled) { ++ G1CollectedHeap* g1h = G1CollectedHeap::heap(); ++ uint dest = ((G1CollectorPolicy*)g1h->collector_policy())->predict_heap_size_seq(); ++ ++ if (dest < _num_committed) { ++ uint num_regions_to_remove = (_num_committed - dest) * G1UncommitPercent; ++ if (num_regions_to_remove >= 1 && num_regions_to_remove < _free_list.length()) { ++ int count = _free_list.move_regions_to(&_uncommit_list, num_regions_to_remove); ++ OrderAccess::storestore(); ++ _uncommit_list_filled = true; ++ return count; ++ } ++ } ++ } ++ return 0; ++} ++ + #ifndef PRODUCT + void 
HeapRegionManager::verify_optional() { + verify(); +diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp +index 83996f71..71512218 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp +@@ -67,6 +67,7 @@ class G1HeapRegionTable : public G1BiasedMappedArray { + + class HeapRegionManager: public CHeapObj { + friend class VMStructs; ++ friend class FreeRegionList; + + G1HeapRegionTable _regions; + +@@ -78,6 +79,8 @@ class HeapRegionManager: public CHeapObj { + G1RegionToSpaceMapper* _card_counts_mapper; + + FreeRegionList _free_list; ++ FreeRegionList _uncommit_list; ++ bool _uncommit_list_filled; + + // Each bit in this bitmap indicates that the corresponding region is available + // for allocation. +@@ -123,17 +126,23 @@ class HeapRegionManager: public CHeapObj { + public: + bool is_free(HeapRegion* hr) const; + #endif +- // Returns whether the given region is available for allocation. +- bool is_available(uint region) const; ++ ++ // Returns whether the given region is not available and can be expanded. ++ bool can_expand(uint region) const; + + public: + // Empty constructor, we'll initialize it with the initialize() method. + HeapRegionManager() : _regions(), _heap_mapper(NULL), _num_committed(0), + _next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL), + _allocated_heapregions_length(0), _available_map(), +- _free_list("Free list", new MasterFreeRegionListMtSafeChecker()) ++ _free_list("Free list", new MasterFreeRegionListMtSafeChecker()), ++ _uncommit_list("Uncommit list", NULL) + { } + ++ // Returns whether the given region is available for allocation. 
++ // !is_available is not allowed ++ bool is_available(uint region) const; ++ + void initialize(G1RegionToSpaceMapper* heap_storage, + G1RegionToSpaceMapper* prev_bitmap, + G1RegionToSpaceMapper* next_bitmap, +@@ -141,6 +150,9 @@ public: + G1RegionToSpaceMapper* cardtable, + G1RegionToSpaceMapper* card_counts); + ++ uint extract_uncommit_list(); ++ void free_uncommit_list_memory(); ++ + // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired + // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit + // the heap from the lowest address, this region (and its associated data +diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp +index 213380e7..09d12fd3 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp +@@ -109,10 +109,11 @@ void FreeRegionList::fill_in_ext_msg_extra(hrs_ext_msg* msg) { + msg->append(" hd: " PTR_FORMAT " tl: " PTR_FORMAT, _head, _tail); + } + +-void FreeRegionList::remove_all() { ++void FreeRegionList::remove_all(bool uncommit) { + check_mt_safety(); + verify_optional(); + ++ G1CollectedHeap* g1h = G1CollectedHeap::heap(); + HeapRegion* curr = _head; + while (curr != NULL) { + verify_region(curr); +@@ -121,6 +122,11 @@ void FreeRegionList::remove_all() { + curr->set_next(NULL); + curr->set_prev(NULL); + curr->set_containing_set(NULL); ++ if (uncommit) { ++ g1h->_hrm.uncommit_regions(curr->hrm_index(), 1); ++ OrderAccess::storestore(); ++ curr->set_uncommit_list(false); ++ } + curr = next; + } + clear(); +@@ -328,6 +334,48 @@ void FreeRegionList::verify_list() { + name(), total_capacity_bytes(), capacity)); + } + ++uint FreeRegionList::move_regions_to(FreeRegionList* dest, uint num_regions) { ++ check_mt_safety(); ++ assert(num_regions >= 1, hrs_ext_msg(this, "pre-condition")); ++ assert(num_regions < length(), hrs_ext_msg(this, 
"pre-condition")); ++ assert(dest != NULL && dest->is_empty(), hrs_ext_msg(dest, "pre-condition")); ++ ++ verify_optional(); ++ DEBUG_ONLY(uint old_length = length();) ++ HeapRegion* curr = _tail; ++ uint count = 0; ++ size_t capacity = 0; ++ ++ while (count < num_regions) { ++ if (curr->hrm_index() <= InitialHeapSize / HeapRegion::GrainBytes) { ++ break; ++ } ++ if (_last == curr) { ++ _last = NULL; ++ } ++ curr->set_containing_set(NULL); ++ curr->set_containing_set(dest); ++ curr->set_uncommit_list(true); ++ count++; ++ capacity += curr->capacity(); ++ curr = curr->prev(); ++ assert(curr != NULL, hrs_ext_msg(this, "invariant")); ++ } ++ if (count != 0) { ++ dest->_tail = _tail; ++ dest->_head = curr->next(); ++ dest->_head->set_prev(NULL); ++ dest->_count.increment(count, capacity); ++ dest->verify_optional(); ++ ++ _count.decrement(count, capacity); ++ _tail = curr; ++ _tail->set_next(NULL); ++ verify_optional(); ++ } ++ return count; ++} ++ + // Note on the check_mt_safety() methods below: + // + // Verification of the "master" heap region sets / lists that are +diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp +index 9a9267c4..ede3136d 100644 +--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp ++++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp +@@ -249,13 +249,15 @@ public: + void add_ordered(FreeRegionList* from_list); + + // It empties the list by removing all regions from it. +- void remove_all(); ++ void remove_all(bool uncommit = false); + + // Remove all (contiguous) regions from first to first + num_regions -1 from + // this list. + // Num_regions must be > 1. 
+ void remove_starting_at(HeapRegion* first, uint num_regions);
+
++ uint move_regions_to(FreeRegionList* dest, uint num_regions);
++
+ virtual void verify();
+
+ virtual void print_on(outputStream* out, bool print_contents = false);
+diff --git a/hotspot/src/share/vm/gc_interface/gcCause.cpp b/hotspot/src/share/vm/gc_interface/gcCause.cpp
+index bdac7cb0..283df9bf 100644
+--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp
++++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp
+@@ -100,6 +100,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
+     case _g1_humongous_allocation:
+       return "G1 Humongous Allocation";
+
++    case _g1_periodic_collection:
++      return "G1 Periodic GC";
++
+     case _shenandoah_allocation_failure_evac:
+       return "Allocation Failure During Evacuation";
+
+diff --git a/hotspot/src/share/vm/gc_interface/gcCause.hpp b/hotspot/src/share/vm/gc_interface/gcCause.hpp
+index 29408d77..5be14548 100644
+--- a/hotspot/src/share/vm/gc_interface/gcCause.hpp
++++ b/hotspot/src/share/vm/gc_interface/gcCause.hpp
+@@ -72,6 +72,7 @@ class GCCause : public AllStatic {
+
+     _g1_inc_collection_pause,
+     _g1_humongous_allocation,
++    _g1_periodic_collection,
+
+     _shenandoah_stop_vm,
+     _shenandoah_metadata_gc_clear_softrefs,
+diff --git a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp
+index 9335a9f7..4e8dae21 100644
+--- a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp
++++ b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp
+@@ -98,7 +98,7 @@ private:
+   static GlobalTLABStats* global_stats() { return _global_stats; }
+
+ public:
+-  ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0), _initialized(false) {
++  ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0), _initialized(false), _gclab(false) {
+     // do nothing. 
tlabs must be inited by initialize() calls
+   }
+
+diff --git a/hotspot/src/share/vm/prims/jvm.cpp b/hotspot/src/share/vm/prims/jvm.cpp
+index 38cddfed..3a032d04 100644
+--- a/hotspot/src/share/vm/prims/jvm.cpp
++++ b/hotspot/src/share/vm/prims/jvm.cpp
+@@ -3286,6 +3286,23 @@ JVM_ENTRY(void, JVM_SetThreadPriority(JNIEnv* env, jobject jthread, jint prio))
+ }
+ JVM_END
+
++JVM_ENTRY(void, JVM_AdaptiveHeapSetG1PeriodicGCInterval(JNIEnv *env, jclass klass, jint interval))
++  JVMWrapper("JVM_AdaptiveHeapSetG1PeriodicGCInterval");
++  G1PeriodicGCInterval = interval;
++JVM_END
++JVM_ENTRY(jint, JVM_AdaptiveHeapGetG1PeriodicGCInterval(JNIEnv *env, jclass klass))
++  JVMWrapper("JVM_AdaptiveHeapGetG1PeriodicGCInterval");
++  return G1PeriodicGCInterval;
++JVM_END
++
++JVM_ENTRY(void, JVM_AdaptiveHeapSetG1PeriodicGCLoadThreshold(JNIEnv *env, jclass clazz, jint loadThreshold))
++  JVMWrapper("JVM_AdaptiveHeapSetG1PeriodicGCLoadThreshold");
++  G1PeriodicGCLoadThreshold = loadThreshold;
++JVM_END
++JVM_ENTRY(jint, JVM_AdaptiveHeapGetG1PeriodicGCLoadThreshold(JNIEnv *env, jclass clazz))
++  JVMWrapper("JVM_AdaptiveHeapGetG1PeriodicGCLoadThreshold");
++  return G1PeriodicGCLoadThreshold;
++JVM_END
+
+ JVM_ENTRY(void, JVM_Yield(JNIEnv *env, jclass threadClass))
+   JVMWrapper("JVM_Yield");
+diff --git a/hotspot/src/share/vm/prims/jvm.h b/hotspot/src/share/vm/prims/jvm.h
+index 198c52dc..0c0d44a0 100644
+--- a/hotspot/src/share/vm/prims/jvm.h
++++ b/hotspot/src/share/vm/prims/jvm.h
+@@ -1586,6 +1586,20 @@ JVM_GetResourceLookupCacheURLs(JNIEnv *env, jobject loader);
+ JNIEXPORT jintArray JNICALL
+ JVM_GetResourceLookupCache(JNIEnv *env, jobject loader, const char *resource_name);
+
++/*
++ * com.huawei.jvm.gc.AdaptiveHeapMXBeanImpl
++ */
++JNIEXPORT void JNICALL
++JVM_AdaptiveHeapSetG1PeriodicGCInterval(JNIEnv *env, jclass klass, jint interval);
++JNIEXPORT jint JNICALL
++JVM_AdaptiveHeapGetG1PeriodicGCInterval(JNIEnv *env, jclass klass);
++
++
++JNIEXPORT void JNICALL 
++JVM_AdaptiveHeapSetG1PeriodicGCLoadThreshold(JNIEnv *env, jclass clazz, jint loadThreshold); ++JNIEXPORT jint JNICALL ++JVM_AdaptiveHeapGetG1PeriodicGCLoadThreshold(JNIEnv *env, jclass clazz); ++ + + /* ========================================================================= + * The following defines a private JVM interface that the JDK can query +diff --git a/hotspot/src/share/vm/runtime/os.hpp b/hotspot/src/share/vm/runtime/os.hpp +index ab3148e3..cff2e9c3 100644 +--- a/hotspot/src/share/vm/runtime/os.hpp ++++ b/hotspot/src/share/vm/runtime/os.hpp +@@ -823,6 +823,9 @@ class os: AllStatic { + // System loadavg support. Returns -1 if load average cannot be obtained. + static int loadavg(double loadavg[], int nelem); + ++ // Process loadavg support. Returns -1 if load average cannot be obtained. ++ static double get_process_load(); ++ + // Hook for os specific jvm options that we don't want to abort on seeing + static bool obsolete_option(const JavaVMOption *option); + +diff --git a/hotspot/src/share/vm/runtime/thread.cpp b/hotspot/src/share/vm/runtime/thread.cpp +index d0185280..710f8cbc 100644 +--- a/hotspot/src/share/vm/runtime/thread.cpp ++++ b/hotspot/src/share/vm/runtime/thread.cpp +@@ -99,6 +99,7 @@ + #include "gc_implementation/shenandoah/shenandoahControlThread.hpp" + #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" + #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" ++#include "gc_implementation/g1/g1CollectedHeap.hpp" + #include "gc_implementation/parallelScavenge/pcTasks.hpp" + #endif // INCLUDE_ALL_GCS + #ifdef COMPILER1 +@@ -3690,6 +3691,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { + ShenandoahControlThread::makeSurrogateLockerThread(THREAD); + } else { + ConcurrentMarkThread::makeSurrogateLockerThread(THREAD); ++ G1CollectedHeap::heap()->init_periodic_gc_thread(); + } + if (HAS_PENDING_EXCEPTION) { + vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION)); +diff --git 
a/jdk/make/CreateJars.gmk b/jdk/make/CreateJars.gmk +index 6e484369..559a62b6 100644 +--- a/jdk/make/CreateJars.gmk ++++ b/jdk/make/CreateJars.gmk +@@ -555,7 +555,9 @@ EXCLUDE_PROPWARN_PKGS = com.sun.java.swing.plaf.windows \ + # with a new module system (being discussed for JDK 8). + # + EXPORTED_PRIVATE_PKGS = com.oracle.net \ +- com.oracle.nio ++ com.oracle.nio \ ++ com.huawei.management \ ++ com.huawei.jvm.gc + + $(IMAGES_OUTPUTDIR)/symbols/_the.symbols: $(IMAGES_OUTPUTDIR)/lib/rt.jar + $(RM) -r $(IMAGES_OUTPUTDIR)/symbols/META-INF/sym +diff --git a/jdk/make/data/classlist/classlist.linux b/jdk/make/data/classlist/classlist.linux +index 2a3915c0..737aefe2 100644 +--- a/jdk/make/data/classlist/classlist.linux ++++ b/jdk/make/data/classlist/classlist.linux +@@ -2556,4 +2556,5 @@ javax/swing/plaf/basic/BasicToolBarSeparatorUI + java/awt/event/AdjustmentEvent + java/awt/MenuBar + sun/awt/X11/XErrorEvent ++com/huawei/jvm/gc + # eea35d9d56e0006e +diff --git a/jdk/make/lib/CoreLibraries.gmk b/jdk/make/lib/CoreLibraries.gmk +index d374a47a..2a155ea1 100644 +--- a/jdk/make/lib/CoreLibraries.gmk ++++ b/jdk/make/lib/CoreLibraries.gmk +@@ -143,6 +143,7 @@ LIBJAVA_SRC_DIRS += $(JDK_TOPDIR)/src/$(OPENJDK_TARGET_OS_API_DIR)/native/java/l + $(JDK_TOPDIR)/src/share/native/common \ + $(JDK_TOPDIR)/src/share/native/sun/misc \ + $(JDK_TOPDIR)/src/share/native/sun/reflect \ ++ $(JDK_TOPDIR)/src/share/native/com/huawei/jvm/gc \ + $(JDK_TOPDIR)/src/share/native/java/util \ + $(JDK_TOPDIR)/src/share/native/java/util/concurrent/atomic \ + $(JDK_TOPDIR)/src/$(OPENJDK_TARGET_OS_API_DIR)/native/common \ +diff --git a/jdk/make/mapfiles/libjava/mapfile-vers b/jdk/make/mapfiles/libjava/mapfile-vers +index d686924a..7aeab583 100644 +--- a/jdk/make/mapfiles/libjava/mapfile-vers ++++ b/jdk/make/mapfiles/libjava/mapfile-vers +@@ -215,6 +215,7 @@ SUNWprivate_1.1 { + Java_java_lang_System_setErr0; + Java_java_lang_System_setIn0; + Java_java_lang_System_setOut0; ++ 
Java_com_huawei_jvm_gc_AdaptiveHeapMXBeanImpl_registerNatives; + Java_java_lang_Thread_registerNatives; + Java_java_lang_Throwable_fillInStackTrace; + Java_java_lang_Throwable_getStackTraceDepth; +diff --git a/jdk/make/mapfiles/libjava/reorder-sparc b/jdk/make/mapfiles/libjava/reorder-sparc +index 96f8e735..95793a5b 100644 +--- a/jdk/make/mapfiles/libjava/reorder-sparc ++++ b/jdk/make/mapfiles/libjava/reorder-sparc +@@ -104,3 +104,4 @@ text: .text%Java_java_util_TimeZone_getSystemTimeZoneID; + text: .text%findJavaTZ_md; + text: .text%Java_java_lang_StrictMath_log; + text: .text%Java_java_lang_StrictMath_sqrt; ++text: .text%Java_com_huawei_jvm_gc_AdaptiveHeapMXBeanImpl_registerNatives; +diff --git a/jdk/make/mapfiles/libjava/reorder-x86 b/jdk/make/mapfiles/libjava/reorder-x86 +index e0566b32..5b7a7ee1 100644 +--- a/jdk/make/mapfiles/libjava/reorder-x86 ++++ b/jdk/make/mapfiles/libjava/reorder-x86 +@@ -10,6 +10,7 @@ text: .text%collapse: OUTPUTDIR/canonicalize_md.o; + text: .text%Java_java_lang_Object_registerNatives; + text: .text%Java_java_lang_System_registerNatives; + text: .text%Java_java_lang_Thread_registerNatives; ++text: .text%Java_com_huawei_jvm_gc_AdaptiveHeapMXBeanImpl_registerNatives; + text: .text%Java_java_security_AccessController_getStackAccessControlContext; + text: .text%Java_java_security_AccessController_getInheritedAccessControlContext; + text: .text%Java_java_lang_ClassLoader_registerNatives; +diff --git a/jdk/src/share/classes/com/huawei/jvm/gc/AdaptiveHeapMXBeanImpl.java b/jdk/src/share/classes/com/huawei/jvm/gc/AdaptiveHeapMXBeanImpl.java +new file mode 100644 +index 00000000..1443fb04 +--- /dev/null ++++ b/jdk/src/share/classes/com/huawei/jvm/gc/AdaptiveHeapMXBeanImpl.java +@@ -0,0 +1,62 @@ ++/* ++ * Copyright (c) 2020, Huawei Technologies Co., LTD. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++package com.huawei.jvm.gc; ++import com.huawei.management.AdaptiveHeapMXBean; ++import sun.management.Util; ++import javax.management.ObjectName; ++ ++public class AdaptiveHeapMXBeanImpl implements AdaptiveHeapMXBean { ++ private static native void registerNatives(); ++ ++ static { ++ registerNatives(); ++ } ++ ++ private final static String ADAPTIVE_HEAP_MXBEAN_NAME = "com.huawei.management:type=AdaptiveHeap"; ++ @Override ++ public void setG1PeriodicGCInterval(int interval) { ++ setG1PeriodicGCIntervalImpl(interval); ++ } ++ @Override ++ public void setG1PeriodicGCLoadThreshold(int loadThreshold) { ++ setG1PeriodicGCLoadThresholdImpl(loadThreshold); ++ } ++ @Override ++ public int getG1PeriodicGCInterval() { ++ return getG1PeriodicGCIntervalImpl(); ++ } ++ @Override ++ public int getG1PeriodicGCLoadThreshold() { ++ return getG1PeriodicGCLoadThresholdImpl(); ++ } ++ @Override ++ public ObjectName getObjectName() { ++ return Util.newObjectName(ADAPTIVE_HEAP_MXBEAN_NAME); ++ } ++ ++ ++ private static native void 
setG1PeriodicGCIntervalImpl(int interval); ++ private static native void setG1PeriodicGCLoadThresholdImpl(int loadThreshold); ++ private static native int getG1PeriodicGCIntervalImpl(); ++ private static native int getG1PeriodicGCLoadThresholdImpl(); ++} +diff --git a/jdk/src/share/classes/com/huawei/management/AdaptiveHeapMXBean.java b/jdk/src/share/classes/com/huawei/management/AdaptiveHeapMXBean.java +new file mode 100644 +index 00000000..70563b58 +--- /dev/null ++++ b/jdk/src/share/classes/com/huawei/management/AdaptiveHeapMXBean.java +@@ -0,0 +1,32 @@ ++/* ++ * Copyright (c) 2020, Huawei Technologies Co., LTD. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++package com.huawei.management; ++ ++import java.lang.management.PlatformManagedObject; ++import java.util.List; ++public interface AdaptiveHeapMXBean extends PlatformManagedObject { ++ void setG1PeriodicGCInterval(int interval); ++ void setG1PeriodicGCLoadThreshold(int loadThreshold); ++ int getG1PeriodicGCInterval(); ++ int getG1PeriodicGCLoadThreshold(); ++} +diff --git a/jdk/src/share/classes/java/lang/management/PlatformComponent.java b/jdk/src/share/classes/java/lang/management/PlatformComponent.java +index 0c67543e..afc48d24 100644 +--- a/jdk/src/share/classes/java/lang/management/PlatformComponent.java ++++ b/jdk/src/share/classes/java/lang/management/PlatformComponent.java +@@ -37,6 +37,7 @@ import javax.management.ObjectName; + + import com.sun.management.HotSpotDiagnosticMXBean; + import com.sun.management.UnixOperatingSystemMXBean; ++import com.huawei.management.AdaptiveHeapMXBean; + + import sun.management.ManagementFactoryHelper; + import sun.management.Util; +@@ -270,9 +271,20 @@ enum PlatformComponent { + public List getMXBeans() { + return Collections.singletonList(ManagementFactoryHelper.getDiagnosticMXBean()); + } ++ }), ++ /** ++ * ADAPTIVE Heap. ++ */ ++ ADAPTIVE_HEAP( ++ "com.huawei.management.AdaptiveHeapMXBean", ++ "com.huawei.management", "AdaptiveHeap", defaultKeyProperties(), ++ true, ++ new MXBeanFetcher() { ++ public List getMXBeans() { ++ return Collections.singletonList(ManagementFactoryHelper.getAdaptiveHeapMXBean()); ++ } + }); + +- + /** + * A task that returns the MXBeans for a component. 
+ */ +diff --git a/jdk/src/share/classes/sun/management/ManagementFactoryHelper.java b/jdk/src/share/classes/sun/management/ManagementFactoryHelper.java +index be82ddae..d5df523e 100644 +--- a/jdk/src/share/classes/sun/management/ManagementFactoryHelper.java ++++ b/jdk/src/share/classes/sun/management/ManagementFactoryHelper.java +@@ -48,6 +48,9 @@ import java.util.List; + import com.sun.management.DiagnosticCommandMBean; + import com.sun.management.HotSpotDiagnosticMXBean; + ++import com.huawei.management.AdaptiveHeapMXBean; ++import com.huawei.jvm.gc.AdaptiveHeapMXBeanImpl; ++ + import static java.lang.management.ManagementFactory.*; + + /** +@@ -65,6 +68,7 @@ public class ManagementFactoryHelper { + private static RuntimeImpl runtimeMBean = null; + private static CompilationImpl compileMBean = null; + private static OperatingSystemImpl osMBean = null; ++ private static AdaptiveHeapMXBeanImpl adaptiveHeapMXBean = null; + + public static synchronized ClassLoadingMXBean getClassLoadingMXBean() { + if (classMBean == null) { +@@ -108,6 +112,13 @@ public class ManagementFactoryHelper { + return osMBean; + } + ++ public static synchronized AdaptiveHeapMXBean getAdaptiveHeapMXBean() { ++ if (adaptiveHeapMXBean == null) { ++ adaptiveHeapMXBean = new AdaptiveHeapMXBeanImpl(); ++ } ++ return adaptiveHeapMXBean; ++ } ++ + public static List getMemoryPoolMXBeans() { + MemoryPoolMXBean[] pools = MemoryImpl.getMemoryPools(); + List list = new ArrayList<>(pools.length); +diff --git a/jdk/src/share/javavm/export/jvm.h b/jdk/src/share/javavm/export/jvm.h +index 6e64cb0d..9eafbbb8 100644 +--- a/jdk/src/share/javavm/export/jvm.h ++++ b/jdk/src/share/javavm/export/jvm.h +@@ -1430,6 +1430,21 @@ JNIEXPORT jintArray JNICALL + JVM_GetResourceLookupCache(JNIEnv *env, jobject loader, const char *resource_name); + + ++/* ++ *com.huawei.management.AdaptiveHeapMXBeanImpl ++ */ ++JNIEXPORT void JNICALL ++JVM_AdaptiveHeapSetG1PeriodicGCInterval(JNIEnv *env, jclass klass, jint interval); 
++JNIEXPORT jint JNICALL
++JVM_AdaptiveHeapGetG1PeriodicGCInterval(JNIEnv *env, jclass klass);
++
++
++JNIEXPORT void JNICALL
++JVM_AdaptiveHeapSetG1PeriodicGCLoadThreshold(JNIEnv *env, jclass clazz, jint loadThreshold);
++JNIEXPORT jint JNICALL
++JVM_AdaptiveHeapGetG1PeriodicGCLoadThreshold(JNIEnv *env, jclass clazz);
++
++
+ /* =========================================================================
+  * The following defines a private JVM interface that the JDK can query
+  * for the JVM version and capabilities. sun.misc.Version defines
+diff --git a/jdk/src/share/native/com/huawei/jvm/gc/AdaptiveHeapMXBeanImpl.c b/jdk/src/share/native/com/huawei/jvm/gc/AdaptiveHeapMXBeanImpl.c
+new file mode 100644
+index 00000000..1f75e7cb
+--- /dev/null
++++ b/jdk/src/share/native/com/huawei/jvm/gc/AdaptiveHeapMXBeanImpl.c
+@@ -0,0 +1,40 @@
++/*
++ * Copyright (c) 2020, Huawei Technologies Co., LTD. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation. Oracle designates this
++ * particular file as subject to the "Classpath" exception as provided
++ * by Oracle in the LICENSE file that accompanied this code.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ */
++
++#include "jni.h"
++#include "jvm.h"
++#include "com_huawei_jvm_gc_AdaptiveHeapMXBeanImpl.h"
++
++#define ARRAY_LENGTH(a) (sizeof(a)/sizeof(a[0]))
++
++static JNINativeMethod methods[] = {
++    {"setG1PeriodicGCIntervalImpl", "(I)V", (void *)&JVM_AdaptiveHeapSetG1PeriodicGCInterval},
++    {"getG1PeriodicGCIntervalImpl", "()I", (void *)&JVM_AdaptiveHeapGetG1PeriodicGCInterval},
++    {"setG1PeriodicGCLoadThresholdImpl", "(I)V", (void *)&JVM_AdaptiveHeapSetG1PeriodicGCLoadThreshold},
++    {"getG1PeriodicGCLoadThresholdImpl", "()I", (void *)&JVM_AdaptiveHeapGetG1PeriodicGCLoadThreshold},
++
++};
++
++JNIEXPORT void JNICALL
++Java_com_huawei_jvm_gc_AdaptiveHeapMXBeanImpl_registerNatives(JNIEnv *env, jclass cls)
++{
++    (*env)->RegisterNatives(env, cls, methods, ARRAY_LENGTH(methods));
++}
+diff --git a/jdk/src/solaris/native/sun/management/LinuxOperatingSystem.c b/jdk/src/solaris/native/sun/management/LinuxOperatingSystem.c
+index a2ddcb93..93af600d 100644
+--- a/jdk/src/solaris/native/sun/management/LinuxOperatingSystem.c
++++ b/jdk/src/solaris/native/sun/management/LinuxOperatingSystem.c
+@@ -57,7 +57,7 @@ static struct perfbuf {
+     ticks *cpus;
+ } counters;
+
+-#define DEC_64 "%lld"
++#define DEC_64 "%llu"
+
+ static void next_line(FILE *f) {
+     while (fgetc(f) != '\n');
diff --git a/java-1.8.0-openjdk.spec b/java-1.8.0-openjdk.spec
index 4659dca..422650f 100644
--- a/java-1.8.0-openjdk.spec
+++ b/java-1.8.0-openjdk.spec
@@ -915,7 +915,7 @@ Provides: java-%{javaver}-%{origin}-accessibility%{?1} = %{epoch}:%{version}-%{r
 Name: java-%{javaver}-%{origin}
 Version: %{javaver}.%{updatever}.%{buildver}
-Release: 8
+Release: 9
 # java-1.5.0-ibm from jpackage.org set Epoch to 1 for unknown reasons
 # and this change was brought into RHEL-4. java-1.5.0-ibm packages
 # also included the epoch in their virtual provides. 
This created a @@ -1064,6 +1064,7 @@ Patch135: 8223940-Private-key-not-supported-by-chosen-signature.patch Patch136: 8236512-PKCS11-Connection-closed-after-Cipher.doFinal-and-NoPadding.patch Patch137: 8250861-Crash-in-MinINode-Ideal-PhaseGVN-bool.patch Patch138: add-appcds-file-lock.patch +Patch139: G1-memory-uncommit.patch ############################################# # @@ -1483,6 +1484,7 @@ pushd %{top_level_dir_name} %patch136 -p1 %patch137 -p1 %patch138 -p1 +%patch139 -p1 popd @@ -2099,6 +2101,9 @@ require "copy_jdk_configs.lua" %endif %changelog +* Thu Dec 22 2020 cruise01 - 1:1.8.0.272-b10.9 +- add G1-memory-uncommit.patch + * Thu Dec 22 2020 kuenking - 1:1.8.0.272-b10.8 - add add-appcds-file-lock.patch -- Gitee