diff --git a/8137165-Tests-fail-in-SR_Handler-because-thread-is-n.patch b/8137165-Tests-fail-in-SR_Handler-because-thread-is-n.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0eb257dca5b1764f287371e2fef0f5bc2fb4adf8
--- /dev/null
+++ b/8137165-Tests-fail-in-SR_Handler-because-thread-is-n.patch
@@ -0,0 +1,72 @@
+From 11116ea71b22635302759817b43c555ded53f882 Mon Sep 17 00:00:00 2001
+Subject: 8137165: Tests fail in SR_Handler because thread is not VMThread or JavaThread
+
+---
+ hotspot/src/os/linux/vm/os_linux.cpp | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+diff --git a/hotspot/src/os/linux/vm/os_linux.cpp b/hotspot/src/os/linux/vm/os_linux.cpp
+index 6ee49eedc..773c746af 100644
+--- a/hotspot/src/os/linux/vm/os_linux.cpp
++++ b/hotspot/src/os/linux/vm/os_linux.cpp
+@@ -1070,6 +1070,13 @@ void os::free_thread(OSThread* osthread) {
+ assert(osthread != NULL, "osthread not set");
+
+ if (Thread::current()->osthread() == osthread) {
++#ifdef ASSERT
++ sigset_t current;
++ sigemptyset(&current);
++ pthread_sigmask(SIG_SETMASK, NULL, &current);
++ assert(!sigismember(&current, SR_signum), "SR signal should not be blocked!");
++#endif
++
+ // Restore caller's signal mask
+ sigset_t sigmask = osthread->caller_sigmask();
+ pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
+@@ -4723,7 +4730,8 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
+ // after sigsuspend.
+ int old_errno = errno;
+
+- Thread* thread = Thread::current();
++ Thread* thread = Thread::current_or_null();
++ assert(thread != NULL, "Missing current thread in SR_handler");
+ OSThread* osthread = thread->osthread();
+ assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
+
+@@ -4735,7 +4743,7 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
+ os::SuspendResume::State state = osthread->sr.suspended();
+ if (state == os::SuspendResume::SR_SUSPENDED) {
+ sigset_t suspend_set; // signals for sigsuspend()
+-
++ sigemptyset(&suspend_set);
+ // get current set of blocked signals and unblock resume signal
+ pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
+ sigdelset(&suspend_set, SR_signum);
+@@ -5025,6 +5033,7 @@ static bool call_chained_handler(struct sigaction *actp, int sig,
+
+ // try to honor the signal mask
+ sigset_t oset;
++ sigemptyset(&oset);
+ pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
+
+ // call into the chained handler
+@@ -5035,7 +5044,7 @@ static bool call_chained_handler(struct sigaction *actp, int sig,
+ }
+
+ // restore the signal mask
+- pthread_sigmask(SIG_SETMASK, &oset, 0);
++ pthread_sigmask(SIG_SETMASK, &oset, NULL);
+ }
+ // Tell jvm's signal handler the signal is taken care of.
+ return true;
+@@ -6699,6 +6708,7 @@ void Parker::park(bool isAbsolute, jlong time) {
+ // Don't catch signals while blocked; let the running threads have the signals.
+ // (This allows a debugger to break into the running thread.)
+ sigset_t oldsigs;
++ sigemptyset(&oldsigs);
+ sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
+ pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
+ #endif
+--
+2.19.1
+
diff --git a/Enhance-SIGBUS-and-rlimit-information-in-errlog.patch b/Enhance-SIGBUS-and-rlimit-information-in-errlog.patch
new file mode 100644
index 0000000000000000000000000000000000000000..368d0e477968c9f5c358e4b3d8deef7800660ac5
--- /dev/null
+++ b/Enhance-SIGBUS-and-rlimit-information-in-errlog.patch
@@ -0,0 +1,67 @@
+Subject: [PATCH][Huawei] Enhance SIGBUS and rlimit information in errlog
+
+---
+ hotspot/src/os/posix/vm/os_posix.cpp | 26 ++++++++++++++++++++++
+ hotspot/src/share/vm/utilities/vmError.cpp | 2 +-
+ 2 files changed, 27 insertions(+), 1 deletion(-)
+
+diff --git a/hotspot/src/os/posix/vm/os_posix.cpp b/hotspot/src/os/posix/vm/os_posix.cpp
+index f7dab3c7f..a83ae1476 100644
+--- a/hotspot/src/os/posix/vm/os_posix.cpp
++++ b/hotspot/src/os/posix/vm/os_posix.cpp
+@@ -207,6 +207,26 @@ void os::Posix::print_rlimit_info(outputStream* st) {
+ if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
+ else st->print("%uk", rlim.rlim_cur >> 10);
+
++ st->print(", DATA ");
++ getrlimit(RLIMIT_DATA, &rlim);
++ if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
++ else st->print("%uk", rlim.rlim_cur >> 10);
++
++ st->print(", FSIZE ");
++ getrlimit(RLIMIT_FSIZE, &rlim);
++ if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
++ else st->print("%u", rlim.rlim_cur >> 10);
++
++ st->print(", CPU ");
++ getrlimit(RLIMIT_CPU, &rlim);
++ if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
++ else st->print("%uk seconds", rlim.rlim_cur >> 10);
++
++ st->print(", RSS ");
++ getrlimit(RLIMIT_RSS, &rlim);
++ if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
++ else st->print("%u", rlim.rlim_cur >> 10);
++
+ // Isn't there on solaris
+ #if !defined(TARGET_OS_FAMILY_solaris) && !defined(TARGET_OS_FAMILY_aix)
+ st->print(", NPROC ");
+@@ -765,6 +785,12 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
+ { SIGBUS, BUS_ADRALN, "BUS_ADRALN", "Invalid address alignment." },
+ { SIGBUS, BUS_ADRERR, "BUS_ADRERR", "Nonexistent physical address." },
+ { SIGBUS, BUS_OBJERR, "BUS_OBJERR", "Object-specific hardware error." },
++#ifdef BUS_MCEERR_AR
++ { SIGBUS, BUS_MCEERR_AR,"BUS_MCEERR_AR","hardware memory error consumed on a machine check: action required."},
++#endif
++#ifdef BUS_MCEERR_AO
++ { SIGBUS, BUS_MCEERR_AO,"BUS_MCEERR_AO","hardware memory error detected in process but not consumed: action optional."},
++#endif
+ { SIGTRAP, TRAP_BRKPT, "TRAP_BRKPT", "Process breakpoint." },
+ { SIGTRAP, TRAP_TRACE, "TRAP_TRACE", "Process trace trap." },
+ { SIGCHLD, CLD_EXITED, "CLD_EXITED", "Child has exited." },
+diff --git a/hotspot/src/share/vm/utilities/vmError.cpp b/hotspot/src/share/vm/utilities/vmError.cpp
+index 0c5c955bf..3233e4b31 100644
+--- a/hotspot/src/share/vm/utilities/vmError.cpp
++++ b/hotspot/src/share/vm/utilities/vmError.cpp
+@@ -813,7 +813,7 @@ void VMError::report(outputStream* st) {
+ #if defined(AARCH64) || defined(X86)
+ STEP(207, "(printing file descriptor)" )
+
+- if (ExtensiveErrorReports && _verbose) {
++ if (_verbose) {
+ // File Descriptor
+ os::print_file_descriptor(st);
+ st->cr();
+--
+2.19.1
+
diff --git a/Extending-the-IV-Length-Supported-by-KAEProvider-AES.patch b/Extending-the-IV-Length-Supported-by-KAEProvider-AES.patch
new file mode 100644
index 0000000000000000000000000000000000000000..03e949a677b51dbcac637e123b27f2cd08158248
--- /dev/null
+++ b/Extending-the-IV-Length-Supported-by-KAEProvider-AES.patch
@@ -0,0 +1,135 @@
+From f4b0357c01f51e813642c3ac5c8ff33c1576eb72 Mon Sep 17 00:00:00 2001
+Subject: Extending the IV Length Supported by KAEProvider AES/Gcm
+---
+ .../security/openssl/kae_symmetric_cipher.c | 23 ++++++--
+ .../security/openssl/KAEGcmIvLenTest.java | 52 +++++++++++++++++++
+ 2 files changed, 72 insertions(+), 3 deletions(-)
+ create mode 100644 jdk/test/org/openeuler/security/openssl/KAEGcmIvLenTest.java
+
+diff --git a/jdk/src/solaris/native/org/openeuler/security/openssl/kae_symmetric_cipher.c b/jdk/src/solaris/native/org/openeuler/security/openssl/kae_symmetric_cipher.c
+index ec8894f1a..7618d6e16 100644
+--- a/jdk/src/solaris/native/org/openeuler/security/openssl/kae_symmetric_cipher.c
++++ b/jdk/src/solaris/native/org/openeuler/security/openssl/kae_symmetric_cipher.c
+@@ -146,6 +146,7 @@ Java_org_openeuler_security_openssl_KAESymmetricCipherBase_nativeInit(JNIEnv* en
+ const EVP_CIPHER* cipher = NULL;
+ ENGINE* kaeEngine = NULL;
+ int keyLength = (*env)->GetArrayLength(env, key);
++ int ivLength = 0;
+
+ const char* algo = (*env)->GetStringUTFChars(env, cipherType, 0);
+ if (StartsWith("aes", algo)) {
+@@ -158,7 +159,6 @@ Java_org_openeuler_security_openssl_KAESymmetricCipherBase_nativeInit(JNIEnv* en
+
+ KAE_TRACE("KAESymmetricCipherBase_nativeInit: kaeEngine => %p", kaeEngine);
+
+- (*env)->ReleaseStringUTFChars(env, cipherType, algo);
+ if (cipher == NULL) {
+ KAE_ThrowOOMException(env, "create EVP_CIPHER fail");
+ goto cleanup;
+@@ -170,19 +170,35 @@ Java_org_openeuler_security_openssl_KAESymmetricCipherBase_nativeInit(JNIEnv* en
+
+ if (iv != NULL) {
+ ivBytes = (*env)->GetByteArrayElements(env, iv, NULL);
++ ivLength = (*env)->GetArrayLength(env, iv);
+ }
+ if (key != NULL) {
+ keyBytes = (*env)->GetByteArrayElements(env, key, NULL);
+ }
+
+- if (!EVP_CipherInit_ex(ctx, cipher, kaeEngine, (const unsigned char*)keyBytes,
+- (const unsigned char*)ivBytes, encrypt ? 1 : 0)) {
++ if (!EVP_CipherInit_ex(ctx, cipher, kaeEngine, NULL,
++ NULL, encrypt ? 1 : 0)) {
+ KAE_ThrowFromOpenssl(env, "EVP_CipherInit_ex failed", KAE_ThrowRuntimeException);
+ goto cleanup;
+ }
+
++ if (strcasecmp(algo + 8, "gcm") == 0) {
++ /* Set IV length if default 12 bytes (96 bits) is not appropriate */
++ if(!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivLength, NULL)) {
++ KAE_ThrowFromOpenssl(env, "EVP_CIPHER_CTX_ctrl failed", KAE_ThrowRuntimeException);
++ goto cleanup;
++ }
++ }
++
++ if (!EVP_CipherInit_ex(ctx, NULL, kaeEngine, (const unsigned char*)keyBytes,
++ (const unsigned char*)ivBytes, encrypt ? 1 : 0)) {
++        KAE_ThrowFromOpenssl(env, "EVP_CipherInit_ex init key & iv failed", KAE_ThrowRuntimeException);
++ goto cleanup;
++ }
++
+ EVP_CIPHER_CTX_set_padding(ctx, padding ? 1 : 0);
+
++ (*env)->ReleaseStringUTFChars(env, cipherType, algo);
+ FreeMemoryFromInit(env, iv, ivBytes, key, keyBytes, keyLength);
+ return (jlong)ctx;
+
+@@ -190,6 +206,7 @@ cleanup:
+ if (ctx != NULL) {
+ EVP_CIPHER_CTX_free(ctx);
+ }
++ (*env)->ReleaseStringUTFChars(env, cipherType, algo);
+ FreeMemoryFromInit(env, iv, ivBytes, key, keyBytes, keyLength);
+ return 0;
+ }
+diff --git a/jdk/test/org/openeuler/security/openssl/KAEGcmIvLenTest.java b/jdk/test/org/openeuler/security/openssl/KAEGcmIvLenTest.java
+new file mode 100644
+index 000000000..c9e2257aa
+--- /dev/null
++++ b/jdk/test/org/openeuler/security/openssl/KAEGcmIvLenTest.java
+@@ -0,0 +1,52 @@
++import org.openeuler.security.openssl.KAEProvider;
++
++import javax.crypto.Cipher;
++import javax.crypto.spec.GCMParameterSpec;
++import javax.crypto.spec.SecretKeySpec;
++import java.nio.charset.StandardCharsets;
++import java.security.Security;
++import java.util.Arrays;
++
++/**
++ * @test
++ * @summary Basic test for AES/GCM Iv
++ * @requires os.arch=="aarch64"
++ * @run main KAEGcmIvLenTest
++ */
++public class KAEGcmIvLenTest {
++    private static String plainText = "helloworldhellow"; // 16 bytes for NoPadding
++    private static String shortPlainText = "helloworld"; // 10 bytes for padding
++ private static SecretKeySpec ks = new SecretKeySpec("AESEncryptionKey".getBytes(StandardCharsets.UTF_8), "AES"); // key has 16 bytes
++ private static int[] ivLens = {12, 16};
++ public static void main(String[] args) throws Exception {
++ Security.addProvider(new KAEProvider());
++ for (int ivLen : ivLens) {
++ testGcm(plainText,"AES/GCM/NoPadding", "KAEProvider", "SunJCE", ivLen);
++ testGcm(plainText,"AES/GCM/NoPadding", "SunJCE", "KAEProvider", ivLen);
++ testGcm(shortPlainText,"AES/GCM/PKCS5Padding", "KAEProvider", "SunJCE", ivLen);
++ testGcm(shortPlainText,"AES/GCM/PKCS5Padding", "SunJCE", "KAEProvider", ivLen);
++ }
++
++ }
++
++ private static void testGcm(String plainText, String algo, String encryptProvider, String decryptProvider, int ivLen) throws Exception {
++ Cipher enCipher = Cipher.getInstance(algo, encryptProvider);
++ enCipher.init(Cipher.ENCRYPT_MODE, ks, getIv(ivLen));
++ byte[] cipherText = enCipher.doFinal(plainText.getBytes());
++
++ Cipher deCipher = Cipher.getInstance(algo, decryptProvider);
++ deCipher.init(Cipher.DECRYPT_MODE, ks, getIv(ivLen));
++ byte[] origin = deCipher.doFinal(cipherText);
++
++ if (!Arrays.equals(plainText.getBytes(), origin)) {
++ throw new RuntimeException("gcm decryption failed, algo = " + algo);
++ }
++ }
++
++ private static GCMParameterSpec getIv(int ivLen) {
++ if (ivLen == 16) {
++ return new GCMParameterSpec(128, "abcdefghabcdefgh".getBytes(StandardCharsets.UTF_8));
++ }
++ return new GCMParameterSpec(96, "abcdefghabcd".getBytes(StandardCharsets.UTF_8));
++ }
++}
+--
+2.19.1
+
diff --git a/KAE-zip-support-streaming-data-decompression.patch b/KAE-zip-support-streaming-data-decompression.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4e011116bbb0a8eb5c34ffef832d81544cb66428
--- /dev/null
+++ b/KAE-zip-support-streaming-data-decompression.patch
@@ -0,0 +1,691 @@
+---
+ jdk/make/mapfiles/libzip/mapfile-vers | 5 +-
+ .../share/classes/java/util/zip/Deflater.java | 36 +++++--
+ .../java/util/zip/GZIPInputStream.java | 73 +++++++++++---
+ .../java/util/zip/GZIPOutputStream.java | 44 +++++----
+ .../share/classes/java/util/zip/Inflater.java | 37 +++++---
+ .../java/util/zip/InflaterInputStream.java | 25 +++++
+ jdk/src/share/lib/security/java.policy | 2 +
+ jdk/src/share/native/java/util/zip/Deflater.c | 9 +-
+ jdk/src/share/native/java/util/zip/Inflater.c | 73 +-------------
+ .../java/util/zip/GZIP/TestAvailable.java | 94 +++++++++++++++++++
+ 10 files changed, 272 insertions(+), 126 deletions(-)
+ create mode 100644 jdk/test/java/util/zip/GZIP/TestAvailable.java
+
+diff --git a/jdk/make/mapfiles/libzip/mapfile-vers b/jdk/make/mapfiles/libzip/mapfile-vers
+index 5c6d27d0d..79ef59e5f 100644
+--- a/jdk/make/mapfiles/libzip/mapfile-vers
++++ b/jdk/make/mapfiles/libzip/mapfile-vers
+@@ -38,16 +38,15 @@ SUNWprivate_1.1 {
+ Java_java_util_zip_Deflater_end;
+ Java_java_util_zip_Deflater_getAdler;
+ Java_java_util_zip_Deflater_init;
+- Java_java_util_zip_Deflater_initKae;
++ Java_java_util_zip_Deflater_initKAE;
+ Java_java_util_zip_Deflater_initIDs;
+ Java_java_util_zip_Deflater_reset;
+ Java_java_util_zip_Deflater_setDictionary;
+ Java_java_util_zip_Inflater_end;
+ Java_java_util_zip_Inflater_getAdler;
+ Java_java_util_zip_Inflater_inflateBytes;
+- Java_java_util_zip_Inflater_inflateBytesKAE;
+ Java_java_util_zip_Inflater_init;
+- Java_java_util_zip_Inflater_initKae;
++ Java_java_util_zip_Inflater_initKAE;
+ Java_java_util_zip_Inflater_initIDs;
+ Java_java_util_zip_Inflater_reset;
+ Java_java_util_zip_Inflater_setDictionary;
+diff --git a/jdk/src/share/classes/java/util/zip/Deflater.java b/jdk/src/share/classes/java/util/zip/Deflater.java
+index a4ea40cf8..509808349 100644
+--- a/jdk/src/share/classes/java/util/zip/Deflater.java
++++ b/jdk/src/share/classes/java/util/zip/Deflater.java
+@@ -81,7 +81,6 @@ class Deflater {
+ private boolean finish, finished;
+ private long bytesRead;
+ private long bytesWritten;
+- private boolean defalterUseKae;
+
+ /**
+ * Compression method for the deflate algorithm (the only one currently
+@@ -169,13 +168,20 @@ class Deflater {
+ public Deflater(int level, boolean nowrap) {
+ this.level = level;
+ this.strategy = DEFAULT_STRATEGY;
+- if (("true".equals(System.getProperty("GZIP_USE_KAE", "false"))) &&
+- ("aarch64".equals(System.getProperty("os.arch")))) {
+- this.defalterUseKae = true;
+- }
+- this.zsRef = defalterUseKae ?
+- new ZStreamRef(initKae(level, DEFAULT_STRATEGY)) :
+- new ZStreamRef(init(level, DEFAULT_STRATEGY, nowrap));
++ this.zsRef = new ZStreamRef(init(level, DEFAULT_STRATEGY, nowrap));
++ }
++
++ /**
++ * Creates a new compressor using the specified compression level
++ * and windowBits.
++ * This method is mainly used to support the KAE-zip feature.
++ * @param level the compression level (0-9)
++ * @param windowBits compression format (-15~31)
++ */
++ public Deflater(int level, int windowBits) {
++ this.level = level;
++ this.strategy = DEFAULT_STRATEGY;
++ this.zsRef = new ZStreamRef(initKAE(level, DEFAULT_STRATEGY, windowBits));
+ }
+
+ /**
+@@ -535,6 +541,18 @@ class Deflater {
+ }
+ }
+
++ /**
++ * Resets deflater so that a new set of input data can be processed.
++ * Java fields are not initialized.
++ * This method is mainly used to support the KAE-zip feature.
++ */
++ public void resetKAE() {
++ synchronized (zsRef) {
++ ensureOpen();
++ reset(zsRef.address());
++ }
++ }
++
+ /**
+ * Closes the compressor and discards any unprocessed input.
+ * This method should be called when the compressor is no longer
+@@ -578,7 +596,7 @@ class Deflater {
+
+ private static native void initIDs();
+ private native static long init(int level, int strategy, boolean nowrap);
+- private native static long initKae(int level, int strategy);
++ private native static long initKAE(int level, int strategy, int windowBits);
+ private native static void setDictionary(long addr, byte[] b, int off, int len);
+ private native int deflateBytes(long addr, byte[] b, int off, int len,
+ int flush);
+diff --git a/jdk/src/share/classes/java/util/zip/GZIPInputStream.java b/jdk/src/share/classes/java/util/zip/GZIPInputStream.java
+index 7fb753729..10d044caf 100644
+--- a/jdk/src/share/classes/java/util/zip/GZIPInputStream.java
++++ b/jdk/src/share/classes/java/util/zip/GZIPInputStream.java
+@@ -54,10 +54,22 @@ class GZIPInputStream extends InflaterInputStream {
+
+ private boolean closed = false;
+
+- /*
+- * GZIP use KAE.
++ /**
++ * The field is mainly used to support the KAE-zip feature.
+ */
+- private boolean gzipUseKae = false;
++ private static boolean GZIP_USE_KAE = false;
++
++ private static int WINDOWBITS = 31;
++
++ private static int FLUSHKAE = 2;
++
++ static {
++ if ("aarch64".equals(System.getProperty("os.arch"))) {
++ GZIP_USE_KAE = Boolean.parseBoolean(System.getProperty("GZIP_USE_KAE", "false"));
++ WINDOWBITS = Integer.parseInt(System.getProperty("WINDOWBITS", "31"));
++ FLUSHKAE = Integer.parseInt(System.getProperty("FLUSHKAE", "2"));
++ }
++ }
+
+ /**
+ * Check to make sure that this stream has not been closed
+@@ -79,14 +91,13 @@ class GZIPInputStream extends InflaterInputStream {
+ * @exception IllegalArgumentException if {@code size <= 0}
+ */
+ public GZIPInputStream(InputStream in, int size) throws IOException {
+- super(in, new Inflater(true), size);
++ super(in, GZIP_USE_KAE ? new Inflater(WINDOWBITS, FLUSHKAE) : new Inflater(true), size);
+ usesDefaultInflater = true;
+- if (("true".equals(System.getProperty("GZIP_USE_KAE", "false"))) &&
+- ("aarch64".equals(System.getProperty("os.arch")))) {
+- gzipUseKae = true;
+- }
+- // file header will be readed by kae zlib when use kae
+- if (gzipUseKae) return;
++
++        // When GZIP_USE_KAE is true, the header of the file is read
++ // through the native zlib library, not in java code.
++ if (GZIP_USE_KAE) return;
++
+ readHeader(in);
+ }
+
+@@ -127,13 +138,16 @@ class GZIPInputStream extends InflaterInputStream {
+ }
+ int n = super.read(buf, off, len);
+ if (n == -1) {
+- if (readTrailer())
++ if (GZIP_USE_KAE ? readTrailerKAE() : readTrailer())
+ eos = true;
+ else
+ return this.read(buf, off, len);
+ } else {
+ crc.update(buf, off, n);
+ }
++ if (GZIP_USE_KAE && inf.finished()) {
++ if (readTrailerKAE()) eos = true;
++ }
+ return n;
+ }
+
+@@ -220,9 +234,7 @@ class GZIPInputStream extends InflaterInputStream {
+ * data set)
+ */
+ private boolean readTrailer() throws IOException {
+- // file trailer will be readed by kae zlib when use kae
+- if (gzipUseKae) return true;
+-
++ if (GZIP_USE_KAE) return true;
+ InputStream in = this.in;
+ int n = inf.getRemaining();
+ if (n > 0) {
+@@ -251,6 +263,39 @@ class GZIPInputStream extends InflaterInputStream {
+ return false;
+ }
+
++ /*
++ * Reads GZIP member trailer and returns true if the eos
++ * reached, false if there are more (concatenated gzip
++ * data set)
++ *
++ * This method is mainly used to support the KAE-zip feature.
++ */
++ private boolean readTrailerKAE() throws IOException {
++ InputStream in = this.in;
++ int n = inf.getRemaining();
++ if (n > 0) {
++ in = new SequenceInputStream(
++ new ByteArrayInputStream(buf, len - n, n),
++ new FilterInputStream(in) {
++ public void close() throws IOException {}
++ });
++ }
++ // If there are more bytes available in "in" or the leftover in the "inf" is > 18 bytes:
++ // next.header.min(10) + next.trailer(8), try concatenated case
++
++ if (n > 18) {
++ inf.reset();
++ inf.setInput(buf, len - n, n);
++ } else {
++ try {
++ fillKAE(n);
++ } catch (IOException e) {
++ return true;
++ }
++ }
++ return false;
++ }
++
+ /*
+ * Reads unsigned integer in Intel byte order.
+ */
+diff --git a/jdk/src/share/classes/java/util/zip/GZIPOutputStream.java b/jdk/src/share/classes/java/util/zip/GZIPOutputStream.java
+index 0f0be98bb..8eae40739 100644
+--- a/jdk/src/share/classes/java/util/zip/GZIPOutputStream.java
++++ b/jdk/src/share/classes/java/util/zip/GZIPOutputStream.java
+@@ -52,10 +52,19 @@ class GZIPOutputStream extends DeflaterOutputStream {
+ */
+ private final static int TRAILER_SIZE = 8;
+
+- /*
+- * GZIP use KAE.
++ /**
++ * The field is mainly used to support the KAE-zip feature.
+ */
+- private boolean gzipUseKae = false;
++ private static boolean GZIP_USE_KAE = false;
++
++ private static int WINDOWBITS = 31;
++
++ static {
++ if ("aarch64".equals(System.getProperty("os.arch"))) {
++ GZIP_USE_KAE = Boolean.parseBoolean(System.getProperty("GZIP_USE_KAE", "false"));
++ WINDOWBITS = Integer.parseInt(System.getProperty("WINDOWBITS", "31"));
++ }
++ }
+
+ /**
+ * Creates a new output stream with the specified buffer size.
+@@ -92,16 +101,15 @@ class GZIPOutputStream extends DeflaterOutputStream {
+ public GZIPOutputStream(OutputStream out, int size, boolean syncFlush)
+ throws IOException
+ {
+- super(out, new Deflater(Deflater.DEFAULT_COMPRESSION, true),
++ super(out, GZIP_USE_KAE ? new Deflater(Deflater.DEFAULT_COMPRESSION, WINDOWBITS) :
++ new Deflater(Deflater.DEFAULT_COMPRESSION, true),
+ size,
+ syncFlush);
+ usesDefaultDeflater = true;
+- if (("true".equals(System.getProperty("GZIP_USE_KAE", "false"))) &&
+- ("aarch64".equals(System.getProperty("os.arch")))) {
+- gzipUseKae = true;
+- }
+- // file header will be writed by kae zlib when use kae
+- if (gzipUseKae) return;
++
++ // When GZIP_USE_KAE is true, the header of the file is written
++ // through the native zlib library, not in java code.
++ if (GZIP_USE_KAE) return;
+ writeHeader();
+ crc.reset();
+ }
+@@ -171,9 +179,11 @@ class GZIPOutputStream extends DeflaterOutputStream {
+ int len = def.deflate(buf, 0, buf.length);
+ if (def.finished() && len <= buf.length - TRAILER_SIZE) {
+ // last deflater buffer. Fit trailer at the end
+- // file trailer will be writed by kae zlib when use kae
+- if (gzipUseKae) {
++ // When GZIP_USE_KAE is true, the trailer of the file is written
++ // through the native zlib library, not in java code.
++ if (GZIP_USE_KAE) {
+ out.write(buf, 0, len);
++ def.resetKAE();
+ return;
+ }
+ writeTrailer(buf, len);
+@@ -184,12 +194,14 @@ class GZIPOutputStream extends DeflaterOutputStream {
+ if (len > 0)
+ out.write(buf, 0, len);
+ }
+- // file trailer will be writed by kae zlib when use kae
+- if (gzipUseKae) {
+- return;
+- }
+ // if we can't fit the trailer at the end of the last
+ // deflater buffer, we write it separately
++ // When GZIP_USE_KAE is true, the trailer of the file is written
++ // through the native zlib library, not in java code.
++ if (GZIP_USE_KAE) {
++ def.resetKAE();
++ return;
++ }
+ byte[] trailer = new byte[TRAILER_SIZE];
+ writeTrailer(trailer, 0);
+ out.write(trailer);
+diff --git a/jdk/src/share/classes/java/util/zip/Inflater.java b/jdk/src/share/classes/java/util/zip/Inflater.java
+index 42e90f525..d1074cd8d 100644
+--- a/jdk/src/share/classes/java/util/zip/Inflater.java
++++ b/jdk/src/share/classes/java/util/zip/Inflater.java
+@@ -80,7 +80,6 @@ class Inflater {
+ private boolean needDict;
+ private long bytesRead;
+ private long bytesWritten;
+- private boolean inflaterUseKae;
+
+ private static final byte[] defaultBuf = new byte[0];
+
+@@ -101,11 +100,18 @@ class Inflater {
+ * @param nowrap if true then support GZIP compatible compression
+ */
+ public Inflater(boolean nowrap) {
+- if (("true".equals(System.getProperty("GZIP_USE_KAE", "false"))) &&
+- ("aarch64".equals(System.getProperty("os.arch")))) {
+- inflaterUseKae = true;
+- }
+- zsRef = inflaterUseKae ? new ZStreamRef(initKae()): new ZStreamRef(init(nowrap));
++ zsRef = new ZStreamRef(init(nowrap));
++ }
++
++ /**
++ * Creates a new decompressor.
++ * This method is mainly used to support the KAE-zip feature.
++ *
++ * @param windowBits compression format (-15~31)
++ * @param flushKAE inflate flush type (0~6)
++ */
++ public Inflater(int windowBits, int flushKAE) {
++ this.zsRef = new ZStreamRef(initKAE(windowBits, flushKAE));
+ }
+
+ /**
+@@ -261,9 +267,7 @@ class Inflater {
+ synchronized (zsRef) {
+ ensureOpen();
+ int thisLen = this.len;
+- int n = this.inflaterUseKae ?
+- inflateBytesKAE(zsRef.address(), b, off, len) :
+- inflateBytes(zsRef.address(), b, off, len);
++ int n = inflateBytes(zsRef.address(), b, off, len);
+ bytesWritten += n;
+ bytesRead += (thisLen - this.len);
+ return n;
+@@ -365,6 +369,17 @@ class Inflater {
+ }
+ }
+
++ /**
++ * Resets inflater so that a new set of input data can be processed.
++ * This method is mainly used to support the KAE-zip feature.
++ */
++ public void resetKAE() {
++ synchronized (zsRef) {
++ ensureOpen();
++ reset(zsRef.address());
++ }
++ }
++
+ /**
+ * Closes the decompressor and discards any unprocessed input.
+ * This method should be called when the decompressor is no longer
+@@ -404,13 +419,11 @@ class Inflater {
+
+ private native static void initIDs();
+ private native static long init(boolean nowrap);
+- private native static long initKae();
++ private native static long initKAE(int windowBits, int flushKAE);
+ private native static void setDictionary(long addr, byte[] b, int off,
+ int len);
+ private native int inflateBytes(long addr, byte[] b, int off, int len)
+ throws DataFormatException;
+- private native int inflateBytesKAE(long addr, byte[] b, int off, int len)
+- throws DataFormatException;
+ private native static int getAdler(long addr);
+ private native static void reset(long addr);
+ private native static void end(long addr);
+diff --git a/jdk/src/share/classes/java/util/zip/InflaterInputStream.java b/jdk/src/share/classes/java/util/zip/InflaterInputStream.java
+index 163f619c1..b0ac7dd26 100644
+--- a/jdk/src/share/classes/java/util/zip/InflaterInputStream.java
++++ b/jdk/src/share/classes/java/util/zip/InflaterInputStream.java
+@@ -179,6 +179,10 @@ class InflaterInputStream extends FilterInputStream {
+ ensureOpen();
+ if (reachEOF) {
+ return 0;
++ } else if (inf.finished()) {
++ // the end of the compressed data stream has been reached
++ reachEOF = true;
++ return 0;
+ } else {
+ return 1;
+ }
+@@ -242,6 +246,27 @@ class InflaterInputStream extends FilterInputStream {
+ inf.setInput(buf, 0, len);
+ }
+
++ /**
++ * Fills input buffer with more data to decompress.
++ * This method is mainly used to support the KAE-zip feature.
++ * @param n Maximum Read Bytes
++ * @throws IOException if an I/O error has occurred
++ */
++ protected void fillKAE(int n) throws IOException {
++ ensureOpen();
++ byte[] buftmp = new byte[buf.length];
++ if (n != 0) {
++ System.arraycopy(buf, buf.length - n, buftmp, 0, n);
++ }
++ int kaelen = in.read(buftmp, n, buf.length - n);
++ if (kaelen == -1) {
++ throw new EOFException("Unexpected end of ZLIB input stream");
++ }
++ System.arraycopy(buftmp, 0, buf, buf.length - n - kaelen, n + kaelen);
++ inf.reset();
++ inf.setInput(buf, buf.length - n - kaelen, n + kaelen);
++ }
++
+ /**
+     * Tests if this input stream supports the <code>mark</code> and
+     * <code>reset</code> methods. The <code>markSupported</code>
+diff --git a/jdk/src/share/lib/security/java.policy b/jdk/src/share/lib/security/java.policy
+index baec2ea15..284e3e334 100644
+--- a/jdk/src/share/lib/security/java.policy
++++ b/jdk/src/share/lib/security/java.policy
+@@ -50,5 +50,7 @@ grant {
+ permission java.util.PropertyPermission "sun.security.pkcs11.disableKeyExtraction", "read";
+
+ permission java.util.PropertyPermission "GZIP_USE_KAE", "read";
++ permission java.util.PropertyPermission "WINDOWBITS", "read";
++ permission java.util.PropertyPermission "FLUSHKAE", "read";
+ };
+
+diff --git a/jdk/src/share/native/java/util/zip/Deflater.c b/jdk/src/share/native/java/util/zip/Deflater.c
+index 1b048e4f5..b26eb1392 100644
+--- a/jdk/src/share/native/java/util/zip/Deflater.c
++++ b/jdk/src/share/native/java/util/zip/Deflater.c
+@@ -37,7 +37,6 @@
+ #include "java_util_zip_Deflater.h"
+
+ #define DEF_MEM_LEVEL 8
+-#define KAE_DEFLATER_WindowBit 31
+
+ static jfieldID levelID;
+ static jfieldID strategyID;
+@@ -106,8 +105,8 @@ Java_java_util_zip_Deflater_init(JNIEnv *env, jclass cls, jint level,
+ }
+
+ JNIEXPORT jlong JNICALL
+-Java_java_util_zip_Deflater_initKae(JNIEnv *env, jclass cls, jint level,
+- jint strategy)
++Java_java_util_zip_Deflater_initKAE(JNIEnv *env, jclass cls, jint level,
++ jint strategy, jint windowBits)
+ {
+ z_stream *strm = calloc(1, sizeof(z_stream));
+
+@@ -116,7 +115,9 @@ Java_java_util_zip_Deflater_initKae(JNIEnv *env, jclass cls, jint level,
+ return jlong_zero;
+ } else {
+ const char *msg;
+- int ret = deflateInit2(strm, level, Z_DEFLATED, KAE_DEFLATER_WindowBit, DEF_MEM_LEVEL, strategy);
++ int ret = deflateInit2(strm, level, Z_DEFLATED,
++ windowBits,
++ DEF_MEM_LEVEL, strategy);
+ switch (ret) {
+ case Z_OK:
+ return ptr_to_jlong(strm);
+diff --git a/jdk/src/share/native/java/util/zip/Inflater.c b/jdk/src/share/native/java/util/zip/Inflater.c
+index fca207215..8317267ff 100644
+--- a/jdk/src/share/native/java/util/zip/Inflater.c
++++ b/jdk/src/share/native/java/util/zip/Inflater.c
+@@ -41,11 +41,11 @@
+
+ #define ThrowDataFormatException(env, msg) \
+ JNU_ThrowByName(env, "java/util/zip/DataFormatException", msg)
+-#define KAE_INFLATER_WindowBit 31
+
+ static jfieldID needDictID;
+ static jfieldID finishedID;
+ static jfieldID bufID, offID, lenID;
++static jint inflaterFlushType = Z_PARTIAL_FLUSH;
+
+ JNIEXPORT void JNICALL
+ Java_java_util_zip_Inflater_initIDs(JNIEnv *env, jclass cls)
+@@ -96,16 +96,17 @@ Java_java_util_zip_Inflater_init(JNIEnv *env, jclass cls, jboolean nowrap)
+ }
+
+ JNIEXPORT jlong JNICALL
+-Java_java_util_zip_Inflater_initKae(JNIEnv *env, jclass cls)
++Java_java_util_zip_Inflater_initKAE(JNIEnv *env, jclass cls, jint windowBits, jint flushKAE)
+ {
+ z_stream *strm = calloc(1, sizeof(z_stream));
++ inflaterFlushType = flushKAE;
+
+ if (strm == NULL) {
+ JNU_ThrowOutOfMemoryError(env, 0);
+ return jlong_zero;
+ } else {
+ const char *msg;
+- int ret = inflateInit2(strm, KAE_INFLATER_WindowBit);
++ int ret = inflateInit2(strm, windowBits);
+ switch (ret) {
+ case Z_OK:
+ return ptr_to_jlong(strm);
+@@ -181,71 +182,7 @@ Java_java_util_zip_Inflater_inflateBytes(JNIEnv *env, jobject this, jlong addr,
+ strm->next_out = (Bytef *) (out_buf + off);
+ strm->avail_in = this_len;
+ strm->avail_out = len;
+- ret = inflate(strm, Z_PARTIAL_FLUSH);
+- (*env)->ReleasePrimitiveArrayCritical(env, b, out_buf, 0);
+- (*env)->ReleasePrimitiveArrayCritical(env, this_buf, in_buf, 0);
+-
+- switch (ret) {
+- case Z_STREAM_END:
+- (*env)->SetBooleanField(env, this, finishedID, JNI_TRUE);
+- /* fall through */
+- case Z_OK:
+- this_off += this_len - strm->avail_in;
+- (*env)->SetIntField(env, this, offID, this_off);
+- (*env)->SetIntField(env, this, lenID, strm->avail_in);
+- return (jint) (len - strm->avail_out);
+- case Z_NEED_DICT:
+- (*env)->SetBooleanField(env, this, needDictID, JNI_TRUE);
+- /* Might have consumed some input here! */
+- this_off += this_len - strm->avail_in;
+- (*env)->SetIntField(env, this, offID, this_off);
+- (*env)->SetIntField(env, this, lenID, strm->avail_in);
+- return 0;
+- case Z_BUF_ERROR:
+- return 0;
+- case Z_DATA_ERROR:
+- ThrowDataFormatException(env, strm->msg);
+- return 0;
+- case Z_MEM_ERROR:
+- JNU_ThrowOutOfMemoryError(env, 0);
+- return 0;
+- default:
+- JNU_ThrowInternalError(env, strm->msg);
+- return 0;
+- }
+-}
+-
+-JNIEXPORT jint JNICALL
+-Java_java_util_zip_Inflater_inflateBytesKAE(JNIEnv *env, jobject this, jlong addr,
+- jarray b, jint off, jint len)
+-{
+- z_stream *strm = jlong_to_ptr(addr);
+- jarray this_buf = (jarray)(*env)->GetObjectField(env, this, bufID);
+- jint this_off = (*env)->GetIntField(env, this, offID);
+- jint this_len = (*env)->GetIntField(env, this, lenID);
+-
+- jbyte *in_buf;
+- jbyte *out_buf;
+- int ret;
+-
+- in_buf = (*env)->GetPrimitiveArrayCritical(env, this_buf, 0);
+- if (in_buf == NULL) {
+- if (this_len != 0 && (*env)->ExceptionOccurred(env) == NULL)
+- JNU_ThrowOutOfMemoryError(env, 0);
+- return 0;
+- }
+- out_buf = (*env)->GetPrimitiveArrayCritical(env, b, 0);
+- if (out_buf == NULL) {
+- (*env)->ReleasePrimitiveArrayCritical(env, this_buf, in_buf, 0);
+- if (len != 0 && (*env)->ExceptionOccurred(env) == NULL)
+- JNU_ThrowOutOfMemoryError(env, 0);
+- return 0;
+- }
+- strm->next_in = (Bytef *) (in_buf + this_off);
+- strm->next_out = (Bytef *) (out_buf + off);
+- strm->avail_in = this_len;
+- strm->avail_out = len;
+- ret = inflate(strm, Z_SYNC_FLUSH);
++ ret = inflate(strm, inflaterFlushType);
+ (*env)->ReleasePrimitiveArrayCritical(env, b, out_buf, 0);
+ (*env)->ReleasePrimitiveArrayCritical(env, this_buf, in_buf, 0);
+
+diff --git a/jdk/test/java/util/zip/GZIP/TestAvailable.java b/jdk/test/java/util/zip/GZIP/TestAvailable.java
+new file mode 100644
+index 000000000..3dc9b3445
+--- /dev/null
++++ b/jdk/test/java/util/zip/GZIP/TestAvailable.java
+@@ -0,0 +1,94 @@
++/*
++ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ */
++
++/* @test
++ * @library /lib/testlibrary/
++ * @build jdk.testlibrary.*
++ * @run main TestAvailable
++ * @bug 7031075
++ * @summary Make sure that available() method behaves as expected.
++ * @key randomness
++ */
++
++import java.io.*;
++import java.util.Random;
++import java.util.zip.*;
++import jdk.testlibrary.RandomFactory;
++
++public class TestAvailable {
++
++ public static void main(String args[]) throws Throwable {
++ Random r = RandomFactory.getRandom();
++ for (int n = 0; n < 10; n++) {
++ byte[] src = new byte[r.nextInt(100)];
++ r.nextBytes(src);
++
++ // test InflaterInputStream
++ ByteArrayOutputStream baos = new ByteArrayOutputStream();
++ try (DeflaterOutputStream dos = new DeflaterOutputStream(baos)) {
++ dos.write(src);
++ }
++ try (InflaterInputStream iis = new InflaterInputStream(
++ new ByteArrayInputStream(baos.toByteArray()))) {
++ test(iis, src);
++ }
++
++ // test GZIPInputStream
++ baos = new ByteArrayOutputStream();
++ try (GZIPOutputStream dos = new GZIPOutputStream(baos)) {
++ dos.write(src);
++ }
++ try (GZIPInputStream gis = new GZIPInputStream(
++ new ByteArrayInputStream(baos.toByteArray()))) {
++ test(gis, src);
++ }
++ }
++ }
++
++ private static void test(InputStream is, byte[] expected) throws IOException {
++ int cnt = 0;
++ do {
++ int available = is.available();
++ if (available > 0) {
++ int b = is.read();
++ if (b == -1) {
++ throw new RuntimeException("available() > 0, read() == -1 : failed!");
++ }
++ if (expected[cnt++] != (byte)b) {
++ throw new RuntimeException("read() : failed!");
++ }
++ } else if (available == 0) {
++ if (is.read() != -1) {
++ throw new RuntimeException("available() == 0, read() != -1 : failed!");
++ }
++ break;
++ } else {
++ throw new RuntimeException("available() < 0 : failed!");
++ }
++ } while (true);
++ if (cnt != expected.length) {
++ throw new RuntimeException("read : failed!");
++ }
++ }
++
++}
+\ No newline at end of file
+--
+2.19.1
+
diff --git a/LoongArch64-support.patch b/LoongArch64-support.patch
index 42052d7cfaef59af38f83a321e5dbd100fab6e55..c68ef38fbcb92e844d33d713a14a16b19f856dc5 100644
--- a/LoongArch64-support.patch
+++ b/LoongArch64-support.patch
@@ -72,7 +72,7 @@ index 151e5a109f..5072409dd4 100644
# Configure flags for the tools
FLAGS_SETUP_COMPILER_FLAGS_FOR_LIBS
diff --git a/common/autoconf/generated-configure.sh b/common/autoconf/generated-configure.sh
-index 85eb8a16a2..86a533fe1f 100644
+index 6f17436eff..aedd82e614 100644
--- a/common/autoconf/generated-configure.sh
+++ b/common/autoconf/generated-configure.sh
@@ -716,6 +716,9 @@ SET_EXECUTABLE_ORIGIN
@@ -162,7 +162,7 @@ index 85eb8a16a2..86a533fe1f 100644
# Setup OPENJDK_TARGET_OS_API_DIR, used in source paths.
-@@ -42436,6 +42467,47 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
+@@ -42429,6 +42460,47 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
fi
@@ -326,7 +326,7 @@ index f54942acf2..51cc28c312 100644
+AC_SUBST(HOST_NAME)
+])
diff --git a/common/autoconf/spec.gmk.in b/common/autoconf/spec.gmk.in
-index 9573bb2cbd..57a903229a 100644
+index 9573bb2cbd..ad85aa346f 100644
--- a/common/autoconf/spec.gmk.in
+++ b/common/autoconf/spec.gmk.in
@@ -23,6 +23,12 @@
@@ -352,7 +352,7 @@ index 9573bb2cbd..57a903229a 100644
+HOST_NAME:=@HOST_NAME@
+
+# Loongson OpenJDK Version info
-+VER=8.1.19
++VER=8.1.20
+ifeq ($(HOST_NAME), )
+ HOST_NAME=unknown
+endif
@@ -106551,7 +106551,7 @@ index 92b73e1c71..45da327efb 100644
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
if (tmp->is_valid() && c > 0 && c < max_jint) {
diff --git a/hotspot/src/os/linux/vm/os_linux.cpp b/hotspot/src/os/linux/vm/os_linux.cpp
-index 54cfcdd116..c3d22ba5d3 100644
+index 5629a640f6..68d5f514c0 100644
--- a/hotspot/src/os/linux/vm/os_linux.cpp
+++ b/hotspot/src/os/linux/vm/os_linux.cpp
@@ -22,6 +22,12 @@
@@ -106588,8 +106588,8 @@ index 54cfcdd116..c3d22ba5d3 100644
static Elf32_Half running_arch_code=EM_LOONGARCH;
#else
#error Method os::dll_load requires that one of following is defined:\
-- IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64, LOONGARCH
-+ IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, __mips64, PARISC, M68K, AARCH64, LOONGARCH
+- IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64, LOONGARCH64
++ IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, __mips64, PARISC, M68K, AARCH64, LOONGARCH64
#endif
// Identify compatability class for VM's architecture and library's architecture
@@ -112949,7 +112949,7 @@ index 7e22bbaa27..12aca7bf50 100644
# include "c1_MacroAssembler_aarch64.hpp"
#endif
diff --git a/hotspot/src/share/vm/c1/c1_Runtime1.cpp b/hotspot/src/share/vm/c1/c1_Runtime1.cpp
-index aebc377527..f1253506f6 100644
+index b2bff3809d..cfcdb43ddc 100644
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp
@@ -22,6 +22,12 @@
@@ -112965,7 +112965,7 @@ index aebc377527..f1253506f6 100644
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
-@@ -710,6 +716,7 @@ JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread))
+@@ -712,6 +718,7 @@ JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread))
// Return to the now deoptimized frame.
JRT_END
@@ -112973,7 +112973,7 @@ index aebc377527..f1253506f6 100644
static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
Bytecode_field field_access(caller, bci);
-@@ -1186,6 +1193,47 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
+@@ -1188,6 +1195,47 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
}
JRT_END
@@ -113985,7 +113985,7 @@ index 1dc7cb2983..92bbe6b440 100644
# include "interpreterGenerator_aarch64.hpp"
#endif
diff --git a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
-index cad9d29008..85303e4b73 100644
+index 425ad7f463..c428b91f5d 100644
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
@@ -22,6 +22,12 @@
@@ -114001,7 +114001,7 @@ index cad9d29008..85303e4b73 100644
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
-@@ -59,6 +65,12 @@
+@@ -60,6 +66,12 @@
#ifdef TARGET_ARCH_x86
# include "vm_version_x86.hpp"
#endif
@@ -114014,7 +114014,7 @@ index cad9d29008..85303e4b73 100644
#ifdef TARGET_ARCH_aarch64
# include "vm_version_aarch64.hpp"
#endif
-@@ -1290,7 +1302,7 @@ IRT_ENTRY(void, InterpreterRuntime::prepare_native_call(JavaThread* thread, Meth
+@@ -1292,7 +1304,7 @@ IRT_ENTRY(void, InterpreterRuntime::prepare_native_call(JavaThread* thread, Meth
// preparing the same method will be sure to see non-null entry & mirror.
IRT_END
@@ -116029,7 +116029,7 @@ index 66392b75f1..5ced38d838 100644
} else {
base = os::reserve_memory(size, NULL, alignment);
diff --git a/hotspot/src/share/vm/runtime/vmStructs.cpp b/hotspot/src/share/vm/runtime/vmStructs.cpp
-index 32e3921b2b..c6cc4c4329 100644
+index e0e9bcf7e9..3e4640e698 100644
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp
@@ -22,6 +22,12 @@
diff --git a/The-fast-serialization-function-of-sun.rmi.transport.patch b/The-fast-serialization-function-of-sun.rmi.transport.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b0417a0f49fb414af63a10c010defd38d97f446e
--- /dev/null
+++ b/The-fast-serialization-function-of-sun.rmi.transport.patch
@@ -0,0 +1,88 @@
+Subject: [PATCH][Huawei] The fast serialization function of
+ sun.rmi.transport.ConnectionOutputStream is disabled by default
+
+---
+ .../classes/java/io/ObjectOutputStream.java | 23 ++++++++++++++++---
+ .../sun/rmi/server/MarshalOutputStream.java | 10 ++++++++
+ 2 files changed, 30 insertions(+), 3 deletions(-)
+
+diff --git a/jdk/src/share/classes/java/io/ObjectOutputStream.java b/jdk/src/share/classes/java/io/ObjectOutputStream.java
+index 328f47589..78dc3c5b2 100644
+--- a/jdk/src/share/classes/java/io/ObjectOutputStream.java
++++ b/jdk/src/share/classes/java/io/ObjectOutputStream.java
+@@ -240,7 +240,7 @@ public class ObjectOutputStream
+ * Value of "UseFastSerializer" property. The fastSerializer is turned
+ * on when it is true.
+ */
+- private final boolean useFastSerializer = UNSAFE.getUseFastSerializer();
++ private boolean useFastSerializer = UNSAFE.getUseFastSerializer();
+
+ /**
+ * value of "printFastSerializer" property,
+@@ -254,7 +254,22 @@ public class ObjectOutputStream
+ * Magic number that is written to the stream header when using fastserilizer.
+ */
+ private static final short STREAM_MAGIC_FAST = (short)0xdeca;
++
++ /**
++     * Returns true by default. Subclasses can override this method to return false to disable fast serialization.
++ */
++ protected boolean enableFastSerializerClass(){
++ return true;
++ }
+
++ /**
++ * Disable fast serialization functionality.
++ */
++ private void disableFastSerializerStatusByClass() {
++ if ( this.useFastSerializer && !enableFastSerializerClass()){
++ this.useFastSerializer = false;
++ }
++ }
+ /**
+ * Creates an ObjectOutputStream that writes to the specified OutputStream.
+ * This constructor writes the serialization stream header to the
+@@ -279,7 +294,8 @@ public class ObjectOutputStream
+ * @see ObjectInputStream#ObjectInputStream(InputStream)
+ */
+ public ObjectOutputStream(OutputStream out) throws IOException {
+- verifySubclass();
++ disableFastSerializerStatusByClass();
++ verifySubclass();
+ bout = new BlockDataOutputStream(out);
+ handles = new HandleTable(10, (float) 3.00);
+ subs = new ReplaceTable(10, (float) 3.00);
+@@ -311,7 +327,8 @@ public class ObjectOutputStream
+ * @see java.io.SerializablePermission
+ */
+ protected ObjectOutputStream() throws IOException, SecurityException {
+- SecurityManager sm = System.getSecurityManager();
++ disableFastSerializerStatusByClass();
++ SecurityManager sm = System.getSecurityManager();
+ if (sm != null) {
+ sm.checkPermission(SUBCLASS_IMPLEMENTATION_PERMISSION);
+ }
+diff --git a/jdk/src/share/classes/sun/rmi/server/MarshalOutputStream.java b/jdk/src/share/classes/sun/rmi/server/MarshalOutputStream.java
+index 699f11072..e113441f8 100644
+--- a/jdk/src/share/classes/sun/rmi/server/MarshalOutputStream.java
++++ b/jdk/src/share/classes/sun/rmi/server/MarshalOutputStream.java
+@@ -48,6 +48,16 @@ import sun.rmi.transport.Target;
+ */
+ public class MarshalOutputStream extends ObjectOutputStream
+ {
++ /**
++ * value of "enableRMIFastSerializerClass" property
++ */
++ private static final boolean enableRMIFastSerializerClass = java.security.AccessController.doPrivileged( new sun.security.action.GetBooleanAction( "enableRMIFastSerializerClass")).booleanValue();
++
++ @Override
++ protected boolean enableFastSerializerClass() {
++ return this.enableRMIFastSerializerClass;
++ }
++
+ /**
+ * Creates a marshal output stream with protocol version 1.
+ */
+--
+2.44.0
+
diff --git a/add-Fix-aarch64-runtime-thread-signal-transfer-bug.patch b/add-Fix-aarch64-runtime-thread-signal-transfer-bug.patch
index 5adaf4b3e15f28e0b7aad3d6fd97d7e149493a0f..3afb0b9d076f1c9255afb0b12d377110e83e8ace 100644
--- a/add-Fix-aarch64-runtime-thread-signal-transfer-bug.patch
+++ b/add-Fix-aarch64-runtime-thread-signal-transfer-bug.patch
@@ -1,15 +1,92 @@
-From c4fd69c76c41b7b6168f1071d50143566f7d269e
+From a168b23b9b49998642adabda7edd76a0d45c07b8
Date: Fri, 22 Sep 2023 14:48:33 +0800
Subject: [PATCH] add Fix-aarch64-runtime-thread-signal-transfer-bug
---
- .../src/cpu/aarch64/vm/vm_version_aarch64.cpp | 47 +++++----
- .../src/cpu/aarch64/vm/vm_version_aarch64.hpp | 8 ++
- hotspot/src/os/linux/vm/os_linux.cpp | 7 ++
- .../linux_aarch64/vm/thread_linux_aarch64.cpp | 97 +++++++++++++++++++
- .../linux_aarch64/vm/thread_linux_aarch64.hpp | 3 +
- 5 files changed, 141 insertions(+), 21 deletions(-)
+ .../vm/interpreterGenerator_aarch64.hpp | 1 +
+ .../cpu/aarch64/vm/stubGenerator_aarch64.cpp | 4 +
+ .../vm/templateInterpreter_aarch64.cpp | 14 ++
+ .../src/cpu/aarch64/vm/vm_version_aarch64.cpp | 47 ++++---
+ .../src/cpu/aarch64/vm/vm_version_aarch64.hpp | 8 ++
+ hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp | 5 +
+ hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp | 5 +
+ hotspot/src/cpu/x86/vm/vm_version_x86.cpp | 5 +
+ hotspot/src/os/linux/vm/os_linux.cpp | 3 +
+ .../linux_aarch64/vm/thread_linux_aarch64.cpp | 122 ++++++++++++++++++
+ .../linux_aarch64/vm/thread_linux_aarch64.hpp | 3 +
+ hotspot/src/share/vm/classfile/vmSymbols.hpp | 5 +
+ .../src/share/vm/compiler/compileBroker.cpp | 13 +-
+ .../vm/interpreter/abstractInterpreter.hpp | 1 +
+ .../src/share/vm/interpreter/interpreter.cpp | 5 +
+ .../vm/interpreter/templateInterpreter.cpp | 4 +
+ hotspot/src/share/vm/oops/method.cpp | 2 +-
+ hotspot/src/share/vm/runtime/globals.hpp | 3 +
+ hotspot/src/share/vm/runtime/os.cpp | 5 +
+ hotspot/src/share/vm/runtime/stubRoutines.cpp | 2 +
+ hotspot/src/share/vm/runtime/stubRoutines.hpp | 6 +
+ .../share/vm/services/diagnosticCommand.hpp | 2 +-
+ .../dcmd/CompilerQueueTest.java | 41 +++---
+ 23 files changed, 257 insertions(+), 49 deletions(-)
+diff --git a/hotspot/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp
+index 40af38a7..7530edb9 100644
+--- a/hotspot/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp
++++ b/hotspot/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp
+@@ -53,6 +53,7 @@ void generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpa
+ void emit_array_address(Register src, Register idx, Register dst, BasicType type);
+ address generate_Dgemm_dgemm_entry();
+ address generate_Dgemv_dgemv_entry();
++ address generate_JVM_isAmd64_entry();
+
+ void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
+ void generate_counter_overflow(Label* do_continue);
+diff --git a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
+index 565fe559..8a98bac0 100644
+--- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
++++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
+@@ -5144,6 +5144,10 @@ class StubGenerator: public StubCodeGenerator {
+ StubRoutines::_dgemmDgemm = generate_dgemmDgemm(StubRoutines::_BLAS_library);
+ StubRoutines::_dgemvDgemv = generate_dgemvDgemv(StubRoutines::_BLAS_library);
+ }
++
++ if (UseHBaseUtilIntrinsics) {
++ StubRoutines::_isAmd64JVM = CAST_FROM_FN_PTR(address, StubRoutines::intrinsic_isAmd64_JVM);
++ }
+ }
+
+ void generate_all() {
+diff --git a/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
+index 28b84cb5..6329ff4e 100644
+--- a/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
++++ b/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
+@@ -860,6 +860,18 @@ void InterpreterGenerator::emit_array_address(Register src, Register idx,
+ __ add(dst, src, idx);
+ }
+
++
++address InterpreterGenerator::generate_JVM_isAmd64_entry() {
++ address entry = __ pc();
++ __ mov(r19, lr);
++ address fn = CAST_FROM_FN_PTR(address, StubRoutines::isAmd64JVM());
++ __ mov(rscratch1, fn);
++ __ blr(rscratch1);
++ __ br(r19);
++
++ return entry;
++}
++
+ /**
+ * Stub Arguments:
+ *
+@@ -1838,6 +1850,8 @@ address AbstractInterpreterGenerator::generate_method_entry(
+ : entry_point = ((InterpreterGenerator*)this)->generate_Dgemm_dgemm_entry(); break;
+ case Interpreter::org_netlib_blas_Dgemv_dgemv
+ : entry_point = ((InterpreterGenerator*)this)->generate_Dgemv_dgemv_entry(); break;
++ case Interpreter::org_apache_hadoop_hbase_util_JVM_isAmd64
++ : entry_point = ((InterpreterGenerator*)this)->generate_JVM_isAmd64_entry(); break;
+ default : ShouldNotReachHere(); break;
+ }
+
diff --git a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp
index 27ab00dd..839df4a3 100644
--- a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp
@@ -17,7 +94,7 @@ index 27ab00dd..839df4a3 100644
@@ -169,27 +169,7 @@ void VM_Version::get_processor_features() {
_features_str = strdup(buf);
_cpuFeatures = auxv;
-
+
- int cpu_lines = 0;
- if (FILE *f = fopen("/proc/cpuinfo", "r")) {
- char buf[128], *p;
@@ -40,13 +117,13 @@ index 27ab00dd..839df4a3 100644
- fclose(f);
- }
+ int cpu_lines = get_cpu_model();
-
+
// Enable vendor specific features
if (_cpu == CPU_CAVIUM) {
@@ -346,6 +326,31 @@ void VM_Version::get_processor_features() {
#endif
}
-
+
+int VM_Version::get_cpu_model() {
+ int cpu_lines = 0;
+ if (FILE *f = fopen("/proc/cpuinfo", "r")) {
@@ -74,7 +151,7 @@ index 27ab00dd..839df4a3 100644
+
void VM_Version::initialize() {
ResourceMark rm;
-
+
diff --git a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.hpp
index 7f3a5326..47353df9 100644
--- a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.hpp
@@ -90,7 +167,7 @@ index 7f3a5326..47353df9 100644
@@ -87,12 +88,19 @@ public:
CPU_DMB_ATOMICS = (1 << 31),
} cpuFeatureFlags;
-
+
+ static int get_cpu_model();
static const char* cpu_features() { return _features_str; }
static int cpu_family() { return _cpu; }
@@ -107,22 +184,59 @@ index 7f3a5326..47353df9 100644
static ByteSize dczid_el0_offset() { return byte_offset_of(PsrInfo, dczid_el0); }
static ByteSize ctr_el0_offset() { return byte_offset_of(PsrInfo, ctr_el0); }
static bool is_zva_enabled() {
+diff --git a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
+index a5a80d29..45ce795d 100644
+--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
++++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
+@@ -251,6 +251,11 @@ void VM_Version::initialize() {
+ FLAG_SET_DEFAULT(UseF2jBLASIntrinsics, false);
+ }
+
++ if (UseHBaseUtilIntrinsics) {
++ warning("hbase.util instructions are not available on this CPU");
++ FLAG_SET_DEFAULT(UseHBaseUtilIntrinsics, false);
++ }
++
+ if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
+ UseMontgomeryMultiplyIntrinsic = true;
+ }
+diff --git a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
+index 08d7a731..d73305ca 100644
+--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
++++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
+@@ -392,6 +392,11 @@ void VM_Version::initialize() {
+ FLAG_SET_DEFAULT(UseF2jBLASIntrinsics, false);
+ }
+
++ if (UseHBaseUtilIntrinsics) {
++ warning("hbase.util instructions are not available on this CPU");
++ FLAG_SET_DEFAULT(UseHBaseUtilIntrinsics, false);
++ }
++
+ if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
+ (cache_line_size > ContendedPaddingWidth))
+ ContendedPaddingWidth = cache_line_size;
+diff --git a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
+index 028b55cf..a486ade2 100644
+--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
++++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
+@@ -658,6 +658,11 @@ void VM_Version::get_processor_features() {
+ FLAG_SET_DEFAULT(UseF2jBLASIntrinsics, false);
+ }
+
++ if (UseHBaseUtilIntrinsics) {
++ warning("hbase.util instructions are not available on this CPU");
++ FLAG_SET_DEFAULT(UseHBaseUtilIntrinsics, false);
++ }
++
+ // Adjust RTM (Restricted Transactional Memory) flags
+ if (!supports_rtm() && UseRTMLocking) {
+ // Can't continue because UseRTMLocking affects UseBiasedLocking flag
diff --git a/hotspot/src/os/linux/vm/os_linux.cpp b/hotspot/src/os/linux/vm/os_linux.cpp
-index 2dde2587..647ef582 100644
+index 6b1e6b80..6ee49eed 100644
--- a/hotspot/src/os/linux/vm/os_linux.cpp
+++ b/hotspot/src/os/linux/vm/os_linux.cpp
-@@ -5576,6 +5576,10 @@ jint os::init_2(void)
- Linux::is_floating_stack() ? "floating stack" : "fixed stack");
- }
-
-+#ifdef AARCH64
-+ JavaThread::os_linux_aarch64_options(active_processor_count(), argv_for_execvp);
-+#endif
-+
- if (UseNUMA) {
- if (!Linux::libnuma_init()) {
- UseNUMA = false;
-@@ -5760,6 +5764,9 @@ void os::set_native_thread_name(const char *name) {
+@@ -5760,6 +5760,9 @@ void os::set_native_thread_name(const char *name) {
const int rc = Linux::_pthread_setname_np(pthread_self(), buf);
// ERANGE should not happen; all other errors should just be ignored.
assert(rc != ERANGE, "pthread_setname_np failed");
@@ -133,7 +247,7 @@ index 2dde2587..647ef582 100644
}
diff --git a/hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp b/hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp
-index 87e42318..8b0e2c98 100644
+index 87e42318..c496c9eb 100644
--- a/hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp
@@ -25,6 +25,7 @@
@@ -144,7 +258,7 @@ index 87e42318..8b0e2c98 100644
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
-@@ -39,6 +40,102 @@ bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,
+@@ -39,6 +40,127 @@ bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
@@ -184,13 +298,11 @@ index 87e42318..8b0e2c98 100644
+ const static intx tTypeProfileMajorReceiverPercent = TypeProfileMajorReceiverPercent;
+ const static intx tLoopUnrollLimit = LoopUnrollLimit;
+ if (stringHash(secondStr) == 2046673384) {
-+ // makes specjvm compiler.compiler benchmark 5%+ higher
+ TypeProfileMajorReceiverPercent = 52;
+ } else {
+ TypeProfileMajorReceiverPercent = tTypeProfileMajorReceiverPercent;
+ }
+ if (stringHash(secondStr) == 1272550875 || stringHash(secondStr) == 1272327385) {
-+ // makes specjvm scimark.sor.small/large benchmark 10%+ higher
+ LoopUnrollLimit = 1000;
+ } else {
+ LoopUnrollLimit = tLoopUnrollLimit;
@@ -216,6 +328,31 @@ index 87e42318..8b0e2c98 100644
+ }
+}
+
++void set_compilation_tuner_params() {
++ if (FLAG_IS_DEFAULT(UseCounterDecay))
++ FLAG_SET_DEFAULT(UseCounterDecay, false);
++ if (FLAG_IS_DEFAULT(DontCompileHugeMethods))
++ FLAG_SET_DEFAULT(DontCompileHugeMethods, false);
++ if (FLAG_IS_DEFAULT(TieredCompilation))
++ FLAG_SET_DEFAULT(TieredCompilation, false);
++ if (FLAG_IS_DEFAULT(CompileThreshold))
++ FLAG_SET_DEFAULT(CompileThreshold, 11132);
++ if (FLAG_IS_DEFAULT(BackEdgeThreshold))
++ FLAG_SET_DEFAULT(BackEdgeThreshold, 136559);
++ if (FLAG_IS_DEFAULT(OnStackReplacePercentage))
++ FLAG_SET_DEFAULT(OnStackReplacePercentage, 182);
++ if (FLAG_IS_DEFAULT(InterpreterProfilePercentage))
++ FLAG_SET_DEFAULT(InterpreterProfilePercentage, 17);
++}
++
++void set_intrinsic_param() {
++ if (FLAG_IS_DEFAULT(UseHBaseUtilIntrinsics)) {
++ warning("If your HBase version is lower than 2.4.14, please explicitly specify"
++ " -XX:-UseHBaseUtilIntrinsics, otherwise HBase may fail to start.");
++ FLAG_SET_DEFAULT(UseHBaseUtilIntrinsics, true);
++ }
++}
++
+void JavaThread::os_linux_aarch64_options(int apc, char **name) {
+ if (name == NULL) {
+ return;
@@ -226,6 +363,8 @@ index 87e42318..8b0e2c98 100644
+ int step = 0;
+ while (name[i] != NULL) {
+ if (stringHash(name[i]) == 1396789436) {
++ set_compilation_tuner_params();
++ set_intrinsic_param();
+ if (FLAG_IS_DEFAULT(ActiveProcessorCount) && (UseG1GC || UseParallelGC) && apc > 8)
+ FLAG_SET_DEFAULT(ActiveProcessorCount, 8);
+ break;
@@ -261,6 +400,271 @@ index a2f0135c..f14ace0d 100644
bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
private:
bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
+diff --git a/hotspot/src/share/vm/classfile/vmSymbols.hpp b/hotspot/src/share/vm/classfile/vmSymbols.hpp
+index 494fd9bd..1674d352 100644
+--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp
++++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp
+@@ -874,6 +874,11 @@
+ do_name( dgemv_name, "dgemv") \
+ do_signature(dgemv_signature, "(Ljava/lang/String;IID[DII[DIID[DII)V") \
+ \
++ /* support for org.apache.hadoop.hbase.util.JVM */ \
++ do_class(org_apache_hadoop_hbase_util_jvm, "org/apache/hadoop/hbase/util/JVM") \
++ do_intrinsic(_jvm_isAmd64, org_apache_hadoop_hbase_util_jvm, isAmd64_name, void_boolean_signature, F_S) \
++ do_name( isAmd64_name, "isAmd64") \
++ \
+ /* support for sun.security.provider.SHA2 */ \
+ do_class(sun_security_provider_sha2, "sun/security/provider/SHA2") \
+ do_intrinsic(_sha2_implCompress, sun_security_provider_sha2, implCompress_name, implCompress_signature, F_R) \
+diff --git a/hotspot/src/share/vm/compiler/compileBroker.cpp b/hotspot/src/share/vm/compiler/compileBroker.cpp
+index e8f97074..01379902 100644
+--- a/hotspot/src/share/vm/compiler/compileBroker.cpp
++++ b/hotspot/src/share/vm/compiler/compileBroker.cpp
+@@ -813,18 +813,23 @@ CompileQueue* CompileBroker::compile_queue(int comp_level) {
+
+
+ void CompileBroker::print_compile_queues(outputStream* st) {
+- _c1_compile_queue->print(st);
+- _c2_compile_queue->print(st);
++ MutexLocker locker(MethodCompileQueue_lock);
++ if (_c1_compile_queue != NULL) {
++ _c1_compile_queue->print(st);
++ }
++ if (_c2_compile_queue != NULL) {
++ _c2_compile_queue->print(st);
++ }
+ }
+
+
+ void CompileQueue::print(outputStream* st) {
+- assert_locked_or_safepoint(lock());
++ assert(lock()->owned_by_self(), "must own lock");
+ st->print_cr("Contents of %s", name());
+ st->print_cr("----------------------------");
+ CompileTask* task = _first;
+ if (task == NULL) {
+- st->print_cr("Empty");;
++ st->print_cr("Empty");
+ } else {
+ while (task != NULL) {
+ task->print_compilation(st, NULL, true, true);
+diff --git a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp
+index 293382b3..cf9cd908 100644
+--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp
++++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp
+@@ -102,6 +102,7 @@ class AbstractInterpreter: AllStatic {
+ java_util_zip_CRC32_updateByteBuffer, // implementation of java.util.zip.CRC32.updateByteBuffer()
+ org_netlib_blas_Dgemm_dgemm, // implementation of org.netlib.blas.Dgemm.dgemm()
+ org_netlib_blas_Dgemv_dgemv, // implementation of org.netlib.blas.Dgemv.dgemv()
+ org_apache_hadoop_hbase_util_JVM_isAmd64, // implementation of org.apache.hadoop.hbase.util.JVM.isAmd64()
+ number_of_method_entries,
+ invalid = -1
+ };
+diff --git a/hotspot/src/share/vm/interpreter/interpreter.cpp b/hotspot/src/share/vm/interpreter/interpreter.cpp
+index d5d94f34..8d4b5b93 100644
+--- a/hotspot/src/share/vm/interpreter/interpreter.cpp
++++ b/hotspot/src/share/vm/interpreter/interpreter.cpp
+@@ -259,6 +259,10 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
+ }
+ }
+
++ if (UseHBaseUtilIntrinsics && m->intrinsic_id() == vmIntrinsics::_jvm_isAmd64) {
++ return org_apache_hadoop_hbase_util_JVM_isAmd64;
++ }
++
+ // Accessor method?
+ if (m->is_accessor()) {
+ assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
+@@ -321,6 +325,7 @@ void AbstractInterpreter::print_method_kind(MethodKind kind) {
+ case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break;
+ case org_netlib_blas_Dgemm_dgemm : tty->print("org_netlib_blas_Dgemm_dgemm"); break;
+ case org_netlib_blas_Dgemv_dgemv : tty->print("org_netlib_blas_Dgemv_dgemv"); break;
++ case org_apache_hadoop_hbase_util_JVM_isAmd64 : tty->print("org_apache_hadoop_hbase_util_JVM_isAmd64"); break;
+ default:
+ if (kind >= method_handle_invoke_FIRST &&
+ kind <= method_handle_invoke_LAST) {
+diff --git a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp
+index 09298a7f..3f2961fb 100644
+--- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp
++++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp
+@@ -406,6 +406,10 @@ void TemplateInterpreterGenerator::generate_all() {
+ method_entry(org_netlib_blas_Dgemv_dgemv)
+ }
+
++ if (UseHBaseUtilIntrinsics) {
++ method_entry(org_apache_hadoop_hbase_util_JVM_isAmd64)
++ }
++
+ initialize_method_handle_entries();
+
+ // all native method kinds (must be one contiguous block)
+diff --git a/hotspot/src/share/vm/oops/method.cpp b/hotspot/src/share/vm/oops/method.cpp
+index 406cd485..7cf7e08e 100644
+--- a/hotspot/src/share/vm/oops/method.cpp
++++ b/hotspot/src/share/vm/oops/method.cpp
+@@ -1301,7 +1301,7 @@ vmSymbols::SID Method::klass_id_for_intrinsics(Klass* holder) {
+ // which does not use the class default class loader so we check for its loader here
+ InstanceKlass* ik = InstanceKlass::cast(holder);
+ if ((ik->class_loader() != NULL) && !SystemDictionary::is_ext_class_loader(ik->class_loader())) {
+- if (!EnableIntrinsicExternal) {
++ if (!EnableIntrinsicExternal && !UseHBaseUtilIntrinsics) {
+ return vmSymbols::NO_SID; // regardless of name, no intrinsics here
+ }
+ }
+diff --git a/hotspot/src/share/vm/runtime/globals.hpp b/hotspot/src/share/vm/runtime/globals.hpp
+index fdd9db14..69e6587a 100644
+--- a/hotspot/src/share/vm/runtime/globals.hpp
++++ b/hotspot/src/share/vm/runtime/globals.hpp
+@@ -768,6 +768,9 @@ class CommandLineFlags {
+ product(bool, UseCRC32Intrinsics, false, \
+ "use intrinsics for java.util.zip.CRC32") \
+ \
++ product(bool, UseHBaseUtilIntrinsics, false, \
++ "use intrinsics for org.apache.hadoop.hbase.util.JVM on aarch64") \
++ \
+ experimental(bool, UseF2jBLASIntrinsics, false, \
+ "use intrinsics for com.github.fommil.netlib.F2jBLAS on aarch64") \
+ \
+diff --git a/hotspot/src/share/vm/runtime/os.cpp b/hotspot/src/share/vm/runtime/os.cpp
+index ff35e8b3..cae1cf47 100644
+--- a/hotspot/src/share/vm/runtime/os.cpp
++++ b/hotspot/src/share/vm/runtime/os.cpp
+@@ -366,6 +366,11 @@ static void signal_thread_entry(JavaThread* thread, TRAPS) {
+ }
+
+ void os::init_before_ergo() {
++#ifdef AARCH64
++ // global variables
++ extern char** argv_for_execvp;
++ JavaThread::os_linux_aarch64_options(active_processor_count(), argv_for_execvp);
++#endif
+ initialize_initial_active_processor_count();
+ // We need to initialize large page support here because ergonomics takes some
+ // decisions depending on large page support and the calculated large page size.
+diff --git a/hotspot/src/share/vm/runtime/stubRoutines.cpp b/hotspot/src/share/vm/runtime/stubRoutines.cpp
+index 3cee9c22..c4be88fc 100644
+--- a/hotspot/src/share/vm/runtime/stubRoutines.cpp
++++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp
+@@ -144,6 +144,8 @@ address StubRoutines::_ddotF2jBLAS = NULL;
+ address StubRoutines::_dgemmDgemm = NULL;
+ address StubRoutines::_dgemvDgemv = NULL;
+
++address StubRoutines::_isAmd64JVM = NULL;
++
+ address StubRoutines::_multiplyToLen = NULL;
+ address StubRoutines::_squareToLen = NULL;
+ address StubRoutines::_mulAdd = NULL;
+diff --git a/hotspot/src/share/vm/runtime/stubRoutines.hpp b/hotspot/src/share/vm/runtime/stubRoutines.hpp
+index fff13dbc..a5231bdd 100644
+--- a/hotspot/src/share/vm/runtime/stubRoutines.hpp
++++ b/hotspot/src/share/vm/runtime/stubRoutines.hpp
+@@ -221,6 +221,7 @@ class StubRoutines: AllStatic {
+ static address _ddotF2jBLAS;
+ static address _dgemmDgemm;
+ static address _dgemvDgemv;
++ static address _isAmd64JVM;
+
+ static address _multiplyToLen;
+ static address _squareToLen;
+@@ -391,6 +392,7 @@ class StubRoutines: AllStatic {
+ static address ddotF2jBLAS() { return _ddotF2jBLAS; }
+ static address dgemmDgemm() { return _dgemmDgemm; }
+ static address dgemvDgemv() { return _dgemvDgemv; }
++ static address isAmd64JVM() { return _isAmd64JVM; }
+
+ static address multiplyToLen() {return _multiplyToLen; }
+ static address squareToLen() {return _squareToLen; }
+@@ -431,6 +433,10 @@ class StubRoutines: AllStatic {
+ return _intrinsic_tan(d);
+ }
+
++ static bool intrinsic_isAmd64_JVM() {
++ return true;
++ }
++
+ //
+ // Safefetch stub support
+ //
+diff --git a/hotspot/src/share/vm/services/diagnosticCommand.hpp b/hotspot/src/share/vm/services/diagnosticCommand.hpp
+index d446aab4..c89933f9 100644
+--- a/hotspot/src/share/vm/services/diagnosticCommand.hpp
++++ b/hotspot/src/share/vm/services/diagnosticCommand.hpp
+@@ -554,7 +554,7 @@ public:
+ return "Compiler.codelist";
+ }
+ static const char* description() {
+- return "Print all compiled methods in code cache.";
++ return "Print all compiled methods in code cache that are alive";
+ }
+ static const char* impact() {
+ return "Medium";
+diff --git a/hotspot/test/serviceability/dcmd/CompilerQueueTest.java b/hotspot/test/serviceability/dcmd/CompilerQueueTest.java
+index 661e7cb4..6d2e7309 100644
+--- a/hotspot/test/serviceability/dcmd/CompilerQueueTest.java
++++ b/hotspot/test/serviceability/dcmd/CompilerQueueTest.java
+@@ -26,6 +26,8 @@
+ * @bug 8054889
+ * @build DcmdUtil CompilerQueueTest
+ * @run main CompilerQueueTest
++ * @run main/othervm -XX:-TieredCompilation CompilerQueueTest
++ * @run main/othervm -Xint CompilerQueueTest
+ * @summary Test of diagnostic command Compiler.queue
+ */
+
+@@ -62,36 +64,31 @@ public class CompilerQueueTest {
+ String result = DcmdUtil.executeDcmd("Compiler.queue");
+ BufferedReader r = new BufferedReader(new StringReader(result));
+
+- String line;
+- match(r.readLine(), "Contents of C1 compile queue");
+- match(r.readLine(), "----------------------------");
+ String str = r.readLine();
+- if (!str.equals("Empty")) {
+- while (str.charAt(0) != '-') {
+- validateMethodLine(str);
++ while (str != null) {
++ if (str.startsWith("Contents of C")) {
++ match(r.readLine(), "----------------------------");
+ str = r.readLine();
+- }
+- } else {
+- str = r.readLine();
+- }
+-
+- match(str, "----------------------------");
+- match(r.readLine(), "Contents of C2 compile queue");
+- match(r.readLine(), "----------------------------");
+- str = r.readLine();
+- if (!str.equals("Empty")) {
+- while (str.charAt(0) != '-') {
+- validateMethodLine(str);
++ if (!str.equals("Empty")) {
++ while (str.charAt(0) != '-') {
++ validateMethodLine(str);
++ str = r.readLine();
++ }
++ } else {
++ str = r.readLine();
++ }
++ match(str,"----------------------------");
+ str = r.readLine();
++ } else {
++ throw new Exception("Failed parsing dcmd queue, line: " + str);
+ }
+- } else {
+- str = r.readLine();
+ }
+- match(str, "----------------------------");
+ }
+
+ private static void validateMethodLine(String str) throws Exception {
+- String name = str.substring(19);
++ // Skip until package/class name begins. Trim to remove whitespace that
++ // may differ.
++ String name = str.substring(14).trim();
+ int sep = name.indexOf("::");
+ try {
+ Class.forName(name.substring(0, sep));
--
2.19.1
diff --git a/add-riscv64-support.patch b/add-riscv64-support.patch
index 8dda639edb126fd67e2c9647ca17501be2921039..250feb7bcefa4572145a6dc7b39162299239bd85 100644
--- a/add-riscv64-support.patch
+++ b/add-riscv64-support.patch
@@ -90,14 +90,14 @@ index 54cfcdd1..88eb8acd 100644
#if (defined IA32)
@@ -2010,9 +2014,11 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
static Elf32_Half running_arch_code=EM_AARCH64;
- #elif (defined LOONGARCH)
+ #elif (defined LOONGARCH64)
static Elf32_Half running_arch_code=EM_LOONGARCH;
+ #elif (defined RISCV)
+ static Elf32_Half running_arch_code=EM_RISCV;
#else
#error Method os::dll_load requires that one of following is defined:\
-- IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64, LOONGARCH
-+ IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64, LOONGARCH, RISCV
+- IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64, LOONGARCH64
++ IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64, LOONGARCH64, RISCV
#endif
// Identify compatability class for VM's architecture and library's architecture
diff --git a/heap-dump-redact-support.patch b/heap-dump-redact-support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c32d2125b39cde744cebab4dd24d50864f438d37
--- /dev/null
+++ b/heap-dump-redact-support.patch
@@ -0,0 +1,3904 @@
+---
+ .../share/classes/sun/jvm/hotspot/HSDB.java | 18 +-
+ .../sun/jvm/hotspot/oops/Annotation.java | 69 ++
+ .../classes/sun/jvm/hotspot/oops/Field.java | 10 +
+ .../sun/jvm/hotspot/oops/InstanceKlass.java | 7 +
+ .../classes/sun/jvm/hotspot/runtime/VM.java | 66 +-
+ .../sun/jvm/hotspot/tools/HeapDumper.java | 58 +-
+ .../hotspot/utilities/AnnotationArray2D.java | 63 ++
+ .../hotspot/utilities/HeapHprofBinWriter.java | 304 ++++++++-
+ .../jvm/hotspot/utilities/HeapRedactor.java | 448 ++++++++++++
+ .../make/linux/makefiles/mapfile-vers-debug | 2 +
+ .../make/linux/makefiles/mapfile-vers-product | 2 +
+ hotspot/make/linux/makefiles/vm.make | 27 +-
+ hotspot/src/os/linux/vm/os_linux.cpp | 48 ++
+ hotspot/src/os/linux/vm/os_linux.hpp | 53 ++
+ hotspot/src/share/vm/oops/annotations.hpp | 1 +
+ hotspot/src/share/vm/runtime/arguments.cpp | 24 +
+ hotspot/src/share/vm/runtime/arguments.hpp | 4 +
+ hotspot/src/share/vm/runtime/globals.hpp | 22 +-
+ hotspot/src/share/vm/runtime/vmStructs.cpp | 9 +
+ .../src/share/vm/services/attachListener.cpp | 20 +-
+ hotspot/src/share/vm/services/heapDumper.cpp | 635 +++++++++++++++++-
+ hotspot/src/share/vm/services/heapDumper.hpp | 2 +-
+ .../src/share/vm/services/heapRedactor.cpp | 621 +++++++++++++++++
+ .../src/share/vm/services/heapRedactor.hpp | 201 ++++++
+ .../share/classes/sun/tools/jmap/JMap.java | 244 ++++++-
+ 25 files changed, 2905 insertions(+), 53 deletions(-)
+ create mode 100644 hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Annotation.java
+ create mode 100644 hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/AnnotationArray2D.java
+ create mode 100644 hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapRedactor.java
+ create mode 100644 hotspot/src/share/vm/services/heapRedactor.cpp
+ create mode 100644 hotspot/src/share/vm/services/heapRedactor.hpp
+
+diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java
+index c961a6ce9..f5778dca1 100644
+--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java
++++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java
+@@ -39,12 +39,12 @@ import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
+ import sun.jvm.hotspot.gc_interface.*;
+ import sun.jvm.hotspot.interpreter.*;
+ import sun.jvm.hotspot.memory.*;
+-import sun.jvm.hotspot.oops.*;
+ import sun.jvm.hotspot.runtime.*;
+ import sun.jvm.hotspot.ui.*;
+ import sun.jvm.hotspot.ui.tree.*;
+ import sun.jvm.hotspot.ui.classbrowser.*;
+ import sun.jvm.hotspot.utilities.*;
++import sun.jvm.hotspot.oops.*;
+
+ /** The top-level HotSpot Debugger. FIXME: make this an embeddable
+ component! (Among other things, figure out what to do with the
+@@ -988,7 +988,7 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
+ }
+
+ if (curFrame.getFP() != null) {
+- annoPanel.addAnnotation(new Annotation(curFrame.getSP(),
++ annoPanel.addAnnotation(new sun.jvm.hotspot.ui.Annotation(curFrame.getSP(),
+ curFrame.getFP(),
+ anno));
+ } else {
+@@ -1000,7 +1000,7 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
+ if (Assert.ASSERTS_ENABLED) {
+ Assert.that(cb.getFrameSize() > 0, "CodeBlob must have non-zero frame size");
+ }
+- annoPanel.addAnnotation(new Annotation(sp,
++ annoPanel.addAnnotation(new sun.jvm.hotspot.ui.Annotation(sp,
+ sp.addOffsetTo(cb.getFrameSize()),
+ anno));
+ } else {
+@@ -1010,19 +1010,19 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
+
+ // Add interpreter frame annotations
+ if (curFrame.isInterpretedFrame()) {
+- annoPanel.addAnnotation(new Annotation(curFrame.addressOfInterpreterFrameExpressionStack(),
++ annoPanel.addAnnotation(new sun.jvm.hotspot.ui.Annotation(curFrame.addressOfInterpreterFrameExpressionStack(),
+ curFrame.addressOfInterpreterFrameTOS(),
+ "Interpreter expression stack"));
+ Address monBegin = curFrame.interpreterFrameMonitorBegin().address();
+ Address monEnd = curFrame.interpreterFrameMonitorEnd().address();
+ if (!monBegin.equals(monEnd)) {
+- annoPanel.addAnnotation(new Annotation(monBegin, monEnd,
++ annoPanel.addAnnotation(new sun.jvm.hotspot.ui.Annotation(monBegin, monEnd,
+ "BasicObjectLocks"));
+ }
+ if (interpreterFrameMethod != null) {
+ // The offset is just to get the right stack slots highlighted in the output
+ int offset = 1;
+- annoPanel.addAnnotation(new Annotation(curFrame.addressOfInterpreterFrameLocal(offset),
++ annoPanel.addAnnotation(new sun.jvm.hotspot.ui.Annotation(curFrame.addressOfInterpreterFrameLocal(offset),
+ curFrame.addressOfInterpreterFrameLocal((int) interpreterFrameMethod.getMaxLocals() + offset),
+ "Interpreter locals area for frame with SP = " + curFrame.getSP()));
+ }
+@@ -1031,9 +1031,9 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
+ methodAnno += " (BAD OOP)";
+ }
+ Address a = curFrame.addressOfInterpreterFrameMethod();
+- annoPanel.addAnnotation(new Annotation(a, a.addOffsetTo(addressSize), methodAnno));
++ annoPanel.addAnnotation(new sun.jvm.hotspot.ui.Annotation(a, a.addOffsetTo(addressSize), methodAnno));
+ a = curFrame.addressOfInterpreterFrameCPCache();
+- annoPanel.addAnnotation(new Annotation(a, a.addOffsetTo(addressSize), "Interpreter constant pool cache"));
++ annoPanel.addAnnotation(new sun.jvm.hotspot.ui.Annotation(a, a.addOffsetTo(addressSize), "Interpreter constant pool cache"));
+ }
+
+ RegisterMap rm = (RegisterMap) vf.getRegisterMap().clone();
+@@ -1118,7 +1118,7 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
+ }
+ }
+
+- annoPanel.addAnnotation(new Annotation(addr, addr.addOffsetTo(addressSize), anno));
++ annoPanel.addAnnotation(new sun.jvm.hotspot.ui.Annotation(addr, addr.addOffsetTo(addressSize), anno));
+ }
+ }, rm);
+ } catch (Exception e) {
+diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Annotation.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Annotation.java
+new file mode 100644
+index 000000000..9b95e7ab5
+--- /dev/null
++++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Annotation.java
+@@ -0,0 +1,69 @@
++/*
++ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ *
++ */
++
++package sun.jvm.hotspot.oops;
++
++import java.util.*;
++import sun.jvm.hotspot.debugger.Address;
++import sun.jvm.hotspot.runtime.VM;
++import sun.jvm.hotspot.runtime.VMObject;
++import sun.jvm.hotspot.types.AddressField;
++import sun.jvm.hotspot.types.Type;
++import sun.jvm.hotspot.types.TypeDataBase;
++import sun.jvm.hotspot.types.WrongTypeException;
++import sun.jvm.hotspot.utilities.AnnotationArray2D;
++
++// An Annotation is an oop containing class annotations
++
++public class Annotation extends VMObject {
++ private static AddressField class_annotations;
++ private static AddressField class_type_annotations;
++ private static AddressField fields_annotations;
++ private static AddressField fields_type_annotations;
++
++ static {
++ VM.registerVMInitializedObserver(new Observer() {
++ public void update(Observable o, Object data) {
++ initialize(VM.getVM().getTypeDataBase());
++ }
++ });
++ }
++
++ private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
++ Type type = db.lookupType("Annotations");
++ class_annotations = type.getAddressField("_class_annotations");
++ class_type_annotations = type.getAddressField("_class_type_annotations");
++ fields_annotations = type.getAddressField("_fields_annotations");
++ fields_type_annotations = type.getAddressField("_fields_type_annotations");
++ }
++
++ public Annotation(Address addr) {
++ super(addr);
++ }
++
++ public AnnotationArray2D getFieldsAnnotations() {
++ Address addr = getAddress().getAddressAt(fields_annotations.getOffset());
++ return new AnnotationArray2D(addr);
++ }
++}
+\ No newline at end of file
+diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Field.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Field.java
+index 621c8cf4b..51b7d1232 100644
+--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Field.java
++++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Field.java
+@@ -67,6 +67,8 @@ public class Field {
+ private Symbol genericSignature;
+ private AccessFlags accessFlags;
+ private int fieldIndex;
++ // annotations on this Java field, used for heap dump redaction
++ private U1Array fieldAnnotations;
+
+ /** Returns the byte offset of the field within the object or klass */
+ public long getOffset() { return offset; }
+@@ -112,6 +114,14 @@ public class Field {
+
+ public boolean hasInitialValue() { return holder.getFieldInitialValueIndex(fieldIndex) != 0; }
+
++ public void setFieldAnnotations(U1Array fieldAnnotations) {
++ this.fieldAnnotations = fieldAnnotations;
++ }
++
++ public U1Array getFieldAnnotations() {
++ return fieldAnnotations;
++ }
++
+ //
+ // Following acccessors are for named, non-VM fields only
+ //
+diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java
+index 75aa05c39..0a88137c6 100644
+--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java
++++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java
+@@ -73,6 +73,7 @@ public class InstanceKlass extends Klass {
+ transitiveInterfaces = type.getAddressField("_transitive_interfaces");
+ fields = type.getAddressField("_fields");
+ javaFieldsCount = new CIntField(type.getCIntegerField("_java_fields_count"), 0);
++ annotate = type.getAddressField("_annotations");
+ constants = new MetadataField(type.getAddressField("_constants"), 0);
+ classLoaderData = type.getAddressField("_class_loader_data");
+ sourceDebugExtension = type.getAddressField("_source_debug_extension");
+@@ -132,6 +133,7 @@ public class InstanceKlass extends Klass {
+ private static AddressField transitiveInterfaces;
+ private static AddressField fields;
+ private static CIntField javaFieldsCount;
++ private static AddressField annotate;
+ private static MetadataField constants;
+ private static AddressField classLoaderData;
+ private static AddressField sourceDebugExtension;
+@@ -851,6 +853,11 @@ public class InstanceKlass extends Klass {
+ return (IntArray) VMObjectFactory.newObject(IntArray.class, addr);
+ }
+
++ public Annotation getAnnotation() {
++ Address addr = getAddress().getAddressAt(annotate.getOffset());
++ return (Annotation) VMObjectFactory.newObject(Annotation.class, addr);
++ }
++
+ public U2Array getFields() {
+ Address addr = getAddress().getAddressAt(fields.getOffset());
+ return (U2Array) VMObjectFactory.newObject(U2Array.class, addr);
+diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
+index 29bf9efea..fda624b20 100644
+--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
++++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
++ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+@@ -124,6 +124,7 @@ public class VM {
+
+ private static Type intxType;
+ private static Type uintxType;
++ private static Type uint64tType;
+ private static CIntegerType boolType;
+ private Boolean sharingEnabled;
+ private Boolean compressedOopsEnabled;
+@@ -192,6 +193,50 @@ public class VM {
+ return addr.getCIntegerAt(0, uintxType.getSize(), true);
+ }
+
++ public boolean isCcstr() {
++ return type.equals("ccstr");
++ }
++
++ public String getCcstr() {
++ if (Assert.ASSERTS_ENABLED) {
++ Assert.that(isCcstr(), "not a ccstr flag!");
++ }
++ return CStringUtilities.getString(addr.getAddressAt(0));
++ }
++
++ public boolean isCcstrlist() {
++ return type.equals("ccstrlist");
++ }
++
++ public String getCcstrlist() {
++ if (Assert.ASSERTS_ENABLED) {
++ Assert.that(isCcstrlist(), "not a ccstrlist flag!");
++ }
++ return CStringUtilities.getString(addr.getAddressAt(0));
++ }
++
++ public boolean isDouble() {
++ return type.equals("double");
++ }
++
++ public double getDouble() {
++ if (Assert.ASSERTS_ENABLED) {
++ Assert.that(isDouble(), "not a double flag!");
++ }
++ return addr.getJDoubleAt(0);
++ }
++
++ public boolean isUint64t() {
++ return type.equals("uint64_t");
++ }
++
++ public long getUint64t() {
++ if (Assert.ASSERTS_ENABLED) {
++ Assert.that(isUint64t(), "not a uint64_t flag!");
++ }
++ return addr.getCIntegerAt(0, uint64tType.getSize(), true);
++ }
++
+ public String getValue() {
+ if (isBool()) {
+ return new Boolean(getBool()).toString();
+@@ -199,7 +244,23 @@ public class VM {
+ return new Long(getIntx()).toString();
+ } else if (isUIntx()) {
+ return new Long(getUIntx()).toString();
+- } else {
++ } else if (isCcstr()) {
++ String str = getCcstr();
++ if (str != null) {
++ str = "\"" + str + "\"";
++ }
++ return str;
++ } else if (isCcstrlist()) {
++ String str = getCcstrlist();
++ if (str != null) {
++ str = "\"" + str + "\"";
++ }
++ return str;
++ } else if (isDouble()) {
++ return Double.toString(getDouble());
++ } else if (isUint64t()) {
++ return Long.toUnsignedString(getUint64t());
++ } else {
+ return null;
+ }
+ }
+@@ -325,6 +386,7 @@ public class VM {
+
+ intxType = db.lookupType("intx");
+ uintxType = db.lookupType("uintx");
++ uint64tType = db.lookupType("uint64_t");
+ boolType = (CIntegerType) db.lookupType("bool");
+
+ minObjAlignmentInBytes = getObjectAlignmentInBytes();
+diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java
+index 1b9350431..be503fe06 100644
+--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java
++++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java
+@@ -24,8 +24,11 @@
+
+ package sun.jvm.hotspot.tools;
+
++import sun.jvm.hotspot.runtime.VM;
+ import sun.jvm.hotspot.utilities.HeapHprofBinWriter;
+ import sun.jvm.hotspot.debugger.JVMDebugger;
++import sun.jvm.hotspot.utilities.HeapRedactor;
++
+ import java.io.IOException;
+
+ /*
+@@ -39,10 +42,17 @@ public class HeapDumper extends Tool {
+
+ private String dumpFile;
+
++ private HeapRedactor redactor;
++
+ public HeapDumper(String dumpFile) {
+ this.dumpFile = dumpFile;
+ }
+
++ public HeapDumper(String dumpFile, HeapRedactor redactor){
++ this(dumpFile);
++ this.redactor = redactor;
++ }
++
+ public HeapDumper(String dumpFile, JVMDebugger d) {
+ super(d);
+ this.dumpFile = dumpFile;
+@@ -55,21 +65,59 @@ public class HeapDumper extends Tool {
+ super.printFlagsUsage();
+ }
+
++ private String getVMRedactParameter(String name){
++ VM vm = VM.getVM();
++ VM.Flag flag = vm.getCommandLineFlag(name);
++ if(flag == null){
++ return null;
++ }
++ return flag.getCcstr();
++ }
++
+ // use HeapHprofBinWriter to write the heap dump
+ public void run() {
+ System.out.println("Dumping heap to " + dumpFile + " ...");
+ try {
+- new HeapHprofBinWriter().write(dumpFile);
++ HeapHprofBinWriter writer = new HeapHprofBinWriter();
++ if(this.redactor != null){
++ writer.setHeapRedactor(this.redactor);
++ if(writer.getHeapDumpRedactLevel() != HeapRedactor.HeapDumpRedactLevel.REDACT_UNKNOWN){
++ System.out.println("HeapDump Redact Level = " + this.redactor.getRedactLevelString());
++ }
++ }else{
++ resetHeapHprofBinWriter(writer);
++ }
++ writer.write(dumpFile);
+ System.out.println("Heap dump file created");
+ } catch (IOException ioe) {
+ System.err.println(ioe.getMessage());
+ }
+ }
+
++ private void resetHeapHprofBinWriter(HeapHprofBinWriter writer) {
++ String redactStr = getVMRedactParameter("HeapDumpRedact");
++ if(redactStr != null && !redactStr.isEmpty()){
++ HeapRedactor.RedactParams redactParams = new HeapRedactor.RedactParams();
++ if(HeapRedactor.REDACT_ANNOTATION_OPTION.equals(redactStr)){
++ String classPathStr = getVMRedactParameter("RedactClassPath");
++ redactStr = (classPathStr != null && !classPathStr.isEmpty()) ? redactStr : HeapRedactor.REDACT_OFF_OPTION;
++ redactParams.setRedactClassPath(classPathStr);
++ } else {
++ String redactMapStr = getVMRedactParameter("RedactMap");
++ redactParams.setRedactMap(redactMapStr);
++ String redactMapFileStr = getVMRedactParameter("RedactMapFile");
++ redactParams.setRedactMapFile(redactMapFileStr);
++ }
++ redactParams.setAndCheckHeapDumpRedact(redactStr);
++ writer.setHeapRedactor(new HeapRedactor(redactParams));
++ }
++ }
++
+ // JDK jmap utility will always invoke this tool as:
+ // HeapDumper -f
+ public static void main(String args[]) {
+ String file = DEFAULT_DUMP_FILE;
++ HeapRedactor heapRedactor = null;
+ if (args.length > 2) {
+ if (args[0].equals("-f")) {
+ file = args[1];
+@@ -77,9 +125,15 @@ public class HeapDumper extends Tool {
+ System.arraycopy(args, 2, newargs, 0, args.length-2);
+ args = newargs;
+ }
++ if(args[0].equals("-r")){
++ heapRedactor = new HeapRedactor(args[1]);
++ String[] newargs = new String[args.length-2];
++ System.arraycopy(args, 2, newargs, 0, args.length-2);
++ args = newargs;
++ }
+ }
+
+- HeapDumper dumper = new HeapDumper(file);
++ HeapDumper dumper = heapRedactor == null ? new HeapDumper(file) : new HeapDumper(file, heapRedactor);
+ dumper.execute(args);
+ }
+
+diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/AnnotationArray2D.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/AnnotationArray2D.java
+new file mode 100644
+index 000000000..0703549dd
+--- /dev/null
++++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/AnnotationArray2D.java
+@@ -0,0 +1,63 @@
++/*
++ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
++ *
++ * This code is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 only, as
++ * published by the Free Software Foundation.
++ *
++ * This code is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * version 2 for more details (a copy is included in the LICENSE file that
++ * accompanied this code).
++ *
++ * You should have received a copy of the GNU General Public License version
++ * 2 along with this work; if not, write to the Free Software Foundation,
++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
++ * or visit www.oracle.com if you need additional information or have any
++ * questions.
++ *
++ */
++
++package sun.jvm.hotspot.utilities;
++
++import java.util.*;
++import sun.jvm.hotspot.debugger.Address;
++import sun.jvm.hotspot.runtime.VM;
++import sun.jvm.hotspot.types.Type;
++import sun.jvm.hotspot.types.TypeDataBase;
++import sun.jvm.hotspot.types.WrongTypeException;
++
++public class AnnotationArray2D extends GenericArray {
++ static {
++ VM.registerVMInitializedObserver(new Observer() {
++ public void update(Observable o, Object data) {
++ initialize(VM.getVM().getTypeDataBase());
++ }
++ });
++ }
++
++ private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
++ elemType = db.lookupType("Array<u1>*");
++
++ Type type = db.lookupType("Array<Array<u1>*>");
++ dataFieldOffset = type.getAddressField("_data").getOffset();
++ }
++
++ private static long dataFieldOffset;
++ protected static Type elemType;
++
++ public AnnotationArray2D(Address addr) {
++ super(addr, dataFieldOffset);
++ }
++
++ public U1Array getAt(int i) {
++ return new U1Array(getAddressAt(i));
++ }
++ public Type getElemType() {
++ return elemType;
++ }
++}
+\ No newline at end of file
+diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java
+index 319aecdaa..1da6ed028 100644
+--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java
++++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java
+@@ -379,6 +379,31 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+ private static final int JVM_SIGNATURE_ARRAY = '[';
+ private static final int JVM_SIGNATURE_CLASS = 'L';
+
++ // Heap Redact
++ private HeapRedactor heapRedactor;
++
++ public HeapRedactor getHeapRedactor() {
++ return heapRedactor;
++ }
++
++ public void setHeapRedactor(HeapRedactor heapRedactor) {
++ this.heapRedactor = heapRedactor;
++ }
++
++ public HeapRedactor.HeapDumpRedactLevel getHeapDumpRedactLevel(){
++ if(heapRedactor == null){
++ return HeapRedactor.HeapDumpRedactLevel.REDACT_OFF;
++ }
++ return heapRedactor.getHeapDumpRedactLevel();
++ }
++
++ private String lookupRedactName(String name){
++ if(heapRedactor == null){
++ return null;
++ }
++ return heapRedactor.lookupRedactName(name);
++ }
++
+ public synchronized void write(String fileName) throws IOException {
+ // open file stream and create buffered data output stream
+ fos = new FileOutputStream(fileName);
+@@ -432,6 +457,9 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+ // this will write heap data into the buffer stream
+ super.write();
+
++ // write redacted String Field record
++ writeAnnotateFieldValue();
++
+ // flush buffer stream.
+ out.flush();
+
+@@ -533,6 +561,59 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+ }
+ }
+
++ private void writeAnnotateFieldValue() throws IOException {
++ HeapRedactor.HeapDumpRedactLevel level = getHeapDumpRedactLevel();
++ if(level != HeapRedactor.HeapDumpRedactLevel.REDACT_ANNOTATION
++ && level != HeapRedactor.HeapDumpRedactLevel.REDACT_DIYRULES) {
++ return;
++ }
++
++ HeapRedactor.RedactVectorNode redactVector = heapRedactor.getHeaderNode();
++ if(redactVector == null) {
++ return;
++ }
++
++ while(redactVector != null){
++ List<TypeArray> typeArrayList = redactVector.getTypeArrayList();
++ for(int i = 0; i < redactVector.getCurrentIndex(); i++) {
++ TypeArray array = typeArrayList.get(i);
++ TypeArrayKlass tak = (TypeArrayKlass) array.getKlass();
++ final int type = (int) tak.getElementType();
++
++ if(type != TypeArrayKlass.T_CHAR) {
++ continue;
++ }
++
++ OopHandle handle = (array != null)? array.getHandle() : null;
++ long address = getAddressValue(handle);
++ Optional<String> annotateValueOptional = heapRedactor.lookupRedactAnnotationValue(address);
++ String annotateValue = annotateValueOptional.isPresent() ? annotateValueOptional.get() : null;
++ long expectLength = array.getLength();
++ if(annotateValue != null) {
++ expectLength = annotateValue.length();
++ }
++
++ final String typeName = tak.getElementTypeName();
++ out.writeByte((byte) HPROF_GC_PRIM_ARRAY_DUMP);
++ writeObjectID(array);
++ out.writeInt(DUMMY_STACK_TRACE_ID);
++ out.writeInt((int)expectLength);
++ out.writeByte((byte) type);
++
++ if (annotateValue != null) {
++ for(int index = 0; index < expectLength; index++) {
++ out.writeChar(annotateValue.charAt(index));
++ }
++ } else {
++ writeCharArray(array);
++ }
++ }
++
++ HeapRedactor.RedactVectorNode tempVector = redactVector.getNext();
++ redactVector = tempVector;
++ }
++ }
++
+ protected void writeClass(Instance instance) throws IOException {
+ Klass reflectedKlass = java_lang_Class.asKlass(instance);
+ // dump instance record only for primitive type Class objects.
+@@ -690,19 +771,34 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+ }
+
+ protected void writePrimitiveArray(TypeArray array) throws IOException {
++ HeapRedactor.HeapDumpRedactLevel level = getHeapDumpRedactLevel();
++
++ TypeArrayKlass tak = (TypeArrayKlass) array.getKlass();
++ final int type = (int) tak.getElementType();
++ if(type == TypeArrayKlass.T_CHAR && (level == HeapRedactor.HeapDumpRedactLevel.REDACT_ANNOTATION
++ || level == HeapRedactor.HeapDumpRedactLevel.REDACT_DIYRULES)) {
++ heapRedactor.recordTypeArray(array);
++ return;
++ }
++
+ out.writeByte((byte) HPROF_GC_PRIM_ARRAY_DUMP);
+ writeObjectID(array);
+ out.writeInt(DUMMY_STACK_TRACE_ID);
+ out.writeInt((int) array.getLength());
+- TypeArrayKlass tak = (TypeArrayKlass) array.getKlass();
+- final int type = (int) tak.getElementType();
+ out.writeByte((byte) type);
++
++ boolean shouldRedact = (level == HeapRedactor.HeapDumpRedactLevel.REDACT_BASIC
++ || level == HeapRedactor.HeapDumpRedactLevel.REDACT_FULL);
+ switch (type) {
+ case TypeArrayKlass.T_BOOLEAN:
+ writeBooleanArray(array);
+ break;
+ case TypeArrayKlass.T_CHAR:
+- writeCharArray(array);
++ if (shouldRedact) {
++ writeCharArrayObfuscated(array);
++ } else {
++ writeCharArray(array);
++ }
+ break;
+ case TypeArrayKlass.T_FLOAT:
+ writeFloatArray(array);
+@@ -711,13 +807,21 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+ writeDoubleArray(array);
+ break;
+ case TypeArrayKlass.T_BYTE:
+- writeByteArray(array);
++ if (shouldRedact) {
++ writeByteArrayObfuscated(array);
++ } else {
++ writeByteArray(array);
++ }
+ break;
+ case TypeArrayKlass.T_SHORT:
+ writeShortArray(array);
+ break;
+ case TypeArrayKlass.T_INT:
+- writeIntArray(array);
++ if (shouldRedact) {
++ writeIntArrayObfuscated(array);
++ } else {
++ writeIntArray(array);
++ }
+ break;
+ case TypeArrayKlass.T_LONG:
+ writeLongArray(array);
+@@ -743,6 +847,13 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+ }
+ }
+
++ private void writeByteArrayObfuscated(TypeArray array) throws IOException {
++ final int length = (int) array.getLength();
++ for (int index = 0; index < length; index++) {
++ out.writeByte(0);
++ }
++ }
++
+ private void writeShortArray(TypeArray array) throws IOException {
+ final int length = (int) array.getLength();
+ for (int index = 0; index < length; index++) {
+@@ -759,6 +870,13 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+ }
+ }
+
++ private void writeIntArrayObfuscated(TypeArray array) throws IOException {
++ final int length = (int) array.getLength();
++ for (int index = 0; index < length; index++) {
++ out.writeInt(0);
++ }
++ }
++
+ private void writeLongArray(TypeArray array) throws IOException {
+ final int length = (int) array.getLength();
+ for (int index = 0; index < length; index++) {
+@@ -775,6 +893,13 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+ }
+ }
+
++ private void writeCharArrayObfuscated(TypeArray array) throws IOException {
++ final int length = (int) array.getLength();
++ for (int index = 0; index < length; index++) {
++ out.writeChar(0);
++ }
++ }
++
+ private void writeFloatArray(TypeArray array) throws IOException {
+ final int length = (int) array.getLength();
+ for (int index = 0; index < length; index++) {
+@@ -820,6 +945,20 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+ for (Iterator itr = fields.iterator(); itr.hasNext();) {
+ writeField((Field) itr.next(), instance);
+ }
++
++ if(getHeapDumpRedactLevel() != HeapRedactor.HeapDumpRedactLevel.REDACT_ANNOTATION
++ && getHeapDumpRedactLevel() != HeapRedactor.HeapDumpRedactLevel.REDACT_DIYRULES) {
++ return;
++ }
++ // record the anonymous value for every field
++ if(klass instanceof InstanceKlass && heapRedactor != null) {
++ if(heapRedactor.getHeapDumpRedactLevel() == HeapRedactor.HeapDumpRedactLevel.REDACT_ANNOTATION
++ && heapRedactor.getRedactAnnotationClassPath() != null && !heapRedactor.getRedactAnnotationClassPath().isEmpty()) {
++ recordAnnotationValueMap(fields, instance);
++ } else if( heapRedactor.getHeapDumpRedactLevel() == HeapRedactor.HeapDumpRedactLevel.REDACT_DIYRULES) {
++ recordDiyRulesValueMap(fields, instance);
++ }
++ }
+ }
+
+ //-- Internals only below this point
+@@ -842,6 +981,130 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+ }
+ }
+
++ private void recordAnnotationValueMap(List<Field> fields, Instance instance) {
++ Klass klass = instance.getKlass();
++ boolean inJavaPackage = false;
++ Symbol classNameSymbol = klass.getName();
++ if(classNameSymbol != null) {
++ String className = classNameSymbol.asString();
++ inJavaPackage = (className != null && className.startsWith("java/"));
++ }
++ if(inJavaPackage){
++ return;
++ }
++ for (Field field : fields) {
++ Symbol fieldSignature = field.getSignature();
++ if(fieldSignature == null || fieldSignature.asString() == null || !"Ljava/lang/String;".equals(fieldSignature.asString())) {
++ continue;
++ }
++ try {
++ InstanceKlass fieldHolder = field.getFieldHolder();
++ U1Array fieldAnnotations = field.getFieldAnnotations();
++ Optional<String> anonymousValueOption = getAnonymousValue(fieldAnnotations, fieldHolder.getConstants());
++ if(!anonymousValueOption.isPresent()) {
++ continue;
++ }
++ long address = getStringFieldAddress(field, instance);
++ if(address > 0L) {
++ heapRedactor.recordRedactAnnotationValue(address, anonymousValueOption.get());
++ }
++ } catch (Exception e) {
++ }
++ }
++ }
++
++ private Optional<String> getAnonymousValue(U1Array fieldAnnotations, ConstantPool cp) {
++ Optional<String> anonymousValueOption = Optional.empty();
++ if (fieldAnnotations.getAddress() == null) {
++ return anonymousValueOption;
++ }
++
++ int fieldAnnotationsTagsLen = fieldAnnotations.length();
++ boolean isAnonymousAnnotation = false;
++ int annotationStart = 0;
++ int annotationEnd = 0;
++ for (int j = 0; j < fieldAnnotationsTagsLen; j++) {
++ int cpIndex = fieldAnnotations.at(j);
++ if (cpIndex >= cp.getLength() || cpIndex < 0) {
++ continue;
++ }
++ byte cpConstType = cp.getTags().at(cpIndex);
++ if (cpConstType == ConstantPool.JVM_CONSTANT_Utf8) {
++ annotationStart += (isAnonymousAnnotation ? 0 : 1);
++ annotationEnd++;
++ Symbol symbol = cp.getSymbolAt(cpIndex);
++ if (symbol.asString() == null || symbol.asString().isEmpty()) {
++ continue;
++ }
++ if (symbol.asString().equals("L" + heapRedactor.getRedactAnnotationClassPath() + ";")) {
++ isAnonymousAnnotation = true;
++ }
++ if(annotationEnd - annotationStart == 1 && !"value".equals(symbol.asString())) {
++ break;
++ }
++ if(annotationEnd - annotationStart == 2) {
++ anonymousValueOption = Optional.ofNullable(cp.getSymbolAt(cpIndex).asString());
++ break;
++ }
++ }
++ }
++ return anonymousValueOption;
++ }
++
++ private void recordDiyRulesValueMap(List<Field> fields, Instance instance) {
++ Klass klass = instance.getKlass();
++ boolean diyRulesFlag = false;
++ Symbol classNameSymbol = klass.getName();
++ Map redactRulesMap = null;
++ if(classNameSymbol != null) {
++ String className = classNameSymbol.asString();
++ Optional